removed fuel-library dir

conf.py
@@ -128,7 +128,7 @@ html_logo = '_static/fuel_gradient_200.png'
 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = None
+html_favicon = '_static/mirantis_icon.png'

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -137,7 +137,7 @@ html_static_path = ['_static']

 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%b %d, %Y'
+html_last_updated_fmt = '%c, %Z'

 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
@@ -159,10 +159,10 @@ html_sidebars = {
 html_use_index = True

 # If true, the index is split into individual pages for each letter.
-html_split_index = False
+html_split_index = True

 # If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
+html_show_sourcelink = False

 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
 html_show_sphinx = False
@@ -1,161 +0,0 @@
|
||||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf pdf text man changes linkcheck doctest gettext
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " text to make text files"
|
||||
@echo " pdf to make pdf files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " texinfo to make Texinfo files"
|
||||
@echo " info to make Texinfo files and run them through makeinfo"
|
||||
@echo " gettext to make PO message catalogs"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
|
||||
clean:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/fuel.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/fuel.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/fuel"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/fuel"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
pdf:
|
||||
$(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) $(BUILDDIR)/pdf
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/pdf."
|
||||
@echo "Run \`make' in that directory to run these through pdf" \
|
||||
"(use \`make pdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext:
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
@@ -1,507 +0,0 @@
|
||||
/*
|
||||
* bootstrap-sphinx.css
|
||||
* ~~~~~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* Sphinx stylesheet -- Twitter Bootstrap theme.
|
||||
*/
|
||||
|
||||
/*
|
||||
body {
|
||||
padding-top: 52px;
|
||||
}
|
||||
*/
|
||||
.navbar .brand {
|
||||
color: #FFF;
|
||||
text-shadow: #777 2px 2px 3px;
|
||||
}
|
||||
|
||||
{%- block sidebarlogo %}
|
||||
{%- if logo %}
|
||||
.navbar h3 a, .navbar .brand {
|
||||
background: transparent url("{{ logo }}") no-repeat 22px 3px;
|
||||
padding-left: 62px;
|
||||
}
|
||||
{%- endif %}
|
||||
{%- endblock %}
|
||||
|
||||
|
||||
body {
|
||||
padding-top: 52px;
|
||||
font-family: 'PT Sans', arial, sans-serif;
|
||||
line-height: 150%;
|
||||
font-size: 16px;
|
||||
background: #f5f5f5 url(img/fuel_tile.jpg);
|
||||
color: #41454d;
|
||||
border: none 1px solid #ececec;
|
||||
|
||||
margin: 0;
|
||||
min-width: 740px;
|
||||
}
|
||||
|
||||
div.section {
|
||||
padding-top: 40px;
|
||||
}
|
||||
|
||||
div.container {
|
||||
border-top-left-radius: 6px;
|
||||
border-top-right-radius: 6px;
|
||||
border-bottom-left-radius: 6px;
|
||||
border-bottom-right-radius: 6px;
|
||||
background: white;
|
||||
border-top: 1px solid #ddd;
|
||||
border-bottom: 1px solid #ddd;
|
||||
padding: 0 10px 0 10px;
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
div.mira_header {
|
||||
margin: 0 96px;
|
||||
height: 60px;
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
a#fuel_logo {
|
||||
display: block;
|
||||
width: 277px;
|
||||
height: 50px;
|
||||
background: url(img/FUEL_small.png);
|
||||
}
|
||||
|
||||
div.mcontainer {margin: 0 80px; min-width: 960px;}
|
||||
div.footer_container {}
|
||||
div.header_container {}
|
||||
|
||||
div.document {
|
||||
background-color: white;
|
||||
text-align: left;
|
||||
background-repeat: repeat-x;
|
||||
}
|
||||
|
||||
div.bodywrapper {
|
||||
margin: 0 240px 0 0;
|
||||
border-right: 1px dotted #f0f0f1;
|
||||
}
|
||||
|
||||
div.body {
|
||||
margin: 0;
|
||||
padding: 0.5em 20px 20px 20px;
|
||||
}
|
||||
|
||||
div.related {
|
||||
font-size: 1em;
|
||||
|
||||
|
||||
}
|
||||
|
||||
div.related ul {
|
||||
background: white;
|
||||
color: #c5cfd8;
|
||||
height: 2em;
|
||||
border-top: 1px solid #ddd;
|
||||
border-bottom: 1px solid #ddd;
|
||||
}
|
||||
|
||||
div.rel_top ul {
|
||||
border-top-left-radius: 6px;
|
||||
border-top-right-radius: 6px;
|
||||
}
|
||||
|
||||
div.rel_bottom ul {
|
||||
border-bottom-left-radius: 6px;
|
||||
border-bottom-right-radius: 6px;
|
||||
}
|
||||
|
||||
div.related ul li {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
height: 2em;
|
||||
float: left;
|
||||
}
|
||||
|
||||
div.related ul li.right {
|
||||
float: right;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
div.related ul li a {
|
||||
margin: 0;
|
||||
padding: 0 5px 0 5px;
|
||||
line-height: 1.75em;
|
||||
color: #D32F1A;
|
||||
text-shadow: 1px 1px 0 #fff;
|
||||
}
|
||||
|
||||
div.related ul li a:hover {
|
||||
color: #f00;
|
||||
}
|
||||
|
||||
div.sphinxsidebarwrapper {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.sphinxsidebar {
|
||||
background:#fbfbfc;
|
||||
margin: 0;
|
||||
padding: 15px;
|
||||
width: 210px;
|
||||
float: right;
|
||||
font-size: 1em;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3, div.sphinxsidebar h4 {
|
||||
margin: 1.5em 0 0 0;
|
||||
padding: 0.1em 0 0.1em 0;
|
||||
|
||||
}
|
||||
|
||||
p.topless {
|
||||
margin-top: 0.2em;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3 a {
|
||||
color: black !important;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul li {
|
||||
margin-top: 3px;
|
||||
margin-bottom: 3px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul li a {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul {
|
||||
margin-top: 7px;
|
||||
padding: 0;
|
||||
line-height: 130%;
|
||||
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul {
|
||||
margin-top: 12px;
|
||||
margin-left: 20px;
|
||||
list-style-image: url(img/bull_white.png);
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul ul {
|
||||
margin-top: 3px;
|
||||
list-style-image: url(img/bull_white_sm.png);
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul ul ul {
|
||||
margin-top: 3px;
|
||||
list-style-image: url(img/bull_white_smlst.png);
|
||||
margin-left: 15px;
|
||||
}
|
||||
|
||||
|
||||
|
||||
div.sphinxsidebar ul ul ul li {
|
||||
line-height: 13px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul ul li a {
|
||||
font-size: 12px;
|
||||
color:#ce4736;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul ul ul li {
|
||||
margin-top: 4px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar ul ul ul ul li a {
|
||||
font-size: 12px;
|
||||
color:#bb6c62;
|
||||
}
|
||||
|
||||
div.footer {
|
||||
width: 660px;
|
||||
margin: 0 auto;
|
||||
height: 35px;
|
||||
color: #686a72;
|
||||
padding: 15px 8px 10px 0;
|
||||
clear: both;
|
||||
font-size: 15px;
|
||||
text-align: right;
|
||||
text-shadow: 1px 1px 0 #fff;
|
||||
background: url(img/mirantis_small.png) no-repeat center left;
|
||||
}
|
||||
|
||||
div.footer a {
|
||||
color: #e07e71;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
div.footer a:hover {
|
||||
color: #b25f54;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
/* -- body styles ----------------------------------------------------------- */
|
||||
|
||||
p {
|
||||
font-family: 'PT Sans', arial, sans-serif;
|
||||
font-size: 16px;
|
||||
margin: 0.8em 0 0.5em 0;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #D32F1A;
|
||||
text-decoration: none;
|
||||
text-shadow: 1px 1px 0 #fff;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
color: #f00;
|
||||
}
|
||||
|
||||
div.body a {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
ul {
|
||||
list-style-image: url(img/bull_red.png);
|
||||
}
|
||||
|
||||
ul ul {
|
||||
list-style-image: url(img/bull_white.png);
|
||||
}
|
||||
|
||||
ul ul ul {
|
||||
list-style-image: url(img/bull_white_sm.png);
|
||||
}
|
||||
|
||||
|
||||
h1 {
|
||||
font-family: 'PT Sans Narrow', 'Arial Narrow', 'arial', sans-serif;
|
||||
_margin: 0;
|
||||
_padding: 0.7em 0 0.3em 0;
|
||||
_font-size: 28px;
|
||||
color: black;
|
||||
_font-weight: bold;
|
||||
text-rendering: auto;
|
||||
}
|
||||
|
||||
h2 {
|
||||
_margin: 1.3em 0 0.2em 0;
|
||||
_font-size: 25px;
|
||||
_padding: 0;
|
||||
color:#ba2414;
|
||||
_font-weight: bold;
|
||||
text-rendering: auto;
|
||||
}
|
||||
|
||||
h3 {
|
||||
font-family: 'PT Sans Narrow', 'Arial Narrow', 'arial', sans-serif;
|
||||
_margin: 2em 0 0 0;
|
||||
_font-size: 22px;
|
||||
color: black;
|
||||
_font-weight: normal;
|
||||
text-transform: uppercase;
|
||||
text-rendering: auto;
|
||||
}
|
||||
|
||||
h4 {
|
||||
font-family: 'PT Sans', 'arial', sans-serif;
|
||||
_margin: 1.5em 0 0.3em 0;
|
||||
_font-size: 19px;
|
||||
color: #38424b;
|
||||
_font-weight: normal;
|
||||
text-transform: uppercase;
|
||||
text-rendering: auto;
|
||||
}
|
||||
|
||||
div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
|
||||
text-decoration: none;
|
||||
text-rendering: auto;
|
||||
}
|
||||
|
||||
div.body h1 a, div.body h3 a {
|
||||
color:black !important;
|
||||
}
|
||||
|
||||
div.body h2 a {
|
||||
color:#ba2414 !important;
|
||||
}
|
||||
|
||||
div.body h4 a {
|
||||
color:#38424b !important;
|
||||
}
|
||||
|
||||
|
||||
|
||||
h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
|
||||
display: none;
|
||||
margin: 0 0 0 0.3em;
|
||||
padding: 0 0.2em 0 0.2em;
|
||||
color: #aaa!important;
|
||||
}
|
||||
|
||||
h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
|
||||
h5:hover a.anchor, h6:hover a.anchor {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
|
||||
h5 a.anchor:hover, h6 a.anchor:hover {
|
||||
color: #777;
|
||||
background-color: #eee;
|
||||
}
|
||||
|
||||
a.headerlink {
|
||||
color: #c60f0f!important;
|
||||
font-size: 1em;
|
||||
margin-left: 6px;
|
||||
padding: 0 4px 0 4px;
|
||||
text-decoration: none!important;
|
||||
}
|
||||
|
||||
a.headerlink:hover {
|
||||
background-color: #ccc;
|
||||
color: white!important;
|
||||
}
|
||||
|
||||
cite, code, tt {
|
||||
font-family: 'Consolas', 'Deja Vu Sans Mono',
|
||||
'Bitstream Vera Sans Mono', monospace;
|
||||
font-size: 0.95em;
|
||||
letter-spacing: 0.01em;
|
||||
}
|
||||
|
||||
tt {
|
||||
background-color: #f2f2f2;
|
||||
border-bottom: 1px solid #ddd;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
tt.descname, tt.descclassname, tt.xref {
|
||||
border: 0;
|
||||
}
|
||||
|
||||
hr {
|
||||
border: 1px solid #abc;
|
||||
margin: 2em;
|
||||
}
|
||||
|
||||
a tt {
|
||||
border: 0;
|
||||
color: #CA7900;
|
||||
}
|
||||
|
||||
a tt:hover {
|
||||
color: #2491CF;
|
||||
}
|
||||
|
||||
pre {
|
||||
font-family: 'Consolas', 'Deja Vu Sans Mono',
|
||||
'Bitstream Vera Sans Mono', monospace;
|
||||
font-size: 0.95em;
|
||||
letter-spacing: 0.015em;
|
||||
line-height: 120%;
|
||||
padding: 0.5em;
|
||||
background: #F5F7FA;
|
||||
border: 1px solid #ECEFF3;
|
||||
border-radius: 3px;
|
||||
|
||||
}
|
||||
|
||||
pre a {
|
||||
color: inherit;
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
td.linenos pre {
|
||||
padding: 0.5em 0;
|
||||
}
|
||||
|
||||
div.quotebar {
|
||||
background-color: #f8f8f8;
|
||||
max-width: 250px;
|
||||
float: right;
|
||||
padding: 2px 7px;
|
||||
border: 1px solid #ccc;
|
||||
}
|
||||
|
||||
div.topic {
|
||||
margin-top: 15px;
|
||||
padding: 15px;
|
||||
border-radius: 6px;
|
||||
background: #fbfbfc;
|
||||
text-shadow: 1px 1px 0 white;
|
||||
-moz-box-shadow: inset 2px 2px 7px #D3D8DD;
|
||||
-webkit-box-shadow: inset 2px 2px 7px #D3D8DD;
|
||||
box-shadow: inset 2px 2px 7px #D3D8DD;
|
||||
}
|
||||
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
margin: 0 0.5em 0 0.5em;
|
||||
}
|
||||
|
||||
table td, table th {
|
||||
padding: 0.2em 0.5em 0.2em 0.5em;
|
||||
}
|
||||
|
||||
div.admonition, div.warning {
|
||||
font-size: 0.9em;
|
||||
margin: 1em 0 1em 0;
|
||||
border: 1px solid #86989B;
|
||||
background-color: #f7f7f7;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.admonition p, div.warning p {
|
||||
margin: 0.5em 1em 0.5em 1em;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.admonition pre, div.warning pre {
|
||||
margin: 0.4em 1em 0.4em 1em;
|
||||
}
|
||||
|
||||
div.admonition p.admonition-title,
|
||||
div.warning p.admonition-title {
|
||||
margin: 0;
|
||||
padding: 0.1em 0 0.1em 0.5em;
|
||||
color: white;
|
||||
border-bottom: 1px solid #86989B;
|
||||
font-weight: bold;
|
||||
background-color: #AFC1C4;
|
||||
}
|
||||
|
||||
div.warning {
|
||||
border: 1px solid #940000;
|
||||
}
|
||||
|
||||
div.warning p.admonition-title {
|
||||
background-color: #CF0000;
|
||||
border-bottom-color: #940000;
|
||||
}
|
||||
|
||||
div.admonition ul, div.admonition ol,
|
||||
div.warning ul, div.warning ol {
|
||||
margin: 0.1em 0.5em 0.5em 3em;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
div.versioninfo {
|
||||
margin: 1em 0 0 0;
|
||||
border: 1px solid #ccc;
|
||||
background-color: #DDEAF0;
|
||||
padding: 8px;
|
||||
line-height: 1.3em;
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
.viewcode-back {
|
||||
font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
|
||||
'Verdana', sans-serif;
|
||||
}
|
||||
|
||||
div.viewcode-block:target {
|
||||
background-color: #f4debf;
|
||||
border-top: 1px solid #ac9;
|
||||
border-bottom: 1px solid #ac9;
|
||||
}
|
fuel-library/_static/bootstrap.css (vendored, removed)
fuel-library/_static/bootstrap.js (vendored, removed)
(nine image files removed)
fuel-library/_static/jquery.js (vendored, removed)
@@ -1,4 +0,0 @@
|
||||
<li class="dropdown">
|
||||
<a href="{{ pathto(master_doc) }}" class="dropdown-toggle" data-toggle="dropdown">{{ _('Site') }} <b class="caret"></b></a>
|
||||
<ul class="dropdown-menu globaltoc">{{ toctree(maxdepth=1) }}</ul>
|
||||
</li>
|
@@ -1,131 +0,0 @@
|
||||
{% extends "basic/layout.html" %}
|
||||
{% set script_files = script_files + ['_static/bootstrap.js'] %}
|
||||
{% set css_files = ['_static/bootstrap.css', '_static/bootstrap-sphinx.css', 'http://fonts.googleapis.com/css?family=PT+Sans:400,700,400italic', 'http://fonts.googleapis.com/css?family=PT+Sans+Narrow:400,700'] + css_files %}
|
||||
|
||||
{# Sidebar: Rework into our Bootstrap nav section. #}
|
||||
{% macro navBar() %}
|
||||
<div id="navbar" class="navbar navbar-fixed-top">
|
||||
<div class="navbar-inner">
|
||||
<div class="container-fluid">
|
||||
<a class="brand" href="{{ pathto(master_doc) }}">{{ project|e }}</a>
|
||||
<span class="navbar-text pull-left"><b>{{ version|e }}</b></span>
|
||||
<ul class="nav">
|
||||
<li class="divider-vertical"></li>
|
||||
{% block sidebartoc %}
|
||||
{% include "globaltoc.html" %}
|
||||
{% include "localtoc.html" %}
|
||||
{% endblock %}
|
||||
{% block sidebarrel %}
|
||||
{% include "relations.html" %}
|
||||
{% endblock %}
|
||||
</ul>
|
||||
{% block sidebarsearch %}
|
||||
{% include "searchbox.html" %}
|
||||
{% endblock %}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endmacro %}
|
||||
|
||||
{%- block extrahead %}
|
||||
<script type="text/javascript">
|
||||
(function () {
|
||||
/**
|
||||
* Patch TOC list.
|
||||
*
|
||||
* Will mutate the underlying span to have a correct ul for nav.
|
||||
*
|
||||
* @param $ul: Element containing nested UL's to mutate.
|
||||
* @param minLevel: Starting level for nested lists. (1: global, 2: local).
|
||||
*/
|
||||
var patchToc = function ($ul, minLevel) {
|
||||
var findA;
|
||||
|
||||
// Find all a "internal" tags, traversing recursively.
|
||||
findA = function ($elem, level) {
|
||||
var level = level || 0,
|
||||
$items = $elem.find("> li > a.internal, > ul, > li > ul");
|
||||
|
||||
// Iterate everything in order.
|
||||
$items.each(function (index, item) {
|
||||
var $item = $(item),
|
||||
tag = item.tagName.toLowerCase(),
|
||||
pad = 15 + ((level - minLevel) * 10);
|
||||
|
||||
if (tag === 'a' && level >= minLevel) {
|
||||
// Add to existing padding.
|
||||
$item.css('padding-left', pad + "px");
|
||||
console.log(level, $item, 'padding-left', pad + "px");
|
||||
} else if (tag === 'ul') {
|
||||
// Recurse.
|
||||
findA($item, level + 1);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
console.log("HERE");
|
||||
findA($ul);
|
||||
};
|
||||
|
||||
$(document).ready(function () {
|
||||
// Add styling, structure to TOC's.
|
||||
$(".dropdown-menu").each(function () {
|
||||
$(this).find("ul").each(function (index, item){
|
||||
var $item = $(item);
|
||||
$item.addClass('unstyled');
|
||||
});
|
||||
$(this).find("li").each(function () {
|
||||
$(this).parent().append(this);
|
||||
});
|
||||
});
|
||||
|
||||
// Patch in level.
|
||||
patchToc($("ul.globaltoc"), 2);
|
||||
patchToc($("ul.localtoc"), 2);
|
||||
|
||||
// Enable dropdown.
|
||||
$('.dropdown-toggle').dropdown();
|
||||
});
|
||||
}());
|
||||
</script>
|
||||
{% endblock %}
|
||||
|
||||
{% block header %}{{ navBar() }}{% endblock %}
|
||||
|
||||
{# Silence the sidebar's, relbar's #}
|
||||
{% block sidebar1 %}{% endblock %}
|
||||
{% block sidebar2 %}{% endblock %}
|
||||
{% block relbar1 %}{% endblock %}
|
||||
{% block relbar2 %}{% endblock %}
|
||||
|
||||
{%- block content %}
|
||||
<div class="container">
|
||||
{% block body %} {% endblock %}
|
||||
</div>
|
||||
{%- endblock %}
|
||||
|
||||
{%- block footer %}
|
||||
<footer class="footer">
|
||||
<div class="container">
|
||||
<p class="pull-right"><a href="#">Back to top</a></p>
|
||||
<p>
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}© <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}<br/>
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %}<br/>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}<br/>
|
||||
{%- endif %}
|
||||
{%- if show_sphinx %}
|
||||
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}<br/>
|
||||
{%- endif %}
|
||||
</p>
|
||||
</div>
|
||||
</footer>
|
||||
{%- endblock %}
|
||||
|
@@ -1,5 +0,0 @@
|
||||
<li class="dropdown">
|
||||
<a href="#" class="dropdown-toggle" data-toggle="dropdown">{{ _('Page') }} <b class="caret"></b></a>
|
||||
<ul class="dropdown-menu localtoc">{{ toc }}</ul>
|
||||
<!--<span class="localtoc">{{ toc }}</span>-->
|
||||
</li>
|
@@ -1,8 +0,0 @@
|
||||
{%- if prev %}
|
||||
<li><a href="{{ prev.link|e }}"
|
||||
title="{{ _('previous chapter') }}">{{ "«"|safe }} {{ prev.title }}</a></li>
|
||||
{%- endif %}
|
||||
{%- if next %}
|
||||
<li><a href="{{ next.link|e }}"
|
||||
title="{{ _('next chapter') }}">{{ next.title }} {{ "»"|safe }}</a></li>
|
||||
{%- endif %}
|
@@ -1,7 +0,0 @@
|
||||
{%- if pagename != "search" %}
|
||||
<form class="navbar-search pull-right" style="margin-bottom:-3px;" action="{{ pathto('search') }}" method="get">
|
||||
<input type="text" name="q" class="search-query" placeholder="Search" />
|
||||
<input type="hidden" name="check_keywords" value="yes" />
|
||||
<input type="hidden" name="area" value="default" />
|
||||
</form>
|
||||
{%- endif %}
|
@@ -1,4 +0,0 @@
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<li><a href="{{ pathto('_sources/' + sourcename, true)|e }}"
|
||||
rel="nofollow">{{ _('Source') }}</a></li>
|
||||
{%- endif %}
|
@@ -1,25 +0,0 @@
|
||||
/*
|
||||
* bootstrap-sphinx.css
|
||||
* ~~~~~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* Sphinx stylesheet -- Twitter Bootstrap theme.
|
||||
* qwerty
|
||||
*/
|
||||
|
||||
body {
|
||||
padding-top: 52px;
|
||||
}
|
||||
|
||||
.navbar .brand {
|
||||
color: #FFF;
|
||||
text-shadow: #777 2px 2px 3px;
|
||||
}
|
||||
|
||||
{%- block sidebarlogo %}
|
||||
{%- if logo %}
|
||||
.navbar h3 a, .navbar .brand {
|
||||
background: transparent url("{{ logo }}") no-repeat 22px 3px;
|
||||
padding-left: 62px;
|
||||
}
|
||||
{%- endif %}
|
||||
{%- endblock %}
|
(two image files removed)
@@ -1,5 +0,0 @@
|
||||
# Twitter Bootstrap Theme
|
||||
[theme]
|
||||
inherit = basic
|
||||
stylesheet = basic.css
|
||||
pygments_style = tango
|
@@ -1,100 +0,0 @@
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions += ['sphinx.ext.inheritance_diagram', 'sphinxcontrib.blockdiag', 'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag', 'sphinxcontrib.nwdiag']
|
||||
|
||||
# The encoding of source files.
|
||||
source_encoding = 'utf-8-sig'
|
||||
#source_encoding = 'shift_jis'
|
||||
|
||||
# The language for content autogenerated by Sphinx.
|
||||
#language = 'en'
|
||||
#language = 'ja'
|
||||
|
||||
# The theme to use for HTML and HTML Help pages.
|
||||
#html_theme = 'default'
|
||||
#html_theme = 'sphinxdoc'
|
||||
#html_theme = 'scrolls'
|
||||
#html_theme = 'agogo'
|
||||
#html_theme = 'traditional'
|
||||
#html_theme = 'nature'
|
||||
#html_theme = 'haiku'
|
||||
|
||||
# If this is not the empty string, a 'Last updated on:' timestamp
|
||||
# is inserted at every page bottom, using the given strftime() format.
|
||||
# Default is '%b %d, %Y' (or a locale-dependent equivalent).
|
||||
html_last_updated_fmt = '%Y/%m/%d'
|
||||
|
||||
# Enable Antialiasing
|
||||
blockdiag_antialias = True
|
||||
actdiag_antialias = True
|
||||
seqdiag_antialias = True
|
||||
nwdiag_antialias = True
|
||||
|
||||
extensions += ['rst2pdf.pdfbuilder']
|
||||
pdf_documents = [
|
||||
(master_doc, project, project, copyright),
|
||||
]
|
||||
pdf_stylesheets = ['b4', 'kerning']
|
||||
pdf_language = "en"
|
||||
# Mode for literal blocks wider than the frame. Can be
|
||||
# overflow, shrink or truncate
|
||||
pdf_fit_mode = "shrink"
|
||||
|
||||
# Section level that forces a break page.
|
||||
# For example: 1 means top-level sections start in a new page
|
||||
# 0 means disabled
|
||||
#pdf_break_level = 0
|
||||
|
||||
# When a section starts in a new page, force it to be 'even', 'odd',
|
||||
# or just use 'any'
|
||||
pdf_breakside = 'any'
|
||||
|
||||
# Insert footnotes where they are defined instead of
|
||||
# at the end.
|
||||
pdf_inline_footnotes = True
|
||||
|
||||
# verbosity level. 0 1 or 2
|
||||
pdf_verbosity = 0
|
||||
|
||||
# If false, no index is generated.
|
||||
pdf_use_index = True
|
||||
|
||||
# If false, no modindex is generated.
|
||||
pdf_use_modindex = True
|
||||
|
||||
# If false, no coverpage is generated.
|
||||
pdf_use_coverpage = True
|
||||
|
||||
# Name of the cover page template to use
|
||||
#pdf_cover_template = 'sphinxcover.tmpl'
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#pdf_appendices = []
|
||||
|
||||
# Enable experimental feature to split table cells. Use it
|
||||
# if you get "DelayedTable too big" errors
|
||||
#pdf_splittables = False
|
||||
|
||||
# Set the default DPI for images
|
||||
#pdf_default_dpi = 72
|
||||
|
||||
# Enable rst2pdf extension modules (default is only vectorpdf)
|
||||
# you need vectorpdf if you want to use sphinx's graphviz support
|
||||
#pdf_extensions = ['vectorpdf']
|
||||
|
||||
# Page template name for "regular" pages
|
||||
#pdf_page_template = 'cutePage'
|
||||
|
||||
# Show Table Of Contents at the beginning?
|
||||
pdf_use_toc = True
|
||||
|
||||
# How many levels deep should the table of contents be?
|
||||
pdf_toc_depth = 2
|
||||
|
||||
# Add section number to section references
|
||||
pdf_use_numbered_links = False
|
||||
|
||||
# Background images fitting mode
|
||||
pdf_fit_background_mode = 'scale'
|
||||
|
||||
# pdf_font_path = ['C:\\Windows\\Fonts\\', '/usr/share/fonts']
|
@@ -1,253 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# "Fuel" documentation build configuration file, created by
|
||||
# sphinx-quickstart on Tue Sep 25 14:02:29 2012.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys, os
|
||||
|
||||
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
if on_rtd:
|
||||
extensions = ['sphinx.ext.autodoc']
|
||||
display_github = False
|
||||
else:
|
||||
extensions = ['sphinx.ext.autodoc','rst2pdf.pdfbuilder']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Fuel™ for OpenStack'
|
||||
copyright = u'2013, Mirantis'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '3.0.1'
|
||||
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '3.0.1'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'bootstrap'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
html_theme_path = ["_templates"]
|
||||
|
||||
html_add_permalinks = None
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = '_static\img\FUEL_small.png'
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
html_show_sphinx = False
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'fueldoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'fuel.tex', u'Fuel for OpenStack Documentation',
|
||||
u'Mirantis', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'fuel', u'Fuel™ for OpenStack Documentation',
|
||||
[u'Mirantis'], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output ------------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'fuel', u'Fuel for OpenStack Documentation',
|
||||
u'Mirantis', 'fuel', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
# -- Additional Settings -------------------------------------------------------
|
||||
execfile('./common_conf.py')
|
@@ -1,17 +0,0 @@
|
||||
==============================
|
||||
Fuel for OpenStack: User Guide
|
||||
==============================
|
||||
|
||||
Table of contents
|
||||
=================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
pages/0010-package-contents
|
||||
pages/0020-introduction
|
||||
pages/0040-reference-architecture
|
||||
pages/0050-installation-instructions
|
||||
pages/0055-production-considerations
|
||||
pages/0060-frequently-asked-questions
|
||||
pages/0090-creating-fuel-pm-from-scratch
|
@@ -1,199 +0,0 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. pdf to make PDF files
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\fuel.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\fuel.qhc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pdf" (
|
||||
%SPHINXBUILD% -b pdf %ALLSPHINXOPTS% %BUILDDIR%/pdf
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/pdf.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
@@ -1,21 +0,0 @@
|
||||
OpenStack is a very versatile and flexible cloud management platform. By exposing
its portfolio of cloud infrastructure services (compute, storage, networking, and
other core resources) through REST APIs, it enables a wide range of control over
these services, both from the perspective of an integrated Infrastructure as a
Service (IaaS) controlled by applications and through automated manipulation of
the infrastructure itself.

This architectural flexibility doesn't set itself up magically; it asks you, the
user and cloud administrator, to organize and manage a large array of
configuration options. Consequently, getting the most out of your OpenStack
cloud over time (in terms of flexibility, scalability, and manageability)
requires a thoughtful combination of automation and configuration choices.

Mirantis Fuel for OpenStack was created to solve exactly this problem. This
step-by-step guide takes you through the process of:

* Configuring OpenStack and its supporting components into a robust cloud architecture
* Deploying that architecture through an effective, well-integrated automation
  package that sets up and maintains the components and their configurations
* Providing access to a well-integrated, up-to-date set of components known to
  work together
@@ -1,7 +0,0 @@
|
||||
Preface
|
||||
=======
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/package-contents/0010-package-contents.rst
|
||||
|
@@ -1,14 +0,0 @@
|
||||
.. _Introduction:
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/introduction/0010-introduction.rst
|
||||
.. include:: /pages/introduction/0020-what-is-fuel.rst
|
||||
.. include:: /pages/introduction/0030-how-it-works.rst
|
||||
.. include:: /pages/introduction/0040-reference-topologies.rst
|
||||
.. include:: /pages/introduction/0050-supported-software.rst
|
||||
.. include:: /pages/introduction/0060-download-fuel.rst
|
||||
.. include:: /pages/introduction/0070-release-notes.rst
|
@@ -1,17 +0,0 @@
|
||||
.. _Reference-Architecture:
|
||||
|
||||
Reference Architecture
|
||||
======================
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/reference-architecture/0010-overview.rst
|
||||
.. include:: /pages/reference-architecture/0015-closer-look.rst
|
||||
.. include:: /pages/reference-architecture/0020-logical-setup.rst
|
||||
.. include:: /pages/reference-architecture/0030-cluster-sizing.rst
|
||||
.. include:: /pages/reference-architecture/0040-network-setup.rst
|
||||
.. include:: /pages/reference-architecture/0050-technical-considerations-overview.rst
|
||||
.. include:: /pages/reference-architecture/0060-quantum-vs-nova-network.rst
|
||||
.. include:: /pages/reference-architecture/0070-cinder-vs-nova-volume.rst
|
||||
.. include:: /pages/reference-architecture/0080-swift-notes.rst
|
||||
|
@@ -1,18 +0,0 @@
|
||||
.. _Create-Cluster:
|
||||
|
||||
Create a multi-node OpenStack cluster using Fuel
|
||||
================================================
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/installation-instructions/0000-preamble.rst
|
||||
.. include:: /pages/installation-instructions/0010-introduction.rst
|
||||
.. include:: /pages/installation-instructions/0015-before-you-start.rst
|
||||
.. include:: /pages/installation-instructions/0020-machines.rst
|
||||
.. include:: /pages/installation-instructions/0040-installing-configuring-puppet-master.rst
|
||||
.. include:: /pages/installation-instructions/0050-configuring-cobbler.rst
|
||||
.. include:: /pages/installation-instructions/0057-prepare-for-deployment.rst
|
||||
.. include:: /pages/installation-instructions/0060-understand-the-manifest.rst
|
||||
.. include:: /pages/installation-instructions/0070-orchestration.rst
|
||||
.. include:: /pages/installation-instructions/0080-testing-openstack.rst
|
||||
|
@@ -1,11 +0,0 @@
|
||||
.. _Production:
|
||||
|
||||
Production Considerations
|
||||
=========================
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/production-considerations/0010-introduction.rst
|
||||
.. include:: /pages/production-considerations/0015-sizing-hardware.rst
|
||||
.. include:: /pages/production-considerations/0020-deployment-pipeline.rst
|
||||
.. include:: /pages/production-considerations/0030-large-deployments.rst
|
@@ -1,11 +0,0 @@
|
||||
.. _Production:
|
||||
|
||||
Advanced Configuration Topics
|
||||
=============================
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/advanced-topics/0010-introduction.rst
|
||||
.. include:: /pages/advanced-topics/0020-custom-plug-ins.rst
|
||||
.. include:: /pages/advanced-topics/0030-quantum-HA.rst
|
||||
.. include:: /pages/advanced-topics/0040-bonding.rst
|
@@ -1,15 +0,0 @@
|
||||
.. _FAQ:
|
||||
|
||||
FAQ (Frequently Asked Questions)
|
||||
================================
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
Known Issues and Workarounds
|
||||
----------------------------
|
||||
|
||||
.. include:: /pages/frequently-asked-questions/0010-rabbitmq.rst
|
||||
.. include:: /pages/frequently-asked-questions/0020-galera.rst
|
||||
.. include:: /pages/frequently-asked-questions/0070-common-technical-issues.rst
|
||||
.. include:: /pages/frequently-asked-questions/0080-other-questions.rst
|
||||
|
@@ -1,11 +0,0 @@
|
||||
.. _Create-PM:
|
||||
|
||||
Appendix A: Creating Fuel-pm from scratch
|
||||
==========================================
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
.. include:: /pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
|
||||
.. include:: /pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
|
||||
.. include:: /pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
|
||||
.. include:: /pages/creating-fuel-pm/0060-register-with-fuel.rst
|
@@ -1,3 +0,0 @@
|
||||
This section explains how to perform tasks that go beyond a simple OpenStack
|
||||
cluster, from configuring OpenStack Networking for high-availability to adding
|
||||
your own custom components to your cluster using Fuel.
|
@@ -1,457 +0,0 @@
|
||||
Adding and configuring custom services
|
||||
--------------------------------------
|
||||
|
||||
Fuel is designed to help you easily install a standard OpenStack cluster, but
|
||||
what if your cluster is not standard? What if you need services or components
|
||||
that are not included with the standard Fuel distribution? This document is
|
||||
designed to give you all of the information you need in order to add custom
|
||||
services and packages to a Fuel-deployed cluster.
|
||||
|
||||
Fuel usage scenarios and how they affect installation
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Two basic Fuel usage scenarios exist.
|
||||
|
||||
In the first scenario, a deployment engineer takes the Fuel ISO image, deploys
|
||||
the master node, makes necessary changes to configuration files, and then deploys
|
||||
OpenStack. In this scenario, each node gets a clean OpenStack installation.
|
||||
|
||||
In the second scenario, the master node and other nodes in the cluster have
|
||||
already been installed, and the deployment engineer has to deploy OpenStack to
|
||||
an existing configuration.
|
||||
|
||||
For the purposes of this discussion, the main difference between these two
scenarios is that services in the second scenario may be running on an operating
system that has already been customized; with the clean install of the first
scenario, any customizations have to be performed on the fly, as part of the deployment.
|
||||
|
||||
In most cases, best practices dictate that you deploy and test OpenStack first,
and then add any custom services. Fuel works using Puppet manifests, so the
simplest way to install a new service is to edit the current site.pp file on the
Puppet master machine and run an additional deployment pass on the target node.
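
As a rough sketch of that approach, an addition to site.pp might look like the
following; the node, package, and service names are hypothetical placeholders,
not part of the stock Fuel manifests::

  # Hypothetical addition to site.pp on the Puppet master node.
  # Installs and starts a monitoring agent on one controller after
  # OpenStack has already been deployed and verified.
  node 'fuel-controller-01' {
    package { 'my-monitoring-agent':
      ensure => installed,
    }

    service { 'my-monitoring-agent':
      ensure  => running,
      enable  => true,
      require => Package['my-monitoring-agent'],
    }
  }

On the next deployment pass Puppet simply converges the node to the enlarged
catalog: the existing OpenStack resources are re-verified and the new ones are
added, as long as the new declarations do not conflict with them.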
|
||||
|
||||
While that is the ideal means for installing a new service or component, it's
|
||||
not an option in situations in which OpenStack actually requires the new service
|
||||
or component. For example, hardware drivers and management software often must
|
||||
be installed before OpenStack itself. You still, however, have the option to
|
||||
create a separate customized site.pp file and run a deployment pass before
|
||||
installing OpenStack. One advantage to this method is that any version mismatches
|
||||
between the component and OpenStack dependencies should be easy to isolate.
|
||||
|
||||
Finally, if this is not an option, you can inject a custom component installation
|
||||
into the existing Fuel manifests. If you elect to go this route, you'll need to be
|
||||
aware of software source compatibility issues, as well as installation stages,
|
||||
component versions, incompatible dependencies, and declared resource names.
|
||||
|
||||
In short, simple custom component installation may be accomplished by editing the
|
||||
site.pp file, but more complex components should be added as new Fuel components.
|
||||
|
||||
Let's look at what you need to know.
|
||||
|
||||
Installing the new service along with Fuel
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When it comes to installing your new service or component alongside Fuel, you
|
||||
have several options. How you go about it depends on where in the process the
|
||||
component needs to be available. Let's look at each step and how it can impact
|
||||
your installation.
|
||||
|
||||
**Boot the master node**
|
||||
|
||||
In most cases, you will be installing the master node from the Fuel ISO. This is
|
||||
a semi-automatic step, and doesn't allow for any custom components. If for some
|
||||
reason you need to install a node at this level, you will need to use the manual
|
||||
Fuel installation procedure.
|
||||
|
||||
**Cobbler configuration**
|
||||
|
||||
If your customizations need to take place before the install of the operating
|
||||
system, or even as part of the operating system install, you can do them at this
|
||||
step. This is also where you would make customizations to other services. At this
|
||||
level, you are making changes to the operating system kickstart/pre-seed files,
|
||||
and may include any custom software source and components required to install
|
||||
the operating system for a node. Anything that needs to be installed before
|
||||
OpenStack should be configured during this step.
|
||||
|
||||
**OpenStack installation**
|
||||
|
||||
It is during this step that you perform any Puppet, Astute, or MCollective
|
||||
configuration. In most cases, this means customizing the Puppet site.pp file to
|
||||
add any custom components during the actual OpenStack installation.
|
||||
|
||||
This step actually includes several different stages. (In fact, Puppet stdlib
|
||||
defines several additional default stages that Fuel does not use.) These stages
|
||||
include:
|
||||
|
||||
#. ``Puppetlabs-repo``. MCollective uses this stage to add the PuppetLabs
|
||||
repositories during operating system and Puppet deployment.
|
||||
|
||||
#. ``Openstack-custom-repo``. Additional repositories required by OpenStack
|
||||
are configured at this stage. Additionally, to avoid compatibility issues, the
|
||||
PuppetLabs repositories are switched off at this stage. As a general rule, it
|
||||
is a good idea to turn off any unnecessary software repositories defined for
|
||||
Operating System installation.
|
||||
|
||||
#. ``FUEL``. During this stage, Fuel performs any actions defined for the
|
||||
current Operating System.
|
||||
|
||||
#. ``Netconfig``. During this stage, Fuel performs all network configuration
|
||||
actions. This means that you should include any custom components that are
|
||||
related to the network in this stage.
|
||||
|
||||
#. ``Main``. The actual OpenStack installation process happens during this
|
||||
stage. Install any non-network-related components during this stage or after it.
|
||||
|
||||
**Post-OpenStack install**
|
||||
|
||||
At this point, OpenStack is installed. You may add any components you like at
|
||||
this point, as long as they don't break OpenStack itself.
|
||||
|
||||
Defining a new component
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In general, we recommend you follow these steps to define a new component:
|
||||
|
||||
#. **Custom stages. Optional.**
|
||||
|
||||
Declare a custom stage or stages to help Puppet understand the required
|
||||
installation sequence. Stages are special markers indicating the sequence of
|
||||
actions. Best practice is to use the ``before`` parameter for every stage,
|
||||
to help define the correct sequence. The default built-in stage is ``main``.
|
||||
Every Puppet action is automatically assigned to the ``main`` stage if no
|
||||
stage is explicitly specified for the action. However, because Fuel installs
|
||||
almost all of OpenStack during the main stage, custom stages may not help, so
|
||||
future plans include breaking the OpenStack installation into several stages.
|
||||
|
||||
Don't forget to take into account other existing stages; creating several
|
||||
parallel sequences of stages increases the chances that Puppet will order them
|
||||
incorrectly if you do not explicitly specify the order.
|
||||
|
||||
*Example*::
|
||||
|
||||
stage {'Custom stage 1':
|
||||
before => Stage['Custom stage 2'],
|
||||
}
|
||||
stage {'Custom stage 2':
|
||||
before => Stage['main'],
|
||||
}
|
||||
|
||||
Note that there are several limitations to stages, and they should be used
|
||||
with caution and only with the simplest of classes. You can find more information
|
||||
here: http://docs.puppetlabs.com/puppet/2.7/reference/lang_run_stages.html.
|
||||
|
||||
#. **Custom repositories. Optional.**
|
||||
|
||||
If the custom component requires a custom software source, you may declare a
|
||||
new repository and add it during one of the early stages of the installation.
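   *Example* (a sketch only; the repository name and URL are placeholders, and ``yumrepo`` applies to RedHat-family nodes -- on Debian/Ubuntu you would declare an APT source instead)::

       class custom_component_repo {
         # Hypothetical repository for a custom component; replace the URL
         # with your actual mirror.
         yumrepo { 'custom-component':
           descr    => 'Custom component packages',
           baseurl  => 'http://mirror.example.com/custom-component/x86_64/',
           enabled  => 1,
           gpgcheck => 0,
         }
       }

       # Assign the repository class to one of the early custom stages
       # declared in the previous step, so the source is available before
       # the component packages are installed.
       class { 'custom_component_repo':
         stage => 'Custom stage 1',
       }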
|
||||
|
||||
#. **Common variable definition**
|
||||
|
||||
It is a good idea to have all common variables defined in a single place.
|
||||
Unlike variables in many other languages, Puppet variables are actually
|
||||
constants, and may be assigned only once inside a given scope.
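   *Example* (a minimal sketch; the values are placeholders, but the variable names are the ones used by the classes later in this document)::

       # Common variables shared by the custom component classes
       $custom_package_download_url    = 'http://mirror.example.com/custom-packages'
       $custom_package_list_from_url   = ['qpid-cpp-server-0.14-16.el6.x86_64.rpm']
       $service_name_in_haproxy_config = 'keystone-2'
       $package_provider               = undef  # let the package type choose its default provider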
|
||||
|
||||
#. **OS and condition-dependent variable definition**
|
||||
|
||||
It is also a good idea to assign all common operating system or
|
||||
condition-dependent variables to a single location, preferably near the other
|
||||
common variables. Also, be sure to always use a default section when defining
|
||||
conditional operators.
|
||||
|
||||
*Example*::
|
||||
|
||||
case $::osfamily {
|
||||
# RedHat in most cases should work for CentOS and Fedora as well
|
||||
'RedHat': {
|
||||
# List of packages to get from URL/path.
|
||||
# Separate list should be defined for each separate URL!
|
||||
$custom_package_list_from_url = ['qpid-cpp-server-0.14-16.el6.x86_64.rpm']
|
||||
}
|
||||
'Debian': {
|
||||
# List of packages to get from URL/path.
|
||||
# Separate list should be defined for each separate URL!
|
||||
$custom_package_list_from_url = [ "qpidd_0.14-2_amd64.deb" ]
|
||||
}
|
||||
default: {
|
||||
fail("Module install_custom_package does not support ${::operatingsystem}")
|
||||
}
|
||||
}
|
||||
|
||||
#. **Define installation procedures for independent custom components as classes**
|
||||
|
||||
You can think of public classes as singleton collections, or simply as a named
|
||||
block of code with its own namespace. Each class should be defined only once,
|
||||
but every class may be used with different input variable sets. The best
|
||||
practice is to define a separate class for every component, define required
|
||||
sub-classes for sub-components, and include class-dependent required resources
|
||||
within the actual class/subclass.
|
||||
|
||||
*Example*::
|
||||
|
||||
class add_custom_service (
|
||||
# Input parameter definitions:
|
||||
# Name of the service to place behind HAProxy. Mandatory.
|
||||
# This name appears as a new HAProxy configuration block in
|
||||
# /etc/haproxy/haproxy.cfg.
|
||||
$service_name_in_haproxy_config,
|
||||
$custom_package_download_url,
|
||||
$custom_package_list_from_url,
|
||||
#The list of remaining input parameters
|
||||
...
|
||||
) {
|
||||
# HAProxy::params is a container class holding default parameters for the
|
||||
# haproxy class. It adds and populates the Global and Default sections in
|
||||
# /etc/haproxy/haproxy.cfg.
|
||||
# If you install a custom service over the already deployed HAProxy
|
||||
# configuration, it is probably better to comment out the following string:
|
||||
include haproxy::params
|
||||
#Class resources definitions:
|
||||
# Define the list of package names to be installed
|
||||
define install_custom_package_from_url (
|
||||
$custom_package_download_url,
|
||||
$package_provider = undef
|
||||
) {
|
||||
exec { "download-${name}" :
|
||||
command => "/usr/bin/wget -P/tmp ${custom_package_download_url}/${name}",
|
||||
creates => "/tmp/${name}",
|
||||
} ->
|
||||
install_custom_package { "${name}" :
|
||||
provider => $package_provider,
|
||||
source => "/tmp/${name}",
|
||||
}
|
||||
}
|
||||
define install_custom_package (
|
||||
$package_provider = undef,
|
||||
$package_source = undef
|
||||
) {
|
||||
package { "custom-${name}" :
|
||||
ensure => present,
|
||||
provider => $package_provider,
|
||||
source => $package_source
|
||||
}
|
||||
}
|
||||
|
||||
#Here we actually install all the packages from a single URL.
|
||||
if is_array($custom_package_list_from_url) {
|
||||
install_custom_package_from_url { $custom_package_list_from_url :
|
||||
provider => $package_provider,
|
||||
custom_package_download_url => $custom_package_download_url,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#. **Target nodes**
|
||||
|
||||
Every component should be explicitly assigned to a particular target node or nodes.
|
||||
To do that, declare the node or nodes within site.pp. When Puppet runs the
|
||||
manifest for each node, it compares each node definition with the name of the
|
||||
current hostname and applies only the classes assigned to the current node.
|
||||
Node definitions may include regular expressions. For example, you can apply
|
||||
the class 'add custom service' to all controller nodes with hostnames
|
||||
``fuel-controller-01`` to ``fuel-controller-xxx``, where xxx = any integer
|
||||
value using the following definition:
|
||||
|
||||
*Example*::
|
||||
|
||||
node /fuel-controller-[\d+]/ {
|
||||
include stdlib
|
||||
class { 'add_custom_service':
|
||||
stage => 'Custom stage 1',
|
||||
service_name_in_haproxy_config => $service_name_in_haproxy_config,
|
||||
custom_package_download_url => $custom_package_download_url,
|
||||
custom_package_list_from_url => $custom_package_list_from_url,
|
||||
}
|
||||
}
|
||||
|
||||
Fuel API Reference
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
**add_haproxy_service**
|
||||
Location: Top level
|
||||
|
||||
As the name suggests, this function enables you to create a new HAProxy service.
|
||||
The service is defined in the ``/etc/haproxy/haproxy.cfg`` file, and generally
|
||||
looks something like this::
|
||||
|
||||
listen keystone-2
|
||||
bind 10.0.74.253:35357
|
||||
bind 10.0.0.110:35357
|
||||
balance roundrobin
|
||||
option httplog
|
||||
server fuel-controller-01.example.com 10.0.0.101:35357 check
|
||||
server fuel-controller-02.example.com 10.0.0.102:35357 check
|
||||
|
||||
To accomplish this, you might create a Fuel statement such as::
|
||||
|
||||
add_haproxy_service { 'keystone-2' :
|
||||
order => 30,
|
||||
balancers => {'fuel-controller-01.example.com' => '10.0.0.101',
|
||||
'fuel-controller-02.example.com' => '10.0.0.102'},
|
||||
virtual_ips => {'10.0.74.253', '10.0.0.110'},
|
||||
port => '35357',
|
||||
haproxy_config_options => { 'option' => ['httplog'], 'balance' => 'roundrobin' },
|
||||
balancer_port => '35357',
|
||||
balancermember_options => 'check',
|
||||
mode => 'tcp',
|
||||
define_cookies => false,
|
||||
define_backend => false,
|
||||
collect_exported => false
|
||||
}
|
||||
|
||||
Let's look at how the command works.
|
||||
|
||||
**Usage:** ::
|
||||
|
||||
add_haproxy_service { '<SERVICE_NAME>' :
|
||||
order => $order,
|
||||
balancers => $balancers,
|
||||
virtual_ips => $virtual_ips,
|
||||
port => $port,
|
||||
haproxy_config_options => $haproxy_config_options,
|
||||
balancer_port => $balancer_port,
|
||||
balancermember_options => $balancermember_options,
|
||||
mode => $mode, #Optional. Default is 'tcp'.
|
||||
define_cookies => $define_cookies, #Optional. Default false.
|
||||
define_backend => $define_backend,#Optional. Default false.
|
||||
collect_exported => $collect_exported, #Optional. Default false.
|
||||
}
|
||||
|
||||
**Parameters:**
|
||||
|
||||
``<'Service name'>``
|
||||
|
||||
The name of the new HAProxy listener section. In our example it was ``keystone-2``.
|
||||
If you want to include an IP address or port in the listener name, you have the
|
||||
option to use a name such as::
|
||||
|
||||
'stats 0.0.0.0:9000 #Listen on all IP's on port 9000'
|
||||
|
||||
``order``
|
||||
|
||||
This parameter determines the order of the file fragments. It is optional, but
|
||||
we strongly recommend setting it manually. Fuel already has several different
|
||||
order values from 1 to 100 hardcoded for HAProxy configuration. So if your
|
||||
HAProxy configuration fragments appear in the wrong places in
|
||||
``/etc/haproxy/haproxy.cfg``, it is probably because of an incorrect order value.
|
||||
It is safe to set order values greater than 100 in order to place your custom
|
||||
configuration block at the end of ``haproxy.cfg``.
|
||||
|
||||
Puppet assembles configuration files from fragments. First it creates several
|
||||
configuration fragments and temporarily stores all of them as separate files.
|
||||
Every fragment has a name such as ``${order}-${fragment_name}``, so the order
|
||||
determines the number of the current fragment in the fragment sequence.
|
||||
After all the fragments are created, Puppet reads the fragment names and sorts
|
||||
them in ascending order, concatenating all the fragments in that order. So a
|
||||
fragment with a smaller order value always goes before all fragments with a
|
||||
greater order value.
|
||||
|
||||
The ``keystone-2`` fragment from the example above has ``order = 30`` so it's
|
||||
placed after the ``keystone-1`` section (``order = 20``) and before the ``nova-api-1``
|
||||
section (``order = 40``).
|
||||
|
||||
``balancers``
|
||||
|
||||
Balancers (or **Backends** in HAProxy terms) are a hash of
|
||||
``{ "$::hostname" => $::ipaddress }`` values. The default is
|
||||
``{ "<current hostname>" => <current ipaddress> }``, but that value is set for
|
||||
compatibility only, and may not work correctly in HA mode. Instead, the default
|
||||
for HA mode is to explicitly set the ``balancers`` as ::
|
||||
|
||||
Haproxy_service {
|
||||
balancers => $controller_internal_addresses
|
||||
}
|
||||
|
||||
where ``$controller_internal_addresses`` represents a hash of all the
|
||||
controllers with a corresponding internal IP address; this value is set in ``site.pp``.
|
||||
|
||||
So the ``balancers`` parameter is a list of HAProxy listener balance members
|
||||
(``hostnames``) with corresponding IP addresses. The following strings from the
|
||||
``keystone-2`` listener example represent balancers::
|
||||
|
||||
server fuel-controller-01.example.com 10.0.0.101:35357 check
|
||||
server fuel-controller-02.example.com 10.0.0.102:35357 check
|
||||
|
||||
Every key pair in the ``balancers`` hash adds a new string to the list of
|
||||
listener section balancers. Different options may be set for every string.
|
||||
|
||||
``virtual_ips``
|
||||
|
||||
This parameter represents an array of IP addresses (or **Frontends** in HAProxy
|
||||
terms) of the current listener. Every IP address in this array adds a new string
|
||||
to the bind section of the current listeners. The following strings from the
|
||||
``keystone-2`` listener example represent virtual IPs::
|
||||
|
||||
bind 10.0.74.253:35357
|
||||
bind 10.0.0.110:35357
|
||||
|
||||
``port``
|
||||
|
||||
This parameter specifies the frontend port for the listeners. Currently you must
|
||||
set the same port for all frontends. The following strings from the ``keystone-2``
|
||||
listener example represent the frontend port, where the port is 35357::
|
||||
|
||||
bind 10.0.74.253:35357
|
||||
bind 10.0.0.110:35357
|
||||
|
||||
``haproxy_config_options``
|
||||
|
||||
This parameter represents a hash of key pairs of HAProxy listener options in the
|
||||
form ``{ 'option name' => 'option value' }``. Every key pair from this hash
|
||||
adds a new string to the listener options.
|
||||
|
||||
Please note: Every HAProxy option may require a different input value type, such
|
||||
as strings or a list of multiple options per single string.
|
||||
|
||||
The ``keystone-2`` listener example has the
|
||||
``{ 'option' => ['httplog'], 'balance' => 'roundrobin' }`` option array and this
|
||||
array is represented as the following in the resulting ``/etc/haproxy/haproxy.cfg``: ::
|
||||
|
||||
balance roundrobin
|
||||
option httplog
|
||||
|
||||
``balancer_port``
|
||||
|
||||
This parameter represents the balancer (backend) port. By default, the
|
||||
``balancer_port`` is the same as the frontend ``port``. The following strings
|
||||
from the ``keystone-2`` listener example represent ``balancer_port``, where
|
||||
port is ``35357``::
|
||||
|
||||
server fuel-controller-01.example.com 10.0.0.101:35357 check
|
||||
server fuel-controller-02.example.com 10.0.0.102:35357 check
|
||||
|
||||
``balancermember_options``
|
||||
|
||||
This is a string of options added to each balancer (backend) member. The
|
||||
``keystone-2`` listener example has the single ``check`` option::
|
||||
|
||||
server fuel-controller-01.example.com 10.0.0.101:35357 check
|
||||
server fuel-controller-02.example.com 10.0.0.102:35357 check
|
||||
|
||||
``mode``
|
||||
|
||||
This optional parameter represents the HAProxy listener mode. The default value
|
||||
is ``tcp``, but Fuel writes ``mode http`` to the defaults section of
|
||||
``/etc/haproxy/haproxy.cfg``. You can set the same option via
|
||||
``haproxy_config_options``. A separate mode parameter is required to set some
|
||||
modes by default on every new listener addition. The ``keystone-2`` listener
|
||||
example has no ``mode`` option and so it works in the default Fuel-configured
|
||||
HTTP mode.
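As a hedged sketch (the service name, addresses, and port below are placeholders rather than part of the standard Fuel configuration), a raw TCP listener would set the mode explicitly instead of relying on the Fuel-configured default::

    add_haproxy_service { 'custom-tcp-service' :
      order                  => 110,   # values above 100 keep the block after the built-in Fuel fragments
      balancers              => {'fuel-controller-01.example.com' => '10.0.0.101'},
      virtual_ips            => {'10.0.0.110'},
      port                   => '3306',
      balancer_port          => '3306',
      haproxy_config_options => { 'balance' => 'roundrobin' },
      balancermember_options => 'check',
      mode                   => 'tcp',
    }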
|
||||
|
||||
``define_cookies``
|
||||
|
||||
This optional boolean parameter is a Fuel-only feature. The default is ``false``,
|
||||
but if set to ``true``, Fuel directly adds ``cookie ${hostname}`` to every
|
||||
balance member (backend).
|
||||
|
||||
The ``keystone-2`` listener example has no ``define_cookies`` option. Typically,
|
||||
frontend cookies are added with ``haproxy_config_options`` and backend cookies
|
||||
with ``balancermember_options``.
|
||||
|
||||
``collect_exported``
|
||||
|
||||
This optional boolean parameter has a default value of ``false``. True means
|
||||
'collect exported @@balancermember resources' (when every balancermember node
|
||||
exports itself), while false means 'rely on the existing declared balancermember
|
||||
resources' (for when you know the full set of balancermembers in advance and use
|
||||
``haproxy::balancermember`` with array arguments, which allows you to deploy
|
||||
everything in one run).
|
@ -1,10 +0,0 @@
|
||||
OpenStack Networking HA
|
||||
-----------------------
|
||||
|
||||
Fuel introduces support for OpenStack Networking (formerly known as Quantum) in
|
||||
a high-availability configuration. To accomplish this, Fuel uses a combination
|
||||
of Pacemaker and Corosync to ensure that if the networking service goes down, it
|
||||
will be restarted, either on the existing node or on a separate node.
|
||||
|
||||
This document explains how to configure these options in your own installation.
|
||||
|
@ -1,283 +0,0 @@
|
||||
L23network
|
||||
----------
|
||||
|
||||
NOTE: THIS DOCUMENT HAS NOT BEEN EDITED AND IS NOT READY FOR PUBLIC CONSUMPTION.
|
||||
|
||||
Puppet module for configuring network interfaces at layer 2 and layer 3 (802.1q VLANs, access ports, NIC bonding, IP address assignment, DHCP, and interfaces without IP addresses).
|
||||
|
||||
It can work together with Open vSwitch or with the standard Linux network stack.
|
||||
|
||||
At the moment, CentOS 6.3 (RHEL6) and Ubuntu 12.04 or above are supported.
|
||||
|
||||
|
||||
Usage
|
||||
^^^^^
|
||||
|
||||
Place this module in /etc/puppet/modules or in another path that contains your Puppet modules.
|
||||
|
||||
Include the L23network module and initialize it. We recommend doing this at an early stage::
|
||||
|
||||
#Network configuration
|
||||
stage {'netconfig':
|
||||
before => Stage['main'],
|
||||
}
|
||||
class {'l23network': stage=> 'netconfig'}
|
||||
|
||||
If you do not plan to use Open vSwitch, you can disable it::
|
||||
|
||||
class {'l23network': use_ovs=>false, stage=> 'netconfig'}
|
||||
|
||||
|
||||
|
||||
|
||||
L2 network configuation (Open vSwitch only)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The current layout is:
|
||||
* *bridges* -- A "Bridge" is a virtual ethernet L2 switch. You can plug ports into it.
|
||||
* *ports* -- A port is a virtual interface that you plug into the bridge (switch).
|
||||
* *interface* -- The physical implementation of a port.
|
||||
|
||||
Then, in your manifest, you can use these resources as parameterized classes and defined types::
|
||||
|
||||
class {"l23network": }
|
||||
|
||||
l23network::l2::bridge{"br-mgmt": }
|
||||
l23network::l2::port{"eth0": bridge => "br-mgmt"}
|
||||
l23network::l2::port{"mmm0": bridge => "br-mgmt"}
|
||||
l23network::l2::port{"mmm1": bridge => "br-mgmt"}
|
||||
|
||||
l23network::l2::bridge{"br-ex": }
|
||||
l23network::l2::port{"eth0": bridge => "br-ex"}
|
||||
l23network::l2::port{"eth1": bridge => "br-ex", ifname_order_prefix='ovs'}
|
||||
l23network::l2::port{"eee0": bridge => "br-ex", skip_existing => true}
|
||||
l23network::l2::port{"eee1": bridge => "br-ex", type=>'internal'}
|
||||
|
||||
You can define a type for the port. The port type can be one of
|
||||
'system', 'internal', 'tap', 'gre', 'ipsec_gre', 'capwap', 'patch', or 'null'.
|
||||
If you do not define a type for the port (or define it as ''), ovs-vsctl will use its default behavior
|
||||
(see http://openvswitch.org/cgi-bin/ovsman.cgi?page=utilities%2Fovs-vsctl.8).
|
||||
|
||||
You can use the *skip_existing* option if you do not want adding an already existing port or bridge to interrupt the configuration.
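For illustration, a small sketch combining a non-default port type with *skip_existing* (the port names here are placeholders)::

    # A GRE tunnel port; 'gre' is one of the types listed above
    l23network::l2::port{"gre0": bridge => "br-mgmt", type => 'gre'}
    # Re-adding a port that may already exist, without interrupting the run
    l23network::l2::port{"eth2": bridge => "br-mgmt", skip_existing => true}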
|
||||
|
||||
|
||||
|
||||
L3 network configuration
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
::
|
||||
|
||||
### Simple IP address definition, DHCP or address-less interfaces
|
||||
l23network::l3::ifconfig {"eth0": ipaddr=>'192.168.1.1/24'}
|
||||
l23network::l3::ifconfig {"xXxXxXx":
|
||||
interface => 'eth1',
|
||||
ipaddr => '192.168.2.1',
|
||||
netmask => '255.255.255.0'
|
||||
}
|
||||
l23network::l3::ifconfig {"eth2": ipaddr=>'dhcp'}
|
||||
l23network::l3::ifconfig {"eth3": ipaddr=>'none'}
|
||||
|
||||
The *ipaddr* option can contain an IP address, 'dhcp', or 'none'. In this example we describe the configuration of four network interfaces:
|
||||
* Interface *eth0* uses the short CIDR-notated form of IP address definition.
|
||||
* Interface *eth1* uses the classic *ipaddr* and *netmask* definition.
|
||||
* Interface *eth2* will be configured to use the DHCP protocol.
|
||||
* Interface *eth3* will be configured as an interface without an IP address. This is often needed to create a "master" interface for 802.1q VLANs (in the native Linux implementation) or a slave interface for bonding.
|
||||
|
||||
The CIDR-notated form of the IP address takes priority over the classic *ipaddr* and *netmask* definition.
|
||||
If you omit *netmask* and do not use the CIDR-notated form, the default *netmask* value of '255.255.255.0' will be used. ::
|
||||
|
||||
### Multiple IP addresses for one interface (aliases)
|
||||
|
||||
l23network::l3::ifconfig {"eth0":
|
||||
ipaddr => ['192.168.0.1/24', '192.168.1.1/24', '192.168.2.1/24']
|
||||
}
|
||||
|
||||
You can pass a list of CIDR-notated IP addresses to the *ipaddr* parameter to assign many IP addresses to one interface. This will create aliases (not subinterfaces). The array can contain one or more elements. ::
|
||||
|
||||
### UP and DOWN interface order
|
||||
|
||||
l23network::l3::ifconfig {"eth1":
|
||||
ipaddr=>'192.168.1.1/24'
|
||||
}
|
||||
l23network::l3::ifconfig {"br-ex":
|
||||
ipaddr=>'192.168.10.1/24',
|
||||
ifname_order_prefix='ovs'
|
||||
}
|
||||
l23network::l3::ifconfig {"aaa0":
|
||||
ipaddr=>'192.168.20.1/24',
|
||||
ifname_order_prefix='zzz'
|
||||
}
|
||||
|
||||
At startup, CentOS and Ubuntu start and configure network interfaces in alphabetical order
|
||||
of interface configuration file names. In the example above we change the configuration order with the *ifname_order_prefix* keyword. We will have this order::
|
||||
|
||||
ifcfg-eth1
|
||||
ifcfg-ovs-br-ex
|
||||
ifcfg-zzz-aaa0
|
||||
|
||||
And the OS will configure the br-ex and aaa0 interfaces after eth1::
|
||||
|
||||
### Default gateway
|
||||
|
||||
l23network::l3::ifconfig {"eth1":
|
||||
ipaddr => '192.168.2.5/24',
|
||||
gateway => '192.168.2.1',
|
||||
check_by_ping => '8.8.8.8',
|
||||
check_by_ping_timeout => '30'
|
||||
}
|
||||
|
||||
In this example we define the default *gateway* and options for waiting until the network is up.
|
||||
The *check_by_ping* parameter defines an IP address that will be pinged. Puppet will block while waiting for a response for up to *check_by_ping_timeout* seconds.
|
||||
The *check_by_ping* parameter can be an IP address, 'gateway', or 'none' to disable the check.
|
||||
By default, the gateway will be pinged. ::
|
||||
|
||||
### DNS-specific options
|
||||
|
||||
l23network::l3::ifconfig {"eth1":
|
||||
ipaddr => '192.168.2.5/24',
|
||||
dns_nameservers => ['8.8.8.8','8.8.4.4'],
|
||||
dns_search => ['aaa.com','bbb.com'],
|
||||
dns_domain => 'qqq.com'
|
||||
}
|
||||
|
||||
We can also specify DNS nameservers and a search list that will be inserted (by the resolvconf library) into /etc/resolv.conf.
|
||||
The *dns_domain* option is implemented only on Ubuntu. ::
|
||||
|
||||
### DHCP-specific options
|
||||
|
||||
l23network::l3::ifconfig {"eth2":
|
||||
ipaddr => 'dhcp',
|
||||
dhcp_hostname => 'compute312',
|
||||
dhcp_nowait => false,
|
||||
}
|
||||
|
||||
|
||||
|
||||
Bonding
|
||||
^^^^^^^
|
||||
|
||||
### Using standard linux bond (ifenslave)
|
||||
To bond two interfaces you need to:
|
||||
* Specify these interfaces as interfaces without IP addresses
|
||||
* Specify that the interfaces depend on the master-bond-interface
|
||||
* Assign an IP address to the master-bond-interface.
|
||||
* Specify bond-specific properties for the master-bond-interface (if the defaults are not suitable for you)
|
||||
|
||||
For example (defaults included)::
|
||||
|
||||
l23network::l3::ifconfig {'eth1': ipaddr=>'none', bond_master=>'bond0'} ->
|
||||
l23network::l3::ifconfig {'eth2': ipaddr=>'none', bond_master=>'bond0'} ->
|
||||
l23network::l3::ifconfig {'bond0':
|
||||
ipaddr => '192.168.232.1',
|
||||
netmask => '255.255.255.0',
|
||||
bond_mode => 0,
|
||||
bond_miimon => 100,
|
||||
bond_lacp_rate => 1,
|
||||
}
|
||||
|
||||
|
||||
You can find more information about bonding network interfaces in the manuals for your operating system:
|
||||
* https://help.ubuntu.com/community/UbuntuBonding
|
||||
* http://wiki.centos.org/TipsAndTricks/BondingInterfaces
|
||||
|
||||
### Using Open vSwitch
|
||||
To bond two interfaces you need to:
|
||||
* Specify the OVS bridge.
|
||||
* Specify the special "bond" resource, add it to the bridge, and specify any bond-specific parameters.
|
||||
* Assign an IP address to the newly created network interface (if needed).
|
||||
|
||||
In this example we add the "eth1" and "eth2" interfaces to the "bridge0" bridge as bond "bond1". ::
|
||||
|
||||
l23network::l2::bridge{'bridge0': } ->
|
||||
l23network::l2::bond{'bond1':
|
||||
bridge => 'bridge0',
|
||||
ports => ['eth1', 'eth2'],
|
||||
properties => [
|
||||
'lacp=active',
|
||||
'other_config:lacp-time=fast'
|
||||
],
|
||||
} ->
|
||||
l23network::l3::ifconfig {'bond1':
|
||||
ipaddr => '192.168.232.1',
|
||||
netmask => '255.255.255.0',
|
||||
}
|
||||
|
||||
Open vSwitch provides a lot of parameters for different configurations.
|
||||
We can specify them in the "properties" option as a list of parameter=value
|
||||
(or parameter:key=value) strings.
|
||||
Most of them are described on the [Open vSwitch documentation page](http://openvswitch.org/support/).
|
||||
|
||||
|
||||
|
||||
802.1q vlan access ports
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
### Using standard linux way
|
||||
We can use tagged VLANs over ordinary network interfaces (or over bonds).
|
||||
L23network supports two variants of naming VLAN interfaces:
|
||||
* *vlanXXX* -- the 802.1q tag is taken from the VLAN interface name, but you need to specify
|
||||
the parent interface name in the **vlandev** parameter.
|
||||
* *eth0.101* -- both the 802.1q tag and the parent interface name are taken from the VLAN interface name
|
||||
|
||||
If you need to use 802.1q VLANs over bonds, you can use only the first variant.
|
||||
|
||||
In this example we can see both variants: ::
|
||||
|
||||
l23network::l3::ifconfig {'vlan6':
|
||||
ipaddr => '192.168.6.1',
|
||||
netmask => '255.255.255.0',
|
||||
vlandev => 'bond0',
|
||||
}
|
||||
l23network::l3::ifconfig {'vlan5':
|
||||
ipaddr => 'none',
|
||||
vlandev => 'bond0',
|
||||
}
|
||||
L23network:L3:Ifconfig['bond0'] -> L23network:L3:Ifconfig['vlan6'] -> L23network:L3:Ifconfig['vlan5']
|
||||
|
||||
l23network::l3::ifconfig {'eth0':
|
||||
ipaddr => '192.168.0.5',
|
||||
netmask => '255.255.255.0',
|
||||
gateway => '192.168.0.1',
|
||||
} ->
|
||||
l23network::l3::ifconfig {'eth0.101':
|
||||
ipaddr => '192.168.101.1',
|
||||
netmask => '255.255.255.0',
|
||||
} ->
|
||||
l23network::l3::ifconfig {'eth0.102':
|
||||
ipaddr => 'none',
|
||||
}
|
||||
|
||||
### Using Open vSwitch
|
||||
In Open vSwitch, all internal traffic is virtually tagged.
|
||||
To create an 802.1q tagged access port, you need to specify the VLAN tag when adding a port to a bridge.
|
||||
In this example we create two ports with tags 10 and 20, and assign an IP address to the interface with tag 10::
|
||||
|
||||
l23network::l2::bridge{'bridge0': } ->
|
||||
l23network::l2::port{'vl10':
|
||||
bridge => 'bridge0',
|
||||
type => 'internal',
|
||||
port_properties => [
|
||||
'tag=10'
|
||||
],
|
||||
} ->
|
||||
l23network::l2::port{'vl20':
|
||||
bridge => 'bridge0',
|
||||
type => 'internal',
|
||||
port_properties => [
|
||||
'tag=20'
|
||||
],
|
||||
} ->
|
||||
l23network::l3::ifconfig {'vl10':
|
||||
ipaddr => '192.168.101.1/24',
|
||||
} ->
|
||||
l23network::l3::ifconfig {'vl20':
|
||||
ipaddr => 'none',
|
||||
}
|
||||
|
||||
You can find information about VLANs in Open vSwitch on the [Open vSwitch VLAN configuration cookbook](http://openvswitch.org/support/config-cookbooks/vlan-configuration-cookbook/).
|
||||
|
||||
**IMPORTANT:** You can't use VLAN interface names like vlanXXX if you do not want your network traffic to be double-tagged.
|
||||
|
||||
---
|
||||
When I began to write this module, I checked https://github.com/ekarlso/puppet-vswitch. Elcarso, big thanks...
|
||||
|
||||
|
@ -1,184 +0,0 @@
|
||||
Installing Puppet Master is a one-time procedure for the entire
|
||||
infrastructure. Once done, Puppet Master will act as a single point of
|
||||
control for all of your servers, and you will never have to return to
|
||||
these installation steps again.
|
||||
|
||||
Initial Setup
|
||||
-------------
|
||||
|
||||
On VirtualBox (https://www.virtualbox.org/wiki/Downloads), please create or make
|
||||
sure the following hostonly adapters exist and are configured correctly:
|
||||
|
||||
* VirtualBox -> File -> Preferences...
|
||||
|
||||
* Network -> Add HostOnly Adapter (vboxnet0)
|
||||
|
||||
::
|
||||
|
||||
IPv4 Address: 10.0.0.1
|
||||
IPv4 Network Mask: 255.255.255.0
|
||||
DHCP server: disabled
|
||||
|
||||
* Network -> Add HostOnly Adapter (vboxnet1):
|
||||
|
||||
IPv4 Address: 10.0.1.1
|
||||
|
||||
IPv4 Network Mask: 255.255.255.0
|
||||
|
||||
DHCP server: disabled
|
||||
|
||||
* Network -> Add HostOnly Adapter (vboxnet2) ::
|
||||
|
||||
IPv4 Address: 0.0.0.0
|
||||
|
||||
IPv4 Network Mask: 255.255.255.0
|
||||
|
||||
DHCP server: disabled
|
||||
|
||||
In this example, only the first two adapters will be used, but you can choose to
|
||||
use the third to handle your storage network traffic.
|
||||
|
||||
After creating these interfaces, reboot the host machine to make sure that
|
||||
DHCP isn't running in the background.
|
||||
|
||||
Installing on Windows isn't recommended, but if you're attempting it,
|
||||
you will also need to set up the IP address & network mask under
|
||||
Control Panel > Network and Internet > Network and Sharing Center for the
|
||||
Virtual HostOnly Network adapter.
|
||||
|
||||
Next, follow these steps to create the virtual hardware:
|
||||
|
||||
* Machine -> New
|
||||
|
||||
Name: fuel-pm
|
||||
|
||||
Type: Linux
|
||||
|
||||
Version: Red Hat (64 Bit)
|
||||
|
||||
Memory: 2048MB
|
||||
|
||||
* Machine -> Settings -> Network
|
||||
|
||||
* Adapter 1
|
||||
|
||||
* Enable Network Adapter
|
||||
* Attached to: Host-only Adapter
|
||||
* Name: vboxnet0
|
||||
|
||||
* Adapter 2
|
||||
|
||||
* Enable Network Adapter
|
||||
* Attached to: Bridged Adapter
|
||||
* Name: eth0 (or whichever physical network has your internet connection)
|
||||
|
||||
It is important that host-only ``Adapter 1`` goes first, as Cobbler will use
|
||||
vboxnet0 for PXE, and VirtualBox boots from the LAN on the first available
|
||||
network adapter.
|
||||
|
||||
OS Installation
|
||||
---------------
|
||||
|
||||
Pick and download an Operating System image. This image will be used as the
|
||||
base OS for the Puppet Master node. These instructions assume that you are using
|
||||
`CentOS 6.4 <http://isoredirect.centos.org/centos/6/isos/x86_64/>`_.
|
||||
|
||||
**PLEASE NOTE**: These are the only operating systems on which Fuel 3.0 has
|
||||
been tested and certified. Using other operating systems can, and in many
|
||||
cases will, produce unpredictable results.
|
||||
|
||||
Mount the downloaded ISO to the machine's CD/DVD drive. In case of VirtualBox,
|
||||
mount it to the fuel-pm virtual machine:
|
||||
|
||||
* Machine -> Settings -> Storage -> CD/DVD Drive -> Choose a virtual CD/DVD
|
||||
disk file
|
||||
|
||||
Boot the server (or VM) from the CD/DVD drive and install the chosen OS.
|
||||
Be sure to choose the root password carefully.
|
||||
|
||||
Set up the eth0 interface. This interface will be used for communication between
|
||||
the Puppet Master and Puppet agents, as well as for Cobbler.
|
||||
|
||||
``# vi /etc/sysconfig/network-scripts/ifcfg-eth0``::
|
||||
|
||||
DEVICE="eth0"
|
||||
BOOTPROTO="static"
|
||||
IPADDR="10.0.0.100"
|
||||
NETMASK="255.255.255.0"
|
||||
ONBOOT="yes"
|
||||
TYPE="Ethernet"
|
||||
PEERDNS="no"
|
||||
|
||||
Apply network settings::
|
||||
|
||||
# /etc/sysconfig/network-scripts/ifup eth0
|
||||
|
||||
Set up the eth1 interface. This will be the public interface.
|
||||
|
||||
``# vi /etc/sysconfig/network-scripts/ifcfg-eth1``::
|
||||
|
||||
DEVICE="eth1"
|
||||
BOOTPROTO="dhcp"
|
||||
ONBOOT="no"
|
||||
TYPE="Ethernet"
|
||||
|
||||
Apply network settings
|
||||
|
||||
``# /etc/sysconfig/network-scripts/ifup eth1``
|
||||
|
||||
Add DNS for Internet hostname resolution
|
||||
|
||||
``# vi /etc/resolv.conf``
|
||||
|
||||
Replace localdomain with your domain name, and replace 8.8.8.8 with your DNS IP.
|
||||
|
||||
Note: you can look up your DNS server on your host machine using
|
||||
``ipconfig /all`` on Windows, or using ``cat /etc/resolv.conf`` under Linux. ::
|
||||
|
||||
search localdomain
|
||||
nameserver 8.8.8.8
|
||||
|
||||
Check that a ping to your host machine works. This means that the management
|
||||
network segment is available
|
||||
|
||||
``# ping 10.0.0.1``
|
||||
|
||||
Now check to make sure that Internet access is working properly:
|
||||
|
||||
``# ping google.com``
|
||||
|
||||
Next, set up the package repositories:
|
||||
|
||||
``# vi /etc/yum.repos.d/puppet.repo``::
|
||||
|
||||
[puppetlabs-dependencies]
|
||||
name=Puppet Labs Dependencies
|
||||
baseurl=http://yum.puppetlabs.com/el/$releasever/dependencies/$basearch/
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
|
||||
[puppetlabs]
|
||||
name=Puppet Labs Packages
|
||||
baseurl=http://yum.puppetlabs.com/el/$releasever/products/$basearch/
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
|
||||
Install Puppet Master
|
||||
|
||||
``# rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm``
|
||||
``# yum upgrade``
|
||||
``# yum install puppet-server-2.7.19``
|
||||
``# service puppetmaster start``
|
||||
``# chkconfig puppetmaster on``
|
||||
``# service iptables stop``
|
||||
``# chkconfig iptables off``
|
||||
|
||||
* Install PuppetDB::
|
||||
|
||||
# yum install puppetdb puppetdb-terminus
|
||||
# chkconfig puppetdb on
|
||||
|
||||
* Finally, make sure to turn off selinux::
|
||||
|
||||
# sed -i s/SELINUX=.*/SELINUX=disabled/ /etc/selinux/config
|
||||
# setenforce 0
|
@ -1,161 +0,0 @@
|
||||
.. _Configuring-Fuel-PM:
|
||||
|
||||
Configuring fuel-pm
|
||||
--------------------------------
|
||||
Once the installation is complete, you will need to finish the configuration to
|
||||
adjust for your own local values.
|
||||
|
||||
* Check network settings and connectivity and correct any errors:
|
||||
|
||||
* Check the hostname. Running ::
|
||||
|
||||
# hostname
|
||||
|
||||
should return ::
|
||||
|
||||
fuel-pm
|
||||
|
||||
If not, set the hostname:
|
||||
|
||||
``# vi /etc/sysconfig/network`` ::
|
||||
|
||||
HOSTNAME=fuel-pm
|
||||
|
||||
* Check the fully qualified domain name (FQDN) value. ::
|
||||
|
||||
# hostname -f
|
||||
|
||||
should return ::
|
||||
|
||||
fuel-pm.localdomain
|
||||
|
||||
If not, correct the ``/etc/resolv.conf`` file by replacing ``localdomain``
|
||||
below with your actual domain name, and ``8.8.8.8`` with your actual DNS server.
|
||||
|
||||
Note: you can look up your DNS server on your host machine using
|
||||
``ipconfig /all`` on Windows, or using ``cat /etc/resolv.conf`` under Linux ::
|
||||
|
||||
search localdomain
|
||||
nameserver 8.8.8.8
|
||||
|
||||
* Run ::
|
||||
|
||||
# hostname fuel-pm
|
||||
|
||||
or reboot to apply changes to the hostname.
|
||||
|
||||
|
||||
* Add the OpenStack hostnames to your domain. You can do this by actually
|
||||
adding them to DNS, or by simply editing the ``/etc/hosts`` file. In either
|
||||
case, replace ``localdomain`` with your domain name.
|
||||
|
||||
``# vi /etc/hosts``::
|
||||
|
||||
127.0.0.1 localhost
|
||||
10.0.0.100 fuel-pm.localdomain fuel-pm
|
||||
10.0.0.101 fuel-controller-01.localdomain fuel-controller-01
|
||||
10.0.0.102 fuel-controller-02.localdomain fuel-controller-02
|
||||
10.0.0.103 fuel-controller-03.localdomain fuel-controller-03
|
||||
10.0.0.110 fuel-compute-01.localdomain fuel-compute-01
|
||||
|
||||
Enabling Stored Configuration
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Fuel's Puppet manifests call for storing exported resources in the
|
||||
Puppet database using PuppetDB, so the next step is to configure
|
||||
Puppet to use a technique called stored configuration.
|
||||
|
||||
* Configure Puppet Master to use storeconfigs:
|
||||
|
||||
``# vi /etc/puppet/puppet.conf`` and add following into the ``[master]`` section::
|
||||
|
||||
storeconfigs = true
|
||||
storeconfigs_backend = puppetdb
|
||||
|
||||
* Configure PuppetDB to use the correct hostname and port:
|
||||
|
||||
``# vi /etc/puppet/puppetdb.conf`` to create the ``puppetdb.conf`` file and
|
||||
add the following (replace ``localdomain`` with your domain name)::
|
||||
|
||||
[main]
|
||||
server = fuel-pm.localdomain
|
||||
port = 8081
|
||||
|
||||
* Configure Puppet Master's file server capability:
|
||||
|
||||
``# vi /etc/puppet/fileserver.conf`` and append the following lines::
|
||||
|
||||
[ssh_keys]
|
||||
path /var/lib/puppet/ssh_keys
|
||||
allow *
|
||||
|
||||
* Create a directory with keys, give it appropriate permissions, and generate
|
||||
the keys themselves::
|
||||
|
||||
# mkdir /var/lib/puppet/ssh_keys
|
||||
# cd /var/lib/puppet/ssh_keys
|
||||
# ssh-keygen -f openstack
|
||||
# chown -R puppet:puppet /var/lib/puppet/ssh_keys/
|
||||
|
||||
* Set up SSL for PuppetDB and restart the puppetmaster and puppetdb services::
|
||||
|
||||
# service puppetmaster restart
|
||||
# puppetdb-ssl-setup
|
||||
# service puppetmaster restart
|
||||
# service puppetdb restart
|
||||
|
||||
* Finally, if you are planning to install Cobbler on the Puppet Master node as
|
||||
well (as we are in this example), make configuration changes on the Puppet Master
|
||||
so that it actually knows how to provision software onto itself:
|
||||
|
||||
``# vi /etc/puppet/puppet.conf``::
|
||||
|
||||
[main]
|
||||
# server
|
||||
server = fuel-pm.localdomain
|
||||
|
||||
# enable plugin sync
|
||||
pluginsync = true
|
||||
|
||||
|
||||
**IMPORTANT**: Note that while these operations appear to finish quickly, it
|
||||
can actually take several minutes for puppetdb to complete its startup process.
|
||||
You'll know it has finished starting up when you can successfully telnet to port
|
||||
8081::
|
||||
|
||||
# yum install telnet
|
||||
# telnet fuel-pm.localdomain 8081
|
||||
|
||||
Testing Puppet
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
Add a simple configuration to Puppet so that when you run puppet on various nodes,
|
||||
it will display a "Hello world" message:
|
||||
|
||||
``# vi /etc/puppet/manifests/site.pp``::
|
||||
|
||||
node /fuel-pm.localdomain/ {
|
||||
notify{"Hello world from fuel-pm": }
|
||||
}
|
||||
|
||||
Finally, to make sure everything is working properly, run puppet agent
|
||||
and check that you see the ``Hello world from fuel-pm`` output::
|
||||
|
||||
# puppet agent --test
|
||||
|
||||
Troubleshooting PuppetDB and SSL
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The first time you run Puppet, it's not unusual to have difficulties
|
||||
with the SSL setup. If so, remove the original files and start again,
|
||||
like so::
|
||||
|
||||
# service puppetmaster stop
|
||||
# service puppetdb stop
|
||||
# rm -rf /etc/puppetdb/ssl
|
||||
# puppetdb-ssl-setup
|
||||
# service puppetdb start
|
||||
# service puppetmaster start
|
||||
|
||||
Again, remember that it may take several minutes before puppetdb is
|
||||
fully up and running, despite appearances to the contrary.
|
@ -1,160 +0,0 @@
|
||||
Installing Fuel and Cobbler
|
||||
--------------------------------
|
||||
|
||||
Cobbler performs bare metal provisioning and initial installation of
|
||||
Linux on OpenStack nodes. Luckily, you already have a Puppet Master
|
||||
installed and Fuel includes instructions for installing Cobbler, so
|
||||
you can install Cobbler using Puppet in a few seconds, rather than
|
||||
doing it manually.
|
||||
|
||||
Installing Fuel
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Installing Fuel is a simple matter of copying the complete Fuel
|
||||
package to fuel-pm and unpacking it in the proper location in order to
|
||||
supply Fuel manifests to Puppet::
|
||||
|
||||
# tar -xzf <fuel-archive-name>.tar.gz
|
||||
# cd <fuel-archive-name>
|
||||
# cp -Rf deployment/puppet/* /etc/puppet/modules/
|
||||
# service puppetmaster restart
|
||||
|
||||
From here, using Fuel is a matter of making sure it has the
|
||||
appropriate site.pp file from the Fuel distribution.
|
||||
|
||||
Using Puppet to install Cobbler
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
On fuel-pm, copy the contents of
|
||||
``<FUEL_DIR>/deployment/puppet/cobbler/examples/site.pp`` into your existing
|
||||
``/etc/puppet/manifests/site.pp`` file. The file has its own documentation, so
|
||||
it's a good idea to look through it to get a feel for the big picture and
|
||||
understand what's going on. The general idea is that this file sets
|
||||
certain parameters such as networking information, then defines the OS
|
||||
distributions Cobbler will serve so they can be imported into Cobbler
|
||||
as it's installed.
|
||||
|
||||
Let's take a look at some of the major points, and highlight where you
|
||||
will need to make changes::
|
||||
|
||||
...
|
||||
# [server] IP address that will be used as address of cobbler server.
|
||||
# It is needed to download kickstart files, call cobbler API and
|
||||
# so on. Required.
|
||||
$server = '10.0.0.100'
|
||||
|
||||
This, remember, is the fuel-pm server, which is acting as both the
|
||||
Puppet Master and Cobbler servers. ::
|
||||
|
||||
# Interface for cobbler instances
|
||||
$dhcp_interface = 'eth0'
|
||||
|
||||
The Cobbler instance needs to provide DHCP to each of the new nodes,
|
||||
so you will need to specify which interface will handle that. ::
|
||||
|
||||
$dhcp_start_address = '10.0.0.110'
|
||||
$dhcp_end_address = '10.0.0.126'
|
||||
|
||||
Change the ``$dhcp_start_address`` and ``$dhcp_end_address`` to match the network
|
||||
allocations you made earlier. The important thing is to make sure there are no
|
||||
conflicts with the static IPs you are allocating. ::
|
||||
|
||||
$dhcp_netmask = '255.255.255.0'
|
||||
$dhcp_gateway = '10.0.0.100'
|
||||
$domain_name = 'localdomain'
|
||||
|
||||
Change the ``$domain_name`` to your own domain name. ::
|
||||
|
||||
$name_server = '10.0.0.100'
|
||||
$next_server = '10.0.0.100'
|
||||
$cobbler_user = 'cobbler'
|
||||
$cobbler_password = 'cobbler'
|
||||
$pxetimeout = '0'
|
||||
|
||||
# Predefined mirror type to use: custom or default (should be removed soon)
|
||||
$mirror_type = 'default'
|
||||
|
||||
**Set the ``$mirror_type`` to be ``default``** so Fuel knows to request
|
||||
resources and packages from Internet sources rather than having to set up your
|
||||
own internal repositories.
|
||||
|
||||
The next step is to define the node itself, and the distributions it will serve. ::
|
||||
|
||||
...
|
||||
type => $mirror_type,
|
||||
}
|
||||
|
||||
node fuel-pm{
|
||||
|
||||
class {'cobbler::nat': nat_range => $nat_range}
|
||||
...
|
||||
|
||||
|
||||
The file assumes that you're installing Cobbler on a separate machine.
|
||||
Since you're installing it on fuel-pm, change the node name here.
|
||||
|
||||
Next, you will need to uncomment the required OS distributions so that
|
||||
they can be downloaded and imported into Cobbler during Cobbler
|
||||
installation.
|
||||
|
||||
In this example we'll focus on CentOS, so uncomment these lines and
|
||||
change the location of ISO image files to either a local mirror or the
|
||||
fastest available Internet mirror for ``CentOS-6.4-x86_64-minimal.iso``::
|
||||
|
||||
...
|
||||
# CentOS distribution
|
||||
# Uncomment the following section if you want CentOS image to be downloaded and imported into Cobbler
|
||||
# Replace "http://address/of" with valid hostname and path to the mirror where the image is stored
|
||||
|
||||
Class[cobbler::distro::centos64_x86_64] ->
|
||||
Class[cobbler::profile::centos64_x86_64]
|
||||
|
||||
class { cobbler::distro::centos64_x86_64:
|
||||
http_iso => "http://address/of/CentOS-6.4-x86_64-minimal.iso",
|
||||
ks_url => "cobbler",
|
||||
require => Class[cobbler],
|
||||
}
|
||||
|
||||
class { cobbler::profile::centos64_x86_64: }
|
||||
|
||||
# Ubuntu distribution
|
||||
# Uncomment the following section if you want Ubuntu image to be downloaded and imported into Cobbler
|
||||
# Replace "http://address/of" with valid hostname and path to the mirror where the image is stored
|
||||
...
|
||||
|
||||
|
||||
|
||||
If you want Cobbler to serve the RedHat distribution in addition to CentOS, perform
|
||||
the same actions for those sections.
|
||||
|
||||
With those changes in place, Puppet knows that Cobbler must be
|
||||
installed on the fuel-pm machine, and will also add the right distro and profile.
|
||||
The CentOS image will be downloaded from the mirror and imported into Cobbler as
|
||||
well.
|
||||
|
||||
Note that while we've set up the network so that external resources are
|
||||
accessed through the 10.0.1.0/24 network, this configuration includes
|
||||
Puppet commands to configure forwarding on the Cobbler node to make
|
||||
external resources available via the 10.0.0.0/24 network, which is used
|
||||
during the installation process (see enable_nat_all and
|
||||
enable_nat_filter).
|
||||
|
||||
Finally, run the puppet agent to actually install Cobbler on fuel-pm::
|
||||
|
||||
# puppet agent --test
|
||||
|
||||
Testing cobbler
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
You can check that Cobbler is installed successfully by opening the
|
||||
following URL from your host machine:
|
||||
|
||||
http://fuel-pm/cobbler_web/ (u: cobbler, p: cobbler)
|
||||
|
||||
If fuel-pm doesn't resolve on your host machine, you can access the
|
||||
Cobbler dashboard from:
|
||||
|
||||
http://10.0.0.100/cobbler_web
|
||||
|
||||
At this point you should have a fully working instance of Cobbler,
|
||||
fully configured and capable of installing the OS on the target OpenStack nodes.
|
@ -1,10 +0,0 @@
|
||||
Preparing for node deployment
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
At this point you're almost ready to let Fuel work its magic.
|
||||
Copy the contents of ``<FUEL_DIR>/deployment/puppet/astute`` to the
|
||||
``/root`` directory, then change to the ``/root`` directory.
|
||||
|
||||
At this point, you've performed the configuration actions normally
|
||||
handled by the Fuel ISO. To continue installation, proceed to
|
||||
:ref:`Installing the OS using Fuel <Install-OS-Using-Fuel>`.
|
@ -1,4 +0,0 @@
|
||||
.. include:: /pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
|
||||
.. include:: /pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
|
||||
.. include:: /pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
|
||||
.. include:: /pages/creating-fuel-pm/0060-register-with-fuel.rst
|
@ -1,6 +0,0 @@
|
||||
Known Issues and Workarounds
|
||||
----------------------------
|
||||
|
||||
.. include:: /pages/frequently-asked-questions/0010-rabbitmq.rst
|
||||
.. include:: /pages/frequently-asked-questions/0020-galera.rst
|
||||
|
@ -1,66 +0,0 @@
|
||||
|
||||
RabbitMQ
|
||||
^^^^^^^^
|
||||
|
||||
**Difficulty restarting RabbitMQ after a total failure**
|
||||
|
||||
|
||||
**Issue:**
|
||||
In general, you should not shut down all RabbitMQ nodes simultaneously. RabbitMQ requires
|
||||
that after a full shutdown of the cluster, the first node brought up should
|
||||
be the last one to shut down, but it's not always possible to know which node that is, or even to ensure a clean shutdown. Version 2.1 of Fuel solves this problem by managing the restart of
|
||||
available nodes, so you should not experience difficulty with this issue.
|
||||
|
||||
If, however, you are still using a previous version of Fuel, here is how Fuel 2.1 works around this problem, in case you need to do it yourself.
|
||||
|
||||
**Workaround:**
|
||||
There are 2 possible scenarios, depending on the results of the shutdown:
|
||||
|
||||
#. The RabbitMQ master node is alive and can be started.
|
||||
#. It's impossible to start the RabbitMQ master node due to a hardware or system failure
|
||||
|
||||
Fuel 2.1 updates the ``/etc/init.d/rabbitmq-server`` init scripts for RHEL/Centos and Ubuntu to customized versions. These scripts attempt to start RabbitMQ twice, giving the RabbitMQ master node the necessary time to start after complete power loss.
|
||||
|
||||
With the scripts in place, power up all nodes, then check to see whether the RabbitMQ server started on all nodes. All nodes should start automatically.
|
||||
|
||||
On the other hand, if the RabbitMQ master node has failed, the init script performs the following actions during the rabbitmq-server start. It moves the existing Mnesia database to a backup directory, and then makes a third and last attempt to start the RabbitMQ server. In this case, RabbitMQ starts with clean database, and the live rabbit nodes assemble a new cluster. The script uses the current RabbitMQ settings to find the current Mnesia location and creates a backup directory in the same path as Mnesia, tagged with the current date.
|
||||
|
||||
So with the customized init scripts included in Fuel 2.1, in most cases RabbitMQ simply starts after complete power loss and automatically assembles the cluster, but you can manage the process yourself.
|
||||
|
||||
|
||||
**Background:** See http://comments.gmane.org/gmane.comp.networking.rabbitmq.general/19792.
|
||||
|
||||
.. _https://launchpad.net/galera: https://launchpad.net/galera
|
||||
.. _CentOS 6.3: http://isoredirect.centos.org/centos/6/isos/x86_64/
|
||||
.. _http://wiki.vps.net/vps-net-features/cloud-servers/template-information/galeramysql-recommended-cluster-configuration/: http://wiki.vps.net/vps-net-features/cloud-servers/template-information/galeramysql-recommended-cluster-configuration/
|
||||
.. _http://comments.gmane.org/gmane.comp.networking.rabbitmq.general/19792: http://comments.gmane.org/gmane.comp.networking.rabbitmq.general/19792
|
||||
.. _http://puppetlabs.com/blog/a-deployment-pipeline-for-infrastructure/: http://puppetlabs.com/blog/a-deployment-pipeline-for-infrastructure/
|
||||
.. _http://download.mirantis.com/epel-fuel/: http://download.mirantis.com/epel-fuel/
|
||||
.. _Creating the virtual machines: http://#
|
||||
.. _http://projects.reductivelabs.com/issues/2244: http://projects.reductivelabs.com/issues/2244
|
||||
.. _https://bugs.launchpad.net/codership-mysql/+bug/1087368: https://bugs.launchpad.net/codership-mysql/+bug/1087368
|
||||
.. _https://groups.google.com/forum/?fromgroups=#!topic/puppet-users/OpCBjV1nR2M: https://groups.google.com/forum/?fromgroups=#!topic/puppet-users/OpCBjV1nR2M
|
||||
.. _https://www.virtualbox.org/wiki/Downloads: https://www.virtualbox.org/wiki/Downloads
|
||||
.. _Overview: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id8
|
||||
.. _Environments: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id9
|
||||
.. _Useful links: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id6
|
||||
.. _The process of redeploying the same environment: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id7
|
||||
.. _Galera cluster has no built-in restart or shutdown mechanism: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id4
|
||||
.. _The right way to get Galera up and working: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id5
|
||||
.. _At least one RabbitMQ node must remain operational: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id2
|
||||
.. _Galera: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id3
|
||||
.. _RabbitMQ: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id1
|
||||
.. _http://docs.puppetlabs.com/guides/environment.html: http://docs.puppetlabs.com/guides/environment.html
|
||||
.. _Deployment pipeline: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id10
|
||||
.. _Links: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/known-issues-and-workarounds/#id11
|
||||
.. _http://10.0.1.10/: http://10.0.1.10/
|
||||
.. _contact Mirantis for further assistance: http://www.mirantis.com/
|
||||
.. _https://launchpad.net/codership-mysql: https://launchpad.net/codership-mysql
|
||||
.. _http://projects.puppetlabs.com/issues/4680: http://projects.puppetlabs.com/issues/4680
|
||||
.. _http://www.codership.com/wiki/doku.php: http://www.codership.com/wiki/doku.php
|
||||
.. _http://projects.puppetlabs.com/issues/3234: http://projects.puppetlabs.com/issues/3234
|
||||
.. _Enabling Stored Configuration: http://fuel.mirantis.com/reference-documentation-on-fuel-folsom/installing-configuring-puppet-master-2/#puppet-master-stored-config
|
||||
.. _http://openlife.cc/blogs/2011/july/ultimate-mysql-high-availability-solution: http://openlife.cc/blogs/2011/july/ultimate-mysql-high-availability-solution
|
||||
.. _http://www.google.com: http://www.google.com/
|
||||
|
||||
|
@ -1,267 +0,0 @@
|
||||
|
||||
Galera cluster has no built-in restart or shutdown mechanism
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
**Issue:**
|
||||
A Galera cluster cannot be simply started or stopped. It is designed to work continuously.
|
||||
|
||||
**Workaround:**
|
||||
|
||||
Galera, as high availability software, does not include any built-in full cluster shutdown or restart sequence. It is supposed to be running on a 24/7/365 basis.
|
||||
|
||||
On the other hand, deploying, updating or restarting Galera may lead to different issues.
|
||||
This guide is intended to help avoid some of these issues.
|
||||
|
||||
Regular Galera cluster startup includes a combination of the procedures described below.
|
||||
These procedures, with some differences, are performed by Fuel manifests.
|
||||
|
||||
|
||||
**Stopping a single Galera node**
|
||||
|
||||
There is no dedicated Galera process - Galera works inside the MySQL server process. The
|
||||
MySQL server should be patched with the Galera WSREP patch to be able to work as a Galera cluster node.
|
||||
|
||||
All Galera stop steps listed below are automatically performed by the mysql init script
|
||||
supplied by Fuel installation manifests, so in most cases it should be enough to perform the first step only.
|
||||
If the init script fails in some (rare, we hope) circumstances, perform step 2 manually as well.
|
||||
|
||||
#. Run ``service mysql stop``.
|
||||
Wait 15-30 seconds to ensure all MySQL processes are shut down.
|
||||
|
||||
|
||||
#. Run ``ps -ef | grep mysql`` and stop ALL(!) **mysqld** and **mysqld_safe** processes.
|
||||
* Wait 20 seconds and run ``ps -ef | grep mysql`` again to see if any mysqld processes have restarted.
|
||||
* Stop or kill any new mysqld or mysqld_safe processes.
|
||||
|
||||
It is very important to stop all MySQL processes. Galera uses ``mysqld_safe``, which may start additional MySQL processes. So even if you don't immediately see any running processes, additional processes may already be starting. That is why we check running processes twice. ``mysqld_safe`` has a default timeout of 15 seconds before it restarts processes. If, after that time, no ``mysqld`` processes are running, the node may be considered shut down.
|
||||
|
||||
If there was nothing to kill and all MySQL processes stopped after the ``service mysql stop`` command, the node may be considered shut down gracefully.
|
||||
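For reference, the stop-and-verify sequence described above can be expressed as a short shell sketch (assuming the standard Fuel-supplied ``mysql`` init script and root access)::

    service mysql stop
    sleep 30
    # first check for surviving mysqld/mysqld_safe processes
    ps -ef | grep [m]ysql
    # wait past the mysqld_safe restart timeout and check again
    sleep 20
    ps -ef | grep [m]ysql
    # stop anything that is still running
    pkill mysqld_safe
    pkill mysqld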
|
||||
|
||||
**Stop the Galera cluster**
|
||||
|
||||
A Galera cluster is a master-master replication cluster. Therefore, it is always in the process of synchronization.
|
||||
|
||||
The recommended way to stop the cluster involves the following steps:
|
||||
|
||||
#. Stop all requests to the cluster from outside. Under heavy load, a default Galera non-synchronized cache may be up to 1 Gb; you may have to wait until every node is fully synced to shut the cluster down.
|
||||
|
||||
#. Select the first node to shut down. In general, it's better to start with the non-primary nodes. Connect to this node with the mysql console.
|
||||
|
||||
#. Run ``show status like 'wsrep_local_state%';``
|
||||
|
||||
If it is "Synced", then you may start the shutdown node procedure.
|
||||
|
||||
If the node is non-synchronized, you may still shut it down, but make sure you don't start a new cluster operation from this node in the future.
|
||||
|
||||
#. In mysql console, run the following command::
|
||||
|
||||
SET GLOBAL wsrep_on='OFF';
|
||||
|
||||
Replication stops immediately after the ``wsrep_on`` variable is set to "OFF", so avoid making any changes to the node after changing this setting.
|
||||
|
||||
#. Exit from the mysql console.
|
||||
|
||||
#. Follow the steps described in **Stopping a single Galera node** to stop the node altogether.
|
||||
|
||||
|
||||
Repeat these instructions for each remaining node in the cluster.
|
||||
|
||||
Remember which node you are going to shut down last -- ideally, it should be the primary node in the synced state. This is the node you should start first when you decide to continue cluster operation.
|
||||
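As a sketch, the per-node part of this procedure can also be run from the shell (assuming root access to the local MySQL server)::

    # check that the node is synced before shutting it down
    mysql -e "SHOW STATUS LIKE 'wsrep_local_state_comment';"
    # stop replication, then stop the node
    mysql -e "SET GLOBAL wsrep_on='OFF';"
    service mysql stop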
|
||||
|
||||
**Starting Galera and creating a new cluster**
|
||||
|
||||
Galera writes its state to the file ``grastate.dat``, which resides in the location specified in the
|
||||
``wsrep_data_home_dir`` variable. This variable defaults to ``mysql_real_data_home``, and Fuel OpenStack deployment manifests use this default location, creating the file at ``/var/lib/mysql/grastate.dat``.
|
||||
|
||||
In the case of an unexpected cluster shutdown, this file can be useful for finding the node with the most recent commit.
|
||||
Simply compare the "UUID" values of ``grastat.dat`` from every node. The greater "UUID" value indicates which node has the latest commit.
|
||||
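A quick way to collect the state files from all controllers (a sketch with hypothetical node names; adjust to your own hosts)::

    for node in fuel-controller-01 fuel-controller-02 fuel-controller-03; do
        echo "=== $node ==="
        ssh $node cat /var/lib/mysql/grastate.dat
    done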
|
||||
If the cluster was shut down gracefully and the last node to shut down is known, simply perform the steps below to start up the cluster. Alternatively, you can find the node with the most recent commit using the ``grastate.dat`` files
|
||||
and start the cluster operation from that node.
|
||||
|
||||
#. Ensure that all Galera nodes are shut down.
|
||||
|
||||
Any running nodes will be outside the new cluster until they are restarted, which could affect data integrity.
|
||||
|
||||
#. Select the primary node.
|
||||
|
||||
This node is supposed to start first. It creates a new cluster ID and a new last commit UUID
|
||||
(the ``wsrep_cluster_state_uuid`` variable represents this UUID inside the MySQL process).
|
||||
Fuel deployment manifests with default settings set up ``fuel-controller-01`` to be both the primary Galera cluster node and the first deployed OpenStack controller.
|
||||
* Open ``/etc/mysql/conf.d/wsrep.cnf``
|
||||
* Set an empty cluster address as follows (including the quotation marks):
|
||||
|
||||
``wsrep_cluster_address="gcomm://"``
|
||||
|
||||
* Save changes to the config file.
|
||||
|
||||
#. Run the ``service mysql start`` command on the first primary node or restart MySQL
|
||||
if there were configuration changes to ``wsrep.cnf``.
|
||||
|
||||
* Connect to MySQL server.
|
||||
|
||||
* Run ``SET GLOBAL wsrep_on='ON';`` to start replication within the new cluster. This variable can also be set by editing the ``wsrep.cnf`` file.
|
||||
|
||||
* Check the new cluster status by running the following command: ``show status like 'wsrep%';``
|
||||
|
||||
* ``wsrep_local_state_comment`` should be "Synced"
|
||||
|
||||
* ``wsrep_cluster_status`` should be "Primary"
|
||||
|
||||
* ``wsrep_cluster_size`` should be "1", as this is the only cluster that's been started so far.
|
||||
|
||||
* ``wsrep_incoming_addresses`` should include only the address of the current node.
|
||||
|
||||
|
||||
#. Select one of the secondary nodes.
|
||||
|
||||
* Check its ``/etc/mysql/conf.d/wsrep.cnf`` file.
|
||||
|
||||
* The ``wsrep_cluster_address="gcomm://node1,node2"`` variable should include the name or IP address
|
||||
of the already started primary node. Otherwise, this node will definitely fail to start.
|
||||
|
||||
**Note.**
|
||||
*Due to a Galera bug, do not include a node's own name and address in the ``wsrep_cluster_address`` specified for that node; while each Galera node attempts to exclude its own address, it sometimes fails. In that case, the Galera node fails to start, with a "Cannot open channel..." error in* **/var/log/mysqld.log**
|
||||
|
||||
In the case of OpenStack deployed by Fuel manifests with default settings (2 controllers), Fuel automatically removes local names and IP addresses from gcomm strings on every node to prevent a node from attempting to connect to itself. This parameter should look like this:
|
||||
|
||||
``wsrep_cluster_address="gcomm://fuel-controller-01:4567"``
|
||||
|
||||
* If ``wsrep_cluster_address`` is set correctly, run ``rm -f /var/lib/mysql/grastate.dat`` and then ``service mysql start`` on this node.
|
||||
|
||||
|
||||
#. Connect to any node with mysql and run ``show status like 'wsrep%';`` again.
|
||||
|
||||
* ``wsrep_local_state_comment`` should finally change from "Donor/Synced" or other statuses to "Synced".
|
||||
|
||||
Time to sync may vary depending on the database size and connection speed.
|
||||
|
||||
* ``wsrep_cluster_status`` should be "Primary" on both nodes.
|
||||
|
||||
Galera is a master-master replication cluster and every node becomes primary by default (i.e. master).
|
||||
Galera also supports master-slave configuration for special purposes.
|
||||
Slave nodes have the "Non-Primary" value for ``wsrep_cluster_status``.
|
||||
|
||||
* ``wsrep_cluster_size`` should be "2", since we have just added one more node to the cluster.
|
||||
|
||||
* ``wsrep_incoming_addresses`` should include the addresses of both started nodes.
|
||||
|
||||
**Note:**
|
||||
State transfer is a heavy operation not only on the joining node, but also on the donor.
|
||||
In particular, the state donor may not be able to serve client requests, or it may simply be slow.
|
||||
|
||||
|
||||
#. Repeat step 4 on all remaining controllers.
|
||||
|
||||
If all secondary controllers have started successfully and become synced, and you do not plan to restart the cluster
|
||||
in the near future, it is strongly recommended that you change the ``wsrep`` configuration settings on the first controller.
|
||||
|
||||
* Open file ``/etc/mysql/conf.d/wsrep.cnf``.
|
||||
* Set ``wsrep_cluster_address=`` to the same value (node list) that is used for every secondary controller.
|
||||
|
||||
In case of OpenStack deployed by Fuel manifests with default settings (2 controllers),
|
||||
on every operating controller this parameter should finally look like
|
||||
|
||||
``wsrep_cluster_address="gcomm://fuel-controller-01:4567,fuel-controller-02:4567"``
|
||||
|
||||
This step is important for future failures or maintenance procedures.
|
||||
If the Galera primary controller node is restarted for any reason while it still has the empty "gcomm" value
|
||||
(i.e. ``wsrep_cluster_address="gcomm://"``), it creates a new cluster and exits the existing cluster.
|
||||
The existing cluster nodes may also stop accepting requests and halt the synchronization process to prevent data
|
||||
de-synchronization issues.
|
||||
|
||||
|
||||
**Note:**
|
||||
|
||||
Starting with MySQL version 5.5.28_wsrep23.7 (Galera version 2.2), a Galera cluster supports an additional start mode.
|
||||
Instead of setting ``wsrep_cluster_address="gcomm://"``, on the first node one can set the following URL
|
||||
for cluster address::
|
||||
|
||||
wsrep_cluster_address="gcomm://node1,node2:port2,node3?pc.wait_prim=yes"
|
||||
|
||||
where ``nodeX`` is the name or IP address of one of the available nodes, with an optional port.
|
||||
|
||||
Therefore, every Galera node may have the same configuration file with the list of all nodes.
|
||||
It is designed to eliminate all configuration file changes on the first node after the cluster is started.
|
||||
|
||||
After the nodes are started, one may use mysql to set the ``pc.bootstrap=1`` flag on the node
|
||||
which should start the new cluster and become the primary node.
|
||||
All other nodes should automatically perform initial synchronization with this new primary node.
|
||||
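For example, from the shell on the chosen node, this is typically done through the provider options variable (a sketch; the exact option syntax may differ between Galera versions)::

    mysql -e "SET GLOBAL wsrep_provider_options='pc.bootstrap=1';"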
This flag may also be provided for a single selected node via the ``wsrep.cnf`` configuration file as follows::
|
||||
|
||||
wsrep_cluster_address="gcomm://node1,node2:port2,node3?pc.wait_prim=yes&pc.bootstrap=1"
|
||||
|
||||
Unfortunately, due to a bug in the mysql init script (<https://bugs.launchpad.net/codership-mysql/+bug/1087368>),
|
||||
the bootstrap flag is completely ignored in Galera 2.2 (wsrep_2.7). So, to start a new cluster, one should use
|
||||
the old way with an empty ``gcomm://`` URL.
|
||||
All other nodes may have either a single node or a full node list in the ``gcomm`` URL;
|
||||
the bug affects only the first node - the one that starts the new cluster.
|
||||
Please note also that nodes with non-empty ``gcomm`` URL may start only if at least one of the nodes
|
||||
listed in ``gcomm://node1,node2:port2,node3`` is already started and is available for initial synchronization.
|
||||
For every starting Galera node it is enough to have at least one working node name/address to get full
|
||||
information about the cluster structure and to perform initial synchronization.
|
||||
Fuel deployment manifests with default settings may or may not set::
|
||||
|
||||
wsrep_cluster_address="gcomm://"
|
||||
|
||||
on the primary node (first deployed OpenStack controller) and node list like::
|
||||
|
||||
wsrep_cluster_address="gcomm://fuel-controller-01:4567,fuel-controller-02:4567"
|
||||
|
||||
on every secondary controller. Therefore, it is a good idea to check these parameters after the deployment is finished.
|
||||
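A quick way to verify the resulting setting on a controller (a simple check, assuming the default Fuel file layout)::

    grep wsrep_cluster_address /etc/mysql/conf.d/wsrep.cnf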
|
||||
|
||||
**Note:**
|
||||
|
||||
A Galera cluster is a very democratic system. As it is a master-master cluster,
|
||||
every primary node is equal to the other primary nodes.
|
||||
Primary nodes with the same sync state (the same ``wsrep_cluster_state_uuid`` value) form the so-called quorum --
|
||||
the majority of primary nodes with the same ``wsrep_cluster_state_uuid``.
|
||||
Normally, one of the controllers gets a new commit, increases its ``wsrep_cluster_state_uuid`` value
|
||||
and performs synchronization with other nodes.
|
||||
If one of primary controllers fails, the Galera cluster continues serving requests as long as the quorum exists.
|
||||
A primary controller exiting the cluster is equivalent to a failure, because after exiting this controller
|
||||
has a new cluster ID and a ``wsrep_cluster_state_uuid`` value lower than the corresponding value on the working nodes.
|
||||
So 3 working primary controllers is the minimum Galera cluster size. The recommended Galera cluster size is
|
||||
6 controllers.
|
||||
|
||||
Fuel deployment manifests with default settings deploy a non-recommended Galera configuration
|
||||
with 2 controllers only. This is suitable for testing purposes, but not for production deployments.
|
||||
|
||||
|
||||
**Restarting an existing cluster after failure**
|
||||
|
||||
Bringing a Galera cluster back after a power failure or another type of breakdown basically consists of two steps:
|
||||
backing up every node and finding the node with the most recent non-damaged replica.
|
||||
|
||||
* Helpful tip: add ``wsrep_provider_options="wsrep_on = off;"`` to the ``/etc/mysql/conf.d/wsrep.cnf`` configuration file.
|
||||
|
||||
After these steps, simply perform the **Starting Galera and creating a new cluster** procedure,
|
||||
starting from the node with the most recent non-damaged replica.
|
||||
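One simple way to take a per-node backup before touching anything (a sketch; run while MySQL is stopped, and adjust the target path to suit your environment)::

    # file-level copy of the MySQL data directory while mysqld is stopped
    tar czf /root/mysql-backup-$(hostname)-$(date +%F).tar.gz /var/lib/mysql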
|
||||
|
||||
Useful links
|
||||
^^^^^^^^^^^^
|
||||
|
||||
* Galera documentation from Galera authors:
|
||||
|
||||
* http://www.codership.com/wiki/doku.php
|
||||
|
||||
* Actual Galera and WSREP patch bug list and official Galera/WSREP bug tracker:
|
||||
|
||||
* https://launchpad.net/codership-mysql
|
||||
* https://launchpad.net/galera
|
||||
|
||||
* One of the recommended robust Galera cluster configurations:
|
||||
|
||||
* http://wiki.vps.net/vps-net-features/cloud-servers/template-information/galeramysql-recommended-cluster-configuration/
|
||||
|
||||
* Why we use Galera:
|
||||
|
||||
* http://openlife.cc/blogs/2011/july/ultimate-mysql-high-availability-solution
|
||||
|
||||
* Other questions (seriously, sometimes there is not enough info about Galera available in the official Galera docs):
|
||||
|
||||
* http://www.google.com
|
@ -1,136 +0,0 @@
|
||||
|
||||
.. _common-technical-issues:
|
||||
|
||||
Common Technical Issues
|
||||
-----------------------
|
||||
|
||||
1. Puppet fails with ::
|
||||
|
||||
err: Could not retrieve catalog from remote server: Error 400 on SERVER: undefined method 'fact_merge' for nil:NilClass"
|
||||
|
||||
* This is a Puppet bug. See: http://projects.puppetlabs.com/issues/3234
|
||||
* Workaround: ``service puppetmaster restart``
|
||||
|
||||
2. Puppet client will never resend the certificate to Puppet Master. The certificate cannot be signed and verified.
|
||||
* This is a Puppet bug. See: http://projects.puppetlabs.com/issues/4680
|
||||
* Workaround:
|
||||
* On Puppet client::
|
||||
|
||||
rm -f /etc/puppet/ssl/certificate_requests/*.pem
|
||||
rm -f /etc/puppet/ssl/certs/*.pem
|
||||
|
||||
* On Puppet master::
|
||||
|
||||
rm -f /var/lib/puppet/ssl/ca/requests/*.pem
|
||||
|
||||
#. The manifests are up-to-date under ``/etc/puppet/manifests``, but Puppet master keeps serving the previous version of manifests to the clients. Manifests seem to be cached by Puppet master.
|
||||
|
||||
* More information: https://groups.google.com/forum/?fromgroups=#!topic/puppet-users/OpCBjV1nR2M
|
||||
* Workaround: ``service puppetmaster restart``
|
||||
|
||||
#. Timeout error for fuel-controller-XX when running ``puppet-agent --test`` to install OpenStack when using HDD instead of SSD ::
|
||||
|
||||
| Sep 26 17:56:15 fuel-controller-02 puppet-agent[1493]: Could not retrieve catalog from remote server: execution expired
|
||||
| Sep 26 17:56:15 fuel-controller-02 puppet-agent[1493]: Not using cache on failed catalog
|
||||
| Sep 26 17:56:15 fuel-controller-02 puppet-agent[1493]: Could not retrieve catalog; skipping run
|
||||
|
||||
* Workaround: ``vi /etc/puppet/puppet.conf``
|
||||
* add: ``configtimeout = 1200``
|
||||
|
||||
#. On running "``puppet agent --test``", the error messages below occur::
|
||||
|
||||
| err: /File[/var/lib/puppet/lib]: Could not evaluate: Could not retrieve information from environment production source(s) puppet://fuel-pm.localdomain/plugins
|
||||
|
||||
* Workaround: http://projects.reductivelabs.com/issues/2244
|
||||
|
||||
and ::
|
||||
|
||||
| err: Could not retrieve catalog from remote server: Error 400 on SERVER: stack level too deep
|
||||
| warning: Not using cache on failed catalog
|
||||
| err: Could not retrieve catalog; skipping run
|
||||
|
||||
* Workaround: The second problem can be solved by rebooting Puppet master.
|
||||
|
||||
#. PuppetDB Connection Failures:
|
||||
|
||||
Puppet fails on fuel-pm with message::
|
||||
|
||||
Could not retrieve catalog from remote server: Error 400 on SERVER: Failed to submit 'replace facts' command for fuel-pm to PuppetDB at fuel-pm:8081: Connection refused - connect(2)
|
||||
|
||||
This message is often the result of one of the following:
|
||||
|
||||
* Firewall blocking the puppetdb port
|
||||
* DNS issues with the hostname specified in your puppetdb.conf
|
||||
* DNS issues with the ssl-host specified in your jetty.ini on the puppetdb server
|
||||
|
||||
* Workaround: If you are able to connect (e.g. via telnet) to port 8081 on the puppetdb machine, puppetdb is running. To try to isolate the problem, add the following to ``/etc/puppetdb/conf.d/jetty.ini``::
|
||||
|
||||
certificate-whitelist = /etc/puppetdb/whitelist.txt
|
||||
|
||||
Be sure to list all aliases for the machine in that file.
|
||||
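For example, a whitelist covering the master node's common names might be created like this (hypothetical hostnames; use the aliases of your own puppetdb host)::

    cat > /etc/puppetdb/whitelist.txt <<EOF
    fuel-pm
    fuel-pm.localdomain
    EOF
    # restart puppetdb so the change takes effect
    service puppetdb restart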
|
||||
|
||||
|
||||
|
||||
.. _create-the-XFS-partition:
|
||||
|
||||
Creating the XFS partition
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In most cases, Fuel creates the XFS partition for you. If for some reason you need to create it yourself, use this procedure:
|
||||
|
||||
|
||||
|
||||
#. Create the partition itself::
|
||||
|
||||
|
||||
|
||||
|
||||
fdisk /dev/sdb
|
||||
n (for new)
|
||||
p (for partition)
|
||||
<enter> (to accept the defaults)
|
||||
<enter> (to accept the defaults)
|
||||
w (to save changes)
|
||||
|
||||
|
||||
|
||||
|
||||
#. Initialize the XFS partition::
|
||||
|
||||
|
||||
|
||||
|
||||
mkfs.xfs -i size=1024 -f /dev/sdb1
|
||||
|
||||
|
||||
|
||||
|
||||
#. For a standard swift install, all data drives are mounted directly under /srv/node, so first create the mount point::
|
||||
|
||||
|
||||
|
||||
|
||||
mkdir -p /srv/node/sdb1
|
||||
|
||||
|
||||
|
||||
|
||||
#. Finally, add the new partition to fstab so it mounts automatically, then mount all current partitions::
|
||||
|
||||
|
||||
|
||||
|
||||
echo "/dev/sdb1 /srv/node/sdb1 xfs
|
||||
noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
|
||||
mount -a
|
||||
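You can quickly confirm that the new partition is mounted (an optional check)::

    mount | grep /srv/node/sdb1
    df -h /srv/node/sdb1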
|
||||
|
||||
Redeploying a node from scratch
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Compute and Cinder nodes in an HA configuration, and controllers in any configuration, cannot be redeployed without completely redeploying the cluster. However, in a non-HA situation you can redeploy a compute or Cinder node. Simply follow these steps:
|
||||
|
||||
#. Remove the certificate for the node by executing the command ``puppet cert clean <hostname>`` on fuel-pm.
|
||||
#. Reboot the node over the network so it can be picked up by Cobbler.
|
||||
#. Run the puppet agent on the target node using ``puppet agent --test``.
|
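For example, to redeploy a compute node named fuel-compute-01 (a hypothetical hostname), you would run the following on fuel-pm before rebooting the node over the network::

    puppet cert clean fuel-compute-01.localdomain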
@ -1,8 +0,0 @@
|
||||
Other Questions
|
||||
---------------
|
||||
|
||||
#. **[Q]** Why did you decide to provide OpenStack packages through your own repository?
|
||||
|
||||
**[A]** We are fully committed to providing our customers with working and stable bits and pieces in order to make successful OpenStack deployments. Please note that we do not distribute our own version of OpenStack; we rather provide a plain vanilla distribution. So there is no vendor lock-in. Our repository just keeps the history of OpenStack packages certified to work with our Puppet manifests.
|
||||
|
||||
The benefit of this approach is that at any moment in time you can install any OpenStack version you want. If you are running Essex, you just need to use Puppet manifests which reference OpenStack packages for Essex from our repository. Once Folsom was released, we added new OpenStack packages for Folsom to our repository and created a separate branch with the corresponding Puppet manifests (which, in turn, reference these packages). With EPEL this would not be possible, as the repository only keeps the latest version of the OpenStack packages.
|
@ -1,16 +0,0 @@
|
||||
In this section, you’ll learn how to do an actual installation of OpenStack
|
||||
using Fuel. In addition to getting a feel for the steps involved, you’ll also
|
||||
gain some familiarity with some of your customization options. While Fuel does
|
||||
provide several different deployment configurations out of the box, it's common
|
||||
to want to tweak those architectures for your own situation, so you’ll see how
|
||||
to move certain features around from the standard installation.
|
||||
|
||||
The first step, however, is to commit to a deployment template. A fairly balanced
|
||||
small-size yet fully featured deployment is the Multi-node (HA) Compact
|
||||
deployment, so that’s what we’ll be using through the rest of this guide.
|
||||
|
||||
Real world installations require a physical hardware infrastructure, but you can
|
||||
easily deploy a small simulation cloud on a single physical machine using
|
||||
VirtualBox. You can follow these instructions in order to install an OpenStack
|
||||
cloud into a test environment using VirtualBox, or to get a production-grade
|
||||
installation using actual hardware.
|
@ -1,22 +0,0 @@
|
||||
How installation works
|
||||
----------------------
|
||||
|
||||
While version 2.0 of Fuel provided the ability to simplify installation of
|
||||
OpenStack, versions 2.1 and above include orchestration capabilities that
|
||||
simplify deployment of an OpenStack cluster. The deployment process follows this
|
||||
general procedure:
|
||||
|
||||
#. Design your architecture.
|
||||
#. Install Fuel onto the fuel-pm machine.
|
||||
#. Configure Fuel.
|
||||
#. Create the basic configuration and load it into Cobbler.
|
||||
#. PXE-boot the servers so Cobbler can install the operating system and prepare
|
||||
them for orchestration.
|
||||
#. Use Fuel's included templates and the configuration to populate Puppet's
|
||||
site.pp file.
|
||||
#. Customize the site.pp file if necessary.
|
||||
#. Use the orchestrator to coordinate the installation of the appropriate
|
||||
OpenStack components on each node.
|
||||
|
||||
Start by designing your architecture.
|
||||
|
@ -1,58 +0,0 @@
|
||||
|
||||
Before you start
|
||||
----------------
|
||||
|
||||
Before you begin your installation, you will need to make a number of important
|
||||
decisions:
|
||||
|
||||
* **OpenStack features.** Your first decision is which of the optional OpenStack
|
||||
features you want. For example, you must decide whether you want to install
|
||||
Swift, whether you want Glance to use Swift for image storage, whether you want
|
||||
Cinder for block storage, and whether you want nova-network or Quantum to handle
|
||||
your network connectivity. In the case of this example, we will be installing
|
||||
Swift, and Glance will be using it. We'll also be using Cinder for block storage.
|
||||
Because it can be easily installed using orchestration, we will also be using
|
||||
Quantum.
|
||||
|
||||
* **Deployment configuration.** Next you need to decide whether your deployment
|
||||
requires high availability. If you do choose to do an HA deployment, you have a
|
||||
choice regarding the number of controllers you want to include. Following the
|
||||
recommendations in the previous section for a typical HA deployment configuration,
|
||||
we will use 3 OpenStack controllers.
|
||||
|
||||
* **Cobbler server and Puppet Master.** The heart of a Fuel install is the
|
||||
combination of Puppet Master and Cobbler used to create your resources. Although
|
||||
Cobbler and Puppet Master can be installed on separate machines, it is common
|
||||
practice to install both on a single machine for small to medium size clouds,
|
||||
and that's what we'll be doing in this example.
|
||||
(By default, the Fuel ISO creates a single server with both services.)
|
||||
|
||||
* **Domain name.** Puppet clients generate a Certificate Signing Request (CSR),
|
||||
which is then signed by Puppet Master. The signed certificate can then be used
|
||||
to authenticate the client during provisioning. Certificate generation requires
|
||||
a fully qualified hostname, so you must choose a domain name to be used in your
|
||||
installation. Future versions of Fuel will enable you to choose this domain name
|
||||
on your own; by default, Fuel 3.0 uses ``localdomain``.
|
||||
|
||||
* **Network addresses.** OpenStack requires a minimum of three networks. If you
|
||||
are deploying on physical hardware, two of them -- the public network and the
|
||||
internal, or management network -- must be routable in your networking
|
||||
infrastructure. Also, if you intend for your cluster to be accessible from the
|
||||
Internet, you'll want the public network to be on the proper network segment.
|
||||
For simplicity in this case, this example assumes an Internet router at
|
||||
192.168.0.1. Additionally, a set of private network addresses should be selected
|
||||
for automatic assignment to guest VMs. (These are fixed IPs for the private
|
||||
network). In our case, we are allocating network addresses as follows:
|
||||
|
||||
* Public network: 192.168.0.0/24
|
||||
* Internal network: 10.0.0.0/24
|
||||
* Private network: 10.0.1.0/24
|
||||
|
||||
* **Network interfaces.** All of those networks need to be assigned to the
|
||||
available NIC cards on the allocated machines. Additionally, if a fourth NIC is
|
||||
available, Cinder or block storage traffic can also be separated and delegated
|
||||
to the fourth NIC. In our case, we're assigning networks as follows:
|
||||
|
||||
* Public network: eth1
|
||||
* Internal network: eth0
|
||||
* Private network: eth2
|
@ -1,272 +0,0 @@
|
||||
Infrastructure allocation and installation
|
||||
------------------------------------------
|
||||
|
||||
The next step is to make sure that you have all of the required hardware and
|
||||
software in place.
|
||||
|
||||
Software
|
||||
^^^^^^^^
|
||||
|
||||
You can download the latest release of the Fuel ISO from
|
||||
http://fuel.mirantis.com/your-downloads/.
|
||||
|
||||
Alternatively, if you can't use the pre-built ISO, Mirantis also offers the
|
||||
Fuel Library as a tar.gz file downloadable from
|
||||
`Downloads <http://fuel.mirantis.com/your-downloads/>`_ section of the Fuel
|
||||
portal. Using this file requires a bit more manual effort, but will yield the
|
||||
same results as using the ISO.
|
||||
|
||||
Network setup
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
OpenStack requires a minimum of three distinct networks: internal (or
|
||||
management), public, and private. The simplest and best mapping is to
|
||||
assign each network to a different physical interface. However, not
|
||||
all machines have three NICs, and OpenStack can be configured and
|
||||
deployed with only two physical NICs, collapsing the internal and
|
||||
public traffic onto a single NIC.
|
||||
|
||||
If you are deploying to a simulation environment, however, it makes
|
||||
sense to just allocate three NICs to each VM in your OpenStack
|
||||
infrastructure, one each for the internal, public, and private networks respectively.
|
||||
|
||||
Finally, we must assign network ranges to the internal, public, and private
|
||||
networks, and IP addresses to fuel-pm, fuel-controllers, and fuel-compute nodes.
|
||||
For a real deployment using physical infrastructure you must work with your IT
|
||||
department to determine which IPs to use, but for the purposes of this exercise
|
||||
we will assume the below network and IP assignments:
|
||||
|
||||
* 10.0.0.0/24: management or internal network, for communication between
|
||||
Puppet master and Puppet clients, as well as PXE/TFTP/DHCP for Cobbler.
|
||||
* 192.168.0.0/24: public network, for the High Availability (HA) Virtual IP
|
||||
(VIP), as well as floating IPs assigned to OpenStack guest VMs
|
||||
* 10.0.1.0/24: private network, fixed IPs automatically assigned to guest VMs
|
||||
by OpenStack upon their creation
|
||||
|
||||
Next we need to allocate a static IP address from the internal network
|
||||
to eth0 for fuel-pm, and eth1 for the controller, compute, and (if necessary)
|
||||
Quantum nodes. For High Availability (HA) we must choose and assign an IP
|
||||
address from the public network to HAProxy running on the controllers.
|
||||
You can configure network addresses/network mask according to your
|
||||
needs, but our instructions will assume the following network settings
|
||||
on the interfaces:
|
||||
|
||||
* eth0: internal management network, where each machine will have a static IP address
|
||||
|
||||
* 10.0.0.100 for Puppet Master
|
||||
* 10.0.0.101-10.0.0.103 for the controller nodes
|
||||
* 10.0.0.110-10.0.0.126 for the compute nodes
|
||||
* 10.0.0.10 internal Virtual IP for component access
|
||||
* 255.255.255.0 network mask
|
||||
|
||||
* eth1: public network
|
||||
|
||||
* 192.168.0.10 public Virtual IP for access to the Horizon GUI (OpenStack
|
||||
management interface)
|
||||
|
||||
* eth2: for communication between OpenStack VMs; no IP address is assigned, and
|
||||
promiscuous mode is enabled.
|
||||
|
||||
Physical installation infrastructure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The amount of hardware necessary for an installation depends on the
|
||||
choices you have made above. This sample installation requires the
|
||||
following hardware:
|
||||
|
||||
* 1 server to host both Puppet Master and Cobbler. The minimum configuration for
|
||||
this server is:
|
||||
|
||||
* 32-bit or 64-bit architecture
|
||||
* 1+ CPU or vCPU for up to 10 nodes (2 vCPU for up to 20 nodes, 4 vCPU for up
|
||||
to 100 nodes)
|
||||
* 1024+ MB of RAM for up to 10 nodes (4096+ MB for up to 20 nodes, 8192+ MB
|
||||
for up to 100 nodes)
|
||||
* 16+ GB of HDD for OS, and Linux distro storage
|
||||
|
||||
* 3 servers to act as OpenStack controllers (called fuel-controller-01,
|
||||
fuel-controller-02, and fuel-controller-03). The minimum configuration for a
|
||||
controller in Compact mode is:
|
||||
|
||||
* 64-bit architecture
|
||||
* 1+ CPU
|
||||
* 1024+ MB of RAM (2048+ MB preferred)
|
||||
* 400+ GB of HDD
|
||||
|
||||
* 1 server to act as the OpenStack compute node (called fuel-compute-01). The
|
||||
minimum configuration for a compute node with Cinder deployed on it is:
|
||||
|
||||
* 64-bit architecture
|
||||
* 2+ CPUs, with Intel VT-x or AMD-V virtualization technology
|
||||
* 2048+ MB of RAM
|
||||
* 1+ TB of HDD
|
||||
|
||||
(If you choose to deploy Quantum on a separate node, you will need an
|
||||
additional server with specifications comparable to the controller nodes.)
|
||||
|
||||
Make sure your hardware is capable of PXE booting over the network from Cobbler.
|
||||
You'll also need each server's MAC addresses.
|
||||
|
||||
For a list of certified hardware configurations, please `contact the
|
||||
Mirantis Services team <http://www.mirantis.com/contact/>`_.
|
||||
|
||||
Virtual installation infrastructure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For a virtual installation, you need only a single machine. You can get
|
||||
by on 8GB of RAM, but 16GB will be better.
|
||||
|
||||
To actually perform the installation, you need a way to create Virtual Machines.
|
||||
This guide assumes that you are using version 4.2.12 of VirtualBox, which you
|
||||
can download from https://www.virtualbox.org/wiki/Downloads
|
||||
|
||||
Make sure to also install the VirtualBox Extension Pack.
|
||||
|
||||
You'll need to run VirtualBox on a stable host system. Mac OS 10.7.x,
|
||||
CentOS 6.3+, or Ubuntu 12.04 are preferred; results in other operating
|
||||
systems are unpredictable.
|
||||
|
||||
Configuring VirtualBox
|
||||
++++++++++++++++++++++
|
||||
|
||||
If you are on VirtualBox, please create or make sure the following
|
||||
hostonly adapters exist and are configured properly:
|
||||
|
||||
* VirtualBox -> File -> Preferences...
|
||||
|
||||
* Network -> Add HostOnly Adapter (vboxnet0)
|
||||
|
||||
* IPv4 Address: 10.0.0.1
|
||||
* IPv4 Network Mask: 255.255.255.0
|
||||
* DHCP server: disabled
|
||||
|
||||
* Network -> Add HostOnly Adapter (vboxnet1)
|
||||
|
||||
* IPv4 Address: 10.0.1.1
|
||||
* IPv4 Network Mask: 255.255.255.0
|
||||
* DHCP server: disabled
|
||||
|
||||
* Network -> Add HostOnly Adapter (vboxnet2)
|
||||
|
||||
* IPv4 Address: 0.0.0.0
|
||||
* IPv4 Network Mask: 255.255.255.0
|
||||
* DHCP server: disabled
|
||||
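If you prefer the command line, the same host-only networks can be created with ``VBoxManage`` (a sketch covering the first two adapters; interface names are assigned by VirtualBox and may differ on your host)::

    VBoxManage hostonlyif create                      # creates vboxnet0
    VBoxManage hostonlyif ipconfig vboxnet0 --ip 10.0.0.1 --netmask 255.255.255.0
    VBoxManage dhcpserver remove --ifname vboxnet0    # ignore the error if no DHCP server exists
    VBoxManage hostonlyif create                      # creates vboxnet1
    VBoxManage hostonlyif ipconfig vboxnet1 --ip 10.0.1.1 --netmask 255.255.255.0
    VBoxManage dhcpserver remove --ifname vboxnet1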
|
||||
In this example, only the first two adapters will be used, but you can choose to
|
||||
use the third one to handle your storage network traffic.
|
||||
|
||||
After creating these interfaces, reboot the host machine to make sure that
|
||||
DHCP isn't running in the background.
|
||||
|
||||
Installing on Windows isn't recommended, but if you're attempting it,
|
||||
you will also need to set up the IP address & network mask under
|
||||
Control Panel > Network and Internet > Network and Sharing Center for the
|
||||
Virtual HostOnly Network adapter.
|
||||
|
||||
Creating fuel-pm
|
||||
++++++++++++++++
|
||||
|
||||
The process of creating a virtual machine to host Fuel in VirtualBox depends on
|
||||
whether your deployment is purely virtual or consists of a physical or virtual
|
||||
fuel-pm controlling physical hardware. If your deployment is purely
|
||||
virtual then Adapter 1 may be a Hostonly adapter attached to
|
||||
vboxnet0, but if your deployment infrastructure consists of a virtual
|
||||
fuel-pm controlling physical machines, Adapter 1 must be a Bridged
|
||||
Adapter, connected to whatever network interface of the host machine
|
||||
is connected to your physical machines.
|
||||
|
||||
To create fuel-pm, start up VirtualBox and create a new machine as follows:
|
||||
|
||||
* Machine -> New...
|
||||
|
||||
* Name: fuel-pm
|
||||
* Type: Linux
|
||||
* Version: Red Hat (64 Bit)
|
||||
* Memory: 2048 MB
|
||||
* Drive space: 16 GB HDD
|
||||
|
||||
* Machine -> Settings... -> Network
|
||||
|
||||
* Adapter 1
|
||||
|
||||
* Physical network
|
||||
* Enable Network Adapter
|
||||
* Attached to: Bridged Adapter
|
||||
* Name: The host machine's network adapter with access to the network on
|
||||
which the physical machines reside
|
||||
* VirtualBox installation
|
||||
* Enable Network Adapter
|
||||
* Attached to: Hostonly Adapter
|
||||
* Name: vboxnet0
|
||||
|
||||
* Adapter 2
|
||||
|
||||
* Enable Network Adapter
|
||||
* Attached to: Bridged Adapter
|
||||
* Name: eth0 (or whichever physical network is attached to the Internet)
|
||||
|
||||
* Machine -> Storage
|
||||
|
||||
* Attach the downloaded ISO as a CD-ROM drive
|
||||
|
||||
If you can't (or would rather not) install from the ISO, you can find
|
||||
instructions for installing from the Fuel Library in :ref:`Appendix A <Create-PM>`.
|
||||
|
||||
Creating the OpenStack nodes
|
||||
++++++++++++++++++++++++++++
|
||||
|
||||
If you're using VirtualBox, you will need to create the corresponding
|
||||
virtual machines for your OpenStack nodes. Follow these instructions
|
||||
to create machines named fuel-controller-01, fuel-controller-02, fuel-
|
||||
controller-03, and fuel-compute-01, but do not start them yet.
|
||||
|
||||
As you create each network adapter, click Advanced to expose and
|
||||
record the corresponding mac address.
|
||||
|
||||
* Machine -> New...
|
||||
|
||||
* Name: fuel-controller-01 (you will need to repeat these steps for
|
||||
fuel-controller-02, fuel-controller-03, and fuel-compute-01)
|
||||
* Type: Linux
|
||||
* Version: Red Hat (64 Bit)
|
||||
* Memory: 2048MB
|
||||
* Drive space: 8GB
|
||||
|
||||
* Machine -> Settings -> System
|
||||
|
||||
* Check Network in Boot sequence
|
||||
|
||||
* Machine -> Settings -> Storage
|
||||
|
||||
* Controller: SATA
|
||||
|
||||
* Click the Add icon at the bottom of the Storage Tree pane and choose Add Disk
|
||||
* Add a second VDI disk of 10GB for storage
|
||||
|
||||
* Machine -> Settings -> Network
|
||||
|
||||
* Adapter 1
|
||||
|
||||
* Enable Network Adapter
|
||||
* Attached to: Hostonly Adapter
|
||||
* Name: vboxnet0
|
||||
|
||||
* Adapter 2
|
||||
|
||||
* Enable Network Adapter
|
||||
* Attached to: Bridged Adapter
|
||||
* Name: eth0 (physical network attached to the Internet. You can also use a gateway.)
|
||||
|
||||
* Adapter 3
|
||||
|
||||
* Enable Network Adapter
|
||||
* Attached to: Hostonly Adapter
|
||||
* Name: vboxnet1
|
||||
* Advanced -> Promiscuous mode: Allow All
|
||||
|
||||
It is important that hostonly Adapter 1 goes first, as Cobbler will
|
||||
use vboxnet0 for PXE, and VirtualBox boots from LAN on the first
|
||||
available network adapter.
|
||||
|
||||
The additional drive volume will be used as storage space by Cinder, and will be
|
||||
configured automatically by Fuel.
|
@ -1,83 +0,0 @@
|
||||
|
||||
Installing & Configuring Fuel
|
||||
-----------------------------
|
||||
Now that you know what you're going to install and where you're going to
|
||||
install it, it's time to begin putting the pieces together. To do that,
|
||||
you'll need to create the Puppet master and Cobbler servers, which will
|
||||
actually provision and set up your OpenStack nodes.
|
||||
|
||||
Installing Puppet Master is a one-time procedure for the entire
|
||||
infrastructure. Once done, Puppet Master will act as a single point of
|
||||
control for all of your servers, and you will never have to return to
|
||||
these installation steps again.
|
||||
|
||||
The deployment of the Puppet Master server -- named fuel-pm in these
|
||||
instructions -- varies slightly between the physical and simulation
|
||||
environments. In a physical infrastructure, fuel-pm must have a
|
||||
network presence on the same network the physical machines will
|
||||
ultimately PXE boot from. In a simulation environment fuel-pm only
|
||||
needs virtual network (hostonlyif) connectivity.
|
||||
|
||||
At this point, you should have either a physical or virtual machine that
|
||||
can be booted from the Mirantis ISO, downloaded from
|
||||
http://fuel.mirantis.com/your-downloads/ .
|
||||
|
||||
This ISO can be used to create fuel-pm on a physical or virtual
|
||||
machine based on CentOS 6.4. If for some reason you
|
||||
can't use this ISO, follow the instructions in
|
||||
:ref:`Creating the Puppet master <Create-PM>` to create your own fuel-pm, then
|
||||
skip ahead to :ref:`Configuring fuel-pm <Configuring-Fuel-PM>`.
|
||||
|
||||
Installing Fuel from the ISO
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Start the new machine to install the ISO. The only real installation decision
|
||||
you will need to make is to specify the interface through which the installer
|
||||
can access the Internet. Choose ``eth1``, as it's the
|
||||
Internet-connected interface.
|
||||
|
||||
Configuring fuel-pm from the ISO installation
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Once fuel-pm finishes installing, you'll be presented with a basic menu.
|
||||
You can use this menu to set the basic information Fuel will need to configure
|
||||
your installation. You can customize these steps for your own situation, of
|
||||
course, but here are the steps to take for the example installation:
|
||||
|
||||
#. Future versions of Fuel will enable you to change the hostname and domain name
|
||||
for your admin node and cluster, respectively. For now, your admin node must be
|
||||
called ``fuel-pm``, and your domain name must be ``localdomain``.
|
||||
#. To configure the management interface, choose 2.
|
||||
|
||||
* The example specifies ``eth0`` as the internal, or management interface, so
|
||||
enter that.
|
||||
* The management network in the example is using static IP addresses, so
|
||||
specify ``no`` for using DHCP.
|
||||
* Enter the IP address of ``10.0.0.100`` for the Puppet Master, and the
|
||||
netmask of 255.255.255.0. Future versions of Fuel will enable you to choose
|
||||
a different IP range for your management interface.
|
||||
* Set the gateway and DNS servers if desired. In this example, we'll use the
|
||||
router at 192.168.0.1 as the gateway.
|
||||
|
||||
#. To configure the external interface, which VMs will use to send traffic to
|
||||
and from the internet, choose 3. Set the interface to ``eth1``. By default,
|
||||
this interface uses DHCP, which is what the example calls for.
|
||||
|
||||
#. To choose the start and end addresses to be used during PXE boot, choose 4.
|
||||
In the case of this example, the start address is ``10.0.0.201`` and the end
|
||||
address is ``10.0.0.254``. Later, these nodes will receive IP addresses from Cobbler.
|
||||
|
||||
#. Future versions of Fuel will enable you to choose a custom set of repositories.
|
||||
|
||||
#. If you need to specify a proxy through which fuel-pm will access the Internet,
|
||||
press 6.
|
||||
|
||||
#. Once you've finished editing, choose 9 to save your changes and exit the menu.
|
||||
|
||||
..
|
||||
Please note: Even though defaults are shown, you must set actual values; if
|
||||
you simply press "enter" you will wind up with empty values.
|
||||
|
||||
To re-enter the menu at any time, type::
|
||||
|
||||
# bootstrap_admin_node.sh
|
@ -1,349 +0,0 @@
|
||||
.. _Install-OS-Using-Fuel:
|
||||
|
||||
Installing the OS using Fuel
|
||||
----------------------------
|
||||
|
||||
The first step in creating the actual OpenStack nodes is to let Fuel's Cobbler
|
||||
kickstart files assist in the installation of operating systems on the target
|
||||
servers.
|
||||
|
||||
.. _Configuring-Cobbler:
|
||||
|
||||
Configuring Cobbler with config.yaml
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Fuel uses a single file, ``config.yaml``, to both configure Cobbler and assist
|
||||
in the configuration of the ``site.pp`` file. This file appears in the ``/root``
|
||||
directory when the master node (fuel-pm) is provisioned and configured.
|
||||
|
||||
You'll want to configure this example for your own situation, but the example
|
||||
looks like this::
|
||||
|
||||
common:
|
||||
orchestrator_common:
|
||||
attributes:
|
||||
deployment_mode: ha_compact
|
||||
deployment_engine: simplepuppet
|
||||
task_uuid: deployment_task
|
||||
|
||||
Possible values for ``deployment_mode`` are ``singlenode_compute``,
|
||||
``multinode_compute``, ``ha_compute``, ``ha_compact``, ``ha_full``, and
|
||||
``ha_minimal``. Change the ``deployment_mode`` to ``ha_compact`` to tell Fuel to
|
||||
use HA architecture. Specifying the ``simplepuppet`` deployment engine means
|
||||
that the orchestrator will be calling Puppet on each of the nodes.
|
||||
|
||||
Next you'll need to set OpenStack's networking information::
|
||||
|
||||
openstack_common:
|
||||
internal_virtual_ip: 10.0.0.10
|
||||
public_virtual_ip: 192.168.0.10
|
||||
create_networks: true
|
||||
fixed_range: 172.16.0.0/16
|
||||
floating_range: 192.168.0.0/24
|
||||
|
||||
NOTE: The network range configured for ``fixed_range`` allows for communications
|
||||
between VM nodes and is not meant to allow public or private network access.
|
||||
|
||||
Change the virtual IPs to match the target networks, and set the fixed and
|
||||
floating ranges. ::
|
||||
|
||||
swift_loopback: loopback
|
||||
nv_physical_volumes:
|
||||
- /dev/sdb
|
||||
|
||||
By setting the ``nv_physical_volumes`` value, you are not only telling OpenStack
|
||||
to use this value for Cinder (you'll see more about that in the ``site.pp`` file),
|
||||
but also where Cinder should store its data.
|
||||
|
||||
Later, we'll set up a new partition for Cinder, so tell Cobbler to create it here. ::
|
||||
|
||||
external_ip_info:
|
||||
public_net_router: 192.168.0.1
|
||||
ext_bridge: 0.0.0.0
|
||||
pool_start: 192.168.0.110
|
||||
pool_end: 192.168.0.126
|
||||
|
||||
Set the ``public_net_router`` to point to the real router at the public network.
|
||||
The ``ext_bridge`` is the IP of the Quantum bridge. It should be assigned to any
|
||||
available free IP address on the public network that's outside the floating range.
|
||||
You also have the option to simply set it to ``0.0.0.0``. The ``pool_start`` and
|
||||
``pool_end`` values represent the public addresses of your nodes, and should be
|
||||
within the ``floating_range``. ::
|
||||
|
||||
segment_range: 900:999
|
||||
network_manager: nova.network.manager.FlatDHCPManager
|
||||
auto_assign_floating_ip: true
|
||||
quantum_netnode_on_cnt: true
|
||||
|
||||
Fuel provides two choices for your network manager: ``FlatDHCPManager`` and
|
||||
``VlanManager``. By default, the system uses ``FlatDHCPManager``. Here you can
|
||||
see that we're also telling OpenStack to automatically assign a floating IP to
|
||||
an instance when it's created, and to put the Quantum services on the controllers
|
||||
rather than a separate node. You can also choose ``tenant_network_type`` for
|
||||
network segmentation type and segmentation range ``segment_range`` for network
|
||||
(consult Quantum documentation for details). ::
|
||||
|
||||
use_syslog: false
|
||||
syslog_server: 127.0.0.1
|
||||
mirror_type: default
|
||||
|
||||
**THIS SETTING IS CRUCIAL:** The ``mirror_type`` **must** be set to ``default``
|
||||
unless you have your own repositories set up, or OpenStack will not install properly. ::
|
||||
|
||||
quantum: true
|
||||
internal_interface: eth0
|
||||
public_interface: eth1
|
||||
private_interface: eth2
|
||||
public_netmask: 255.255.255.0
|
||||
internal_netmask: 255.255.255.0
|
||||
|
||||
Earlier, you decided which interfaces to use for which networks; note that here. ::
|
||||
|
||||
default_gateway: 192.168.0.1
|
||||
|
||||
Depending on how you've set up your network, you can either set the
|
||||
``default_gateway`` to the Master Node (fuel-pm) or to the ``public_net_router``. ::
|
||||
|
||||
nagios_master: fuel-controller-01.localdomain
|
||||
loopback: loopback
|
||||
cinder: true
|
||||
cinder_nodes:
|
||||
- controller
|
||||
swift: true
|
||||
|
||||
The loopback setting determines how Swift stores data. If you set the value to
|
||||
``loopback``, Swift will use 1GB files as storage devices. If you tuned Cobbler
|
||||
to create a partition for Swift and mounted it to ``/srv/nodes/``, then you
|
||||
should set ``loopback`` to ``false``.
|
||||
|
||||
In this example, you're using Cinder and including it on the compute nodes, so
|
||||
note that appropriately. Also, you're using Swift, so turn that on here. ::
|
||||
|
||||
repo_proxy: http://10.0.0.100:3128
|
||||
|
||||
One improvement in Fuel 2.1 was the ability for the Master Node to cache
|
||||
downloads in order to speed up installs; by default the ``repo_proxy`` is set to
|
||||
point to fuel-pm in order to let that happen. One consequence of that is that
|
||||
your deployment will actually go faster if you let one install complete, then do
|
||||
all the others, rather than running all of them concurrently. ::
|
||||
|
||||
deployment_id: '53'
|
||||
|
||||
Fuel enables you to manage multiple clusters; setting the ``deployment_id`` will
|
||||
let Fuel know which deployment you're working with. ::
|
||||
|
||||
dns_nameservers:
|
||||
- 10.0.0.100
|
||||
- 8.8.8.8
|
||||
|
||||
The slave nodes should first look to the master node for DNS, so mark that as your
|
||||
first nameserver.
|
||||
|
||||
The next step is to define the nodes themselves. To do that, you'll list each
|
||||
node once for each role that needs to be installed. Note that by default the
|
||||
first node is called ``fuel-cobbler``; change it to ``fuel-pm``. ::
|
||||
|
||||
nodes:
|
||||
- name: fuel-pm
|
||||
role: cobbler
|
||||
internal_address: 10.0.0.100
|
||||
public_address: 192.168.0.100
|
||||
- name: fuel-controller-01
|
||||
role: controller
|
||||
internal_address: 10.0.0.101
|
||||
public_address: 192.168.0.101
|
||||
swift_zone: 1
|
||||
- name: fuel-controller-02
|
||||
role: controller
|
||||
internal_address: 10.0.0.102
|
||||
public_address: 192.168.0.102
|
||||
swift_zone: 2
|
||||
- name: fuel-controller-03
|
||||
role: controller
|
||||
internal_address: 10.0.0.103
|
||||
public_address: 192.168.0.103
|
||||
swift_zone: 3
|
||||
- name: fuel-controller-01
|
||||
role: quantum
|
||||
internal_address: 10.0.0.101
|
||||
public_address: 192.168.0.101
|
||||
- name: fuel-compute-01
|
||||
role: compute
|
||||
internal_address: 10.0.0.110
|
||||
public_address: 192.168.0.110
|
||||
|
||||
Notice that each node can be listed multiple times; this is because each node
|
||||
fulfills multiple roles. Notice also that the IP address for fuel-compute-01
|
||||
is *.110, not *.105.
|
||||
|
||||
The ``cobbler_common`` section applies to all machines::
|
||||
|
||||
cobbler_common:
|
||||
# for Centos
|
||||
profile: "centos64_x86_64"
|
||||
|
||||
Fuel can install CentOS on your servers, or you can add a profile of your own.
|
||||
By default, ``config.yaml`` uses CentOS. ::
|
||||
|
||||
netboot-enabled: "1"
|
||||
# for Centos
|
||||
name-servers: "10.0.0.100"
|
||||
name-servers-search: "localdomain"
|
||||
gateway: 192.168.0.1
|
||||
|
||||
Set the default nameserver to be fuel-pm, and change the domain name to your own
|
||||
domain name. Set the ``gateway`` to the public network's default gateway.
|
||||
Alternatively, if you don't plan to use your public network's actual gateway, you
|
||||
can set this value to be the IP address of the master node.
|
||||
|
||||
**Please note:** You must specify a working gateway (or proxy) in order to install
|
||||
OpenStack, because the system will need to communicate with public repositories. ::
|
||||
|
||||
ksmeta: "puppet_version=2.7.19-1puppetlabs2 \
|
||||
puppet_auto_setup=1 \
|
||||
puppet_master=fuel-pm.localdomain \
|
||||
|
||||
Change the fully-qualified domain name for the Puppet Master to reflect your own
|
||||
domain name. ::
|
||||
|
||||
puppet_enable=0 \
|
||||
ntp_enable=1 \
|
||||
mco_auto_setup=1 \
|
||||
mco_pskey=un0aez2ei9eiGaequaey4loocohjuch4Ievu3shaeweeg5Uthi \
|
||||
mco_stomphost=10.0.0.100 \
|
||||
|
||||
Make sure the ``mco_stomphost`` is set for the master node so that the
|
||||
orchestrator can find the nodes. ::
|
||||
|
||||
mco_stompport=61613 \
|
||||
mco_stompuser=mcollective \
|
||||
mco_stomppassword=AeN5mi5thahz2Aiveexo \
|
||||
mco_enable=1"
|
||||
|
||||
This section sets the system up for orchestration; you shouldn't have to touch it.
|
||||
|
||||
Next you'll define the actual servers. ::
|
||||
|
||||
fuel-controller-01:
|
||||
hostname: "fuel-controller-01"
|
||||
role: controller
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "08:00:27:BD:3A:7D"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.101"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-controller-01.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "08:00:27:ED:9C:3C"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "08:00:27:B0:EB:2C"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
For a VirtualBox installation, you can retrieve the MAC addresses for your network
|
||||
adapters by expanding "Advanced" for the adapater in VirtualBox, or by executing
|
||||
``ifconfig`` on the server itself.
|
||||
|
||||
For a physical installation, the MAC address of the server is often printed on
|
||||
the sticker attached to the server for the LOM interfaces, or is available from
|
||||
the BIOS screen. You may also be able to find the MAC address in the hardware
|
||||
inventory BMC/DRAC/ILO, though this may be server-dependent.
|
||||
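For example, on the VirtualBox host you can list a VM's MAC addresses from the command line (hypothetical VM name)::

    VBoxManage showvminfo fuel-controller-01 | grep MAC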
|
||||
Also, make sure the ``ip-address`` is correct, and that the ``dns-name`` has
|
||||
your own domain name in it.
|
||||
|
||||
In this example, IP addresses should be assigned as follows::
|
||||
|
||||
fuel-controller-01: 10.0.0.101
|
||||
fuel-controller-02: 10.0.0.102
|
||||
fuel-controller-03: 10.0.0.103
|
||||
fuel-compute-01: 10.0.0.110
|
||||
|
||||
Repeat this step for each of the other controllers, and for the compute node.
|
||||
Note that the compute node has its own role::
|
||||
|
||||
fuel-compute-01:
|
||||
hostname: "fuel-compute-01"
|
||||
role: compute
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "08:00:27:AE:A9:6E"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.110"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-compute-01.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "08:00:27:B7:F9:CD"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "08:00:27:8B:A6:B7"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
|
||||
Loading the configuration
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Once you've completed the changes to ``config.yaml``, you need to load the
|
||||
information into Cobbler. To do that, use the ``cobbler_system`` script::
|
||||
|
||||
# cobbler_system -f config.yaml
|
||||
|
||||
Now you're ready to start spinning up the controllers and compute nodes.
|
||||
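Before booting anything, you can check that the systems were registered with Cobbler (an optional verification step)::

    # cobbler system list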
|
||||
Installing the operating system
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Now that Cobbler has the correct configuration, the only thing you need to do is
|
||||
to PXE-boot your nodes. This means that they will boot over the network, with
|
||||
DHCP/TFTP provided by Cobbler, and will be provisioned accordingly,
|
||||
with the specified operating system and configuration.
|
||||
|
||||
If you installed Fuel from the ISO, start fuel-controller-01 first and let the
|
||||
installation finish before starting the other nodes. Fuel will cache the
|
||||
downloads so subsequent installs will go faster.
|
||||
|
||||
The process for each node looks like this:
|
||||
|
||||
#. Start the VM.
|
||||
#. Press F12 immediately and select l (LAN) as the bootable medium.
|
||||
#. Wait for the installation to complete.
|
||||
#. Log into the new machine using root/r00tme.
|
||||
#. **Change the root password.**
|
||||
#. Check that networking is set up correctly and the machine can reach the Internet::
|
||||
|
||||
# ping fuel-pm.localdomain
|
||||
# ping www.mirantis.com
|
||||
|
||||
If you're unable to ping outside addresses, add the fuel-pm server as a default
|
||||
gateway::
|
||||
|
||||
# route add default gw 10.0.0.100
|
||||
|
||||
**It is important to note** that if you use VLANs in your network
|
||||
configuration, you always have to keep in mind the fact that PXE
|
||||
booting does not work on tagged interfaces. Therefore, all your nodes,
|
||||
including the one where the Cobbler service resides, must share one
|
||||
untagged VLAN (also called native VLAN). If necessary, you can use the
|
||||
``dhcp_interface`` parameter of the ``cobbler::server`` class to bind the DHCP
|
||||
service to the appropriate interface.
|
@ -1,31 +0,0 @@
|
||||
Generating the Puppet manifest
|
||||
------------------------------
|
||||
|
||||
Before you can deploy OpenStack, you will need to configure the site.pp file.
|
||||
While previous versions of Fuel required you to manually configure ``site.pp``,
|
||||
version 2.1 includes the ``openstack_system`` script, which uses both the
|
||||
``config.yaml`` and template files for the various reference architectures to
|
||||
create the appropriate Puppet manifest. To create ``site.pp``, execute this command::
|
||||
|
||||
# openstack_system -c config.yaml \
|
||||
-t /etc/puppet/modules/openstack/examples/site_openstack_ha_compact.pp \
|
||||
-o /etc/puppet/manifests/site.pp \
|
||||
-a astute.yaml
|
||||
|
||||
The four parameters shown here represent the following:
|
||||
|
||||
* ``-c``: The absolute or relative path to the ``config.yaml`` file you
|
||||
customized earlier.
|
||||
* ``-t``: The template file to serve as a basis for ``site.pp``.
|
||||
Possible templates include ``site_openstack_ha_compact.pp``,
|
||||
``site_openstack_ha_minimal.pp``, ``site_openstack_ha_full.pp``,
|
||||
``site_openstack_single.pp``, and ``site_openstack_simple.pp``.
|
||||
* ``-o``: The output file. This should always be ``/etc/puppet/manifests/site.pp``.
|
||||
* ``-a``: The orchestration configuration file, to be output for use in the next step.
|
||||
|
||||
From there you're ready to install your OpenStack components, but first let's
look at what's actually in the new ``site.pp`` manifest, so that you can
understand how to customize it if necessary.
|
||||
|
||||
Similarly, if you are installing Fuel Library without the ISO, you will need to
|
||||
make these customizations yourself.
|
@ -1,848 +0,0 @@
|
||||
|
||||
Understanding the Puppet manifest
|
||||
---------------------------------
|
||||
|
||||
At this point you have functioning servers that are ready to have
|
||||
OpenStack installed. If you're using VirtualBox, save the current state
|
||||
of every virtual machine by taking a snapshot using ``File->Take Snapshot``. This
|
||||
way you can go back to this point and try again if necessary.
|
||||
|
||||
|
||||
The next step will be to go through the ``/etc/puppet/manifests/site.pp`` file
|
||||
and make any necessary customizations. If you have run ``openstack_system``,
|
||||
there shouldn't be anything to change (with one small exception) but if you are
|
||||
installing Fuel manually, you will need to make these changes yourself.
|
||||
|
||||
In either case, it's always good to understand what your system is doing.
|
||||
|
||||
Let's start with the basic network customization::
|
||||
|
||||
### GENERAL CONFIG ###
|
||||
# This section sets main parameters such as hostnames and IP addresses of different nodes
|
||||
|
||||
# This is the name of the public interface. The public network provides address
|
||||
# space for Floating IPs, as well as public IP accessibility to the API endpoints.
|
||||
$public_interface = 'eth1'
|
||||
$public_br = 'br-ex'
|
||||
|
||||
# This is the name of the internal interface. It will be attached to the management
|
||||
# network, where data exchange between components of the OpenStack cluster will happen.
|
||||
$internal_interface = 'eth0'
|
||||
$internal_br = 'br-mgmt'
|
||||
|
||||
# This is the name of the private interface. All traffic within OpenStack tenants'
|
||||
# networks will go through this interface.
|
||||
$private_interface = 'eth2'
|
||||
|
||||
In this case, we don't need to make any changes to the interface
|
||||
settings, because they match what we've already set up. ::
|
||||
|
||||
# Public and Internal VIPs. These virtual addresses are required by HA topology
|
||||
# and will be managed by keepalived.
|
||||
$internal_virtual_ip = '10.0.0.10'
|
||||
|
||||
# Change this IP to IP routable from your 'public' network,
|
||||
# e. g. Internet or your office LAN, in which your public
|
||||
# interface resides
|
||||
$public_virtual_ip = '192.168.0.10'
|
||||
|
||||
Make sure the virtual IPs you see here mesh with your actual setup. They should
be IPs that are routable, but not within the range of the DHCP scope. These are
the IPs through which your services will be accessed.
|
||||
|
||||
The next section sets up the servers themselves. If you are setting up Fuel
manually, make sure to add each server with the appropriate IP addresses; if you
ran the ``openstack_system`` script, the values will be overridden by the next
section, and you can ignore this array. ::
|
||||
|
||||
$nodes_harr = [
|
||||
{
|
||||
'name' => 'fuel-pm',
|
||||
'role' => 'cobbler',
|
||||
'internal_address' => '10.0.0.100',
|
||||
'public_address' => '192.168.0.100',
|
||||
'mountpoints'=> "1 1\n2 1",
|
||||
'storage_local_net_ip' => '10.0.0.100',
|
||||
},
|
||||
{
|
||||
'name' => 'fuel-controller-01',
|
||||
'role' => 'primary-controller',
|
||||
'internal_address' => '10.0.0.101',
|
||||
'public_address' => '192.168.0.101',
|
||||
'mountpoints'=> "1 1\n2 1",
|
||||
'storage_local_net_ip' => '10.0.0.101',
|
||||
},
|
||||
{
|
||||
'name' => 'fuel-controller-02',
|
||||
'role' => 'controller',
|
||||
'internal_address' => '10.0.0.102',
|
||||
'public_address' => '192.168.0.102',
|
||||
'mountpoints'=> "1 1\n2 1",
|
||||
'storage_local_net_ip' => '10.0.0.102',
|
||||
},
|
||||
{
|
||||
'name' => 'fuel-controller-03',
|
||||
'role' => 'controller',
|
||||
'internal_address' => '10.0.0.105',
|
||||
'public_address' => '192.168.0.105',
|
||||
'mountpoints'=> "1 1\n2 1",
|
||||
'storage_local_net_ip' => '10.0.0.105',
|
||||
},
|
||||
{
|
||||
'name' => 'fuel-compute-01',
|
||||
'role' => 'compute',
|
||||
'internal_address' => '10.0.0.106',
|
||||
'public_address' => '192.168.0.106',
|
||||
'mountpoints'=> "1 1\n2 1",
|
||||
'storage_local_net_ip' => '10.0.0.106',
|
||||
}
|
||||
]
|
||||
|
||||
Because this section comes from a template, it will likely include a number of
|
||||
servers you're not using. Feel free to leave them or take them out.
|
||||
|
||||
Next the ``site.pp`` file lists all of the nodes and roles you defined in the
|
||||
``config.yaml`` file::
|
||||
|
||||
$nodes = [{'public_address' => '192.168.0.101','name' => 'fuel-controller-01','role' =>
|
||||
'primary-controller','internal_address' => '10.0.0.101',
|
||||
'storage_local_net_ip' => '10.0.0.101', 'mountpoints' => '1 2\n2 1',
|
||||
'swift-zone' => 1 },
|
||||
{'public_address' => '192.168.0.102','name' => 'fuel-controller-02','role' =>
|
||||
'controller','internal_address' => '10.0.0.102',
|
||||
'storage_local_net_ip' => '10.0.0.102', 'mountpoints' => '1 2\n2 1',
|
||||
'swift-zone' => 2},
|
||||
{'public_address' => '192.168.0.103','name' => 'fuel-controller-03','role' =>
|
||||
'storage','internal_address' => '10.0.0.103',
|
||||
'storage_local_net_ip' => '10.0.0.103', 'mountpoints' => '1 2\n2 1',
|
||||
'swift-zone' => 3},
|
||||
{'public_address' => '192.168.0.110','name' => 'fuel-compute-01','role' =>
|
||||
'compute','internal_address' => '10.0.0.110'}]
|
||||
|
||||
Possible roles include ``compute``, ``controller``, ``primary-controller``,
``storage``, ``swift-proxy``, ``quantum``, ``master``, and ``cobbler``. Check the
IP addresses for each node and make sure that they mesh with what's in this array.
|
||||
|
||||
The file also specifies the default gateway; check that this value matches the gateway that is actually reachable on your public network::
|
||||
|
||||
$default_gateway = '192.168.0.1'
|
||||
|
||||
Next ``site.pp`` defines DNS servers and provides netmasks::
|
||||
|
||||
# Specify nameservers here.
|
||||
# Need points to cobbler node IP, or to special prepared nameservers if you
|
||||
# known what you do.
|
||||
$dns_nameservers = ['10.0.0.100','8.8.8.8']
|
||||
|
||||
# Specify netmasks for internal and external networks.
|
||||
$internal_netmask = '255.255.255.0'
|
||||
$public_netmask = '255.255.255.0'
|
||||
...
|
||||
#Set this to anything other than pacemaker if you do not want Quantum HA
|
||||
#Also, if you do not want Quantum HA, you MUST enable $quantum_network_node
|
||||
#on the ONLY controller
|
||||
$ha_provider = 'pacemaker'
|
||||
$use_unicast_corosync = false
|
||||
|
||||
Next specify the main controller as the Nagios master. ::
|
||||
|
||||
# Set nagios master fqdn
|
||||
$nagios_master = 'fuel-controller-01.localdomain'
|
||||
## proj_name name of environment nagios configuration
|
||||
$proj_name = 'test'
|
||||
|
||||
Here again we have a parameter that looks ahead to things to come. OpenStack
supports monitoring via Nagios. In this section, you can choose the Nagios
master server as well as set a project name. ::
|
||||
|
||||
# Specify if your installation contains multiple Nova controllers. Defaults to
|
||||
# true as it is the most common scenario.
|
||||
$multi_host = true
|
||||
|
||||
A single host cloud isn't especially useful, but if you really want to, you can
|
||||
specify that here.
|
||||
|
||||
Finally, you can define the various usernames and passwords for OpenStack
|
||||
services. ::
|
||||
|
||||
# Specify different DB credentials for various services
|
||||
$mysql_root_password = 'nova'
|
||||
$admin_email = 'openstack@openstack.org'
|
||||
$admin_password = 'nova'
|
||||
|
||||
$keystone_db_password = 'nova'
|
||||
$keystone_admin_token = 'nova'
|
||||
|
||||
$glance_db_password = 'nova'
|
||||
$glance_user_password = 'nova'
|
||||
|
||||
$nova_db_password = 'nova'
|
||||
$nova_user_password = 'nova'
|
||||
|
||||
$rabbit_password = 'nova'
|
||||
$rabbit_user = 'nova'
|
||||
|
||||
$swift_user_password = 'swift_pass'
|
||||
$swift_shared_secret = 'changeme'
|
||||
|
||||
$quantum_user_password = 'quantum_pass'
|
||||
$quantum_db_password = 'quantum_pass'
|
||||
$quantum_db_user = 'quantum'
|
||||
$quantum_db_dbname = 'quantum'
|
||||
|
||||
# End DB credentials section
|
||||
|
||||
Now that the network is configured for the servers, let's look at the
|
||||
various OpenStack services.
|
||||
|
||||
Enabling Quantum
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
In order to deploy OpenStack with Quantum you need to set up an
|
||||
additional node that will act as an L3 router, or run Quantum out of
|
||||
one of the existing nodes. ::
|
||||
|
||||
### NETWORK/QUANTUM ###
|
||||
# Specify network/quantum specific settings
|
||||
|
||||
# Should we use quantum or nova-network(deprecated).
|
||||
# Consult OpenStack documentation for differences between them.
|
||||
$quantum = true
|
||||
$quantum_netnode_on_cnt = true
|
||||
|
||||
In this case, we're using a "compact" architecture, so we want to place Quantum
|
||||
on the controllers::
|
||||
|
||||
# Specify network creation criteria:
|
||||
# Should puppet automatically create networks?
|
||||
$create_networks = true
|
||||
|
||||
# Fixed IP addresses are typically used for communication between VM instances.
|
||||
$fixed_range = '172.16.0.0/16'
|
||||
|
||||
# Floating IP addresses are used for communication of VM instances with the
|
||||
# outside world (e.g. Internet).
|
||||
$floating_range = '192.168.0.0/24'
|
||||
|
||||
OpenStack uses two ranges of IP addresses for virtual machines: fixed IPs, which
|
||||
are used for communication between VMs, and thus are part of the private network,
|
||||
and floating IPs, which are assigned to VMs for the purpose of communicating to
|
||||
and from the Internet. ::
|
||||
|
||||
# These parameters are passed to the previously specified network manager,
|
||||
# e.g. nova-manage network create.
|
||||
# Not used in Quantum.
|
||||
$num_networks = 1
|
||||
$network_size = 31
|
||||
$vlan_start = 300
|
||||
|
||||
These values don't actually relate to Quantum; they are used by nova-network.
|
||||
IDs for the VLANs OpenStack will create for tenants run from ``vlan_start`` to
|
||||
(``vlan_start + num_networks - 1``), and are generated automatically. ::
|
||||
|
||||
# Quantum
|
||||
|
||||
# Segmentation type for isolating traffic between tenants
|
||||
# Consult Openstack Quantum docs
|
||||
$tenant_network_type = 'gre'
|
||||
|
||||
# Which IP address will be used for creating GRE tunnels.
|
||||
$quantum_gre_bind_addr = $internal_address
|
||||
|
||||
If you are installing Quantum in non-HA mode, you will need to specify which
|
||||
single controller controls Quantum. ::
|
||||
|
||||
# If $external_ipinfo option is not defined, the addresses will be allocated
|
||||
# automatically from $floating_range:
|
||||
# the first address will be defined as an external default router,
|
||||
# the second address will be attached to an uplink bridge interface,
|
||||
# the remaining addresses will be utilized for the floating IP address pool.
|
||||
$external_ipinfo = {
|
||||
'pool_start' => '192.168.0.115',
|
||||
'public_net_router' => '192.168.0.1',
|
||||
'pool_end' => '192.168.0.126',
|
||||
'ext_bridge' => '0.0.0.0'
|
||||
}
|
||||
|
||||
# Quantum segmentation range.
|
||||
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
|
||||
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
|
||||
$segment_range = '900:999'
|
||||
|
||||
# Set up OpenStack network manager. It is used ONLY in nova-network.
|
||||
# Consult Openstack nova-network docs for possible values.
|
||||
$network_manager = 'nova.network.manager.FlatDHCPManager'
|
||||
|
||||
# Assign floating IPs to VMs on startup automatically?
|
||||
$auto_assign_floating_ip = false
|
||||
|
||||
# Database connection for Quantum configuration (quantum.conf)
|
||||
   $quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${internal_virtual_ip}/${quantum_db_dbname}"
|
||||
|
||||
if $quantum {
|
||||
$public_int = $public_br
|
||||
$internal_int = $internal_br
|
||||
} else {
|
||||
$public_int = $public_interface
|
||||
$internal_int = $internal_interface
|
||||
}
|
||||
|
||||
If the system is set up to use Quantum, the public and internal interfaces are
|
||||
set to use the appropriate bridges, rather than the defined interfaces.
|
||||
|
||||
The remaining configuration is used to define classes that will be added to each
|
||||
Quantum node::
|
||||
|
||||
#Network configuration
|
||||
stage {'netconfig':
|
||||
before => Stage['main'],
|
||||
}
|
||||
class {'l23network': use_ovs => $quantum, stage=> 'netconfig'}
|
||||
class node_netconfig (
|
||||
$mgmt_ipaddr,
|
||||
$mgmt_netmask = '255.255.255.0',
|
||||
$public_ipaddr = undef,
|
||||
$public_netmask= '255.255.255.0',
|
||||
$save_default_gateway=true,
|
||||
$quantum = $quantum,
|
||||
) {
|
||||
if $quantum {
|
||||
l23network::l3::create_br_iface {'mgmt':
|
||||
interface => $internal_interface,
|
||||
bridge => $internal_br,
|
||||
ipaddr => $mgmt_ipaddr,
|
||||
netmask => $mgmt_netmask,
|
||||
dns_nameservers => $dns_nameservers,
|
||||
save_default_gateway => $save_default_gateway,
|
||||
} ->
|
||||
l23network::l3::create_br_iface {'ex':
|
||||
interface => $public_interface,
|
||||
bridge => $public_br,
|
||||
ipaddr => $public_ipaddr,
|
||||
netmask => $public_netmask,
|
||||
gateway => $default_gateway,
|
||||
}
|
||||
} else {
|
||||
# nova-network mode
|
||||
l23network::l3::ifconfig {$public_int:
|
||||
ipaddr => $public_ipaddr,
|
||||
netmask => $public_netmask,
|
||||
gateway => $default_gateway,
|
||||
}
|
||||
l23network::l3::ifconfig {$internal_int:
|
||||
ipaddr => $mgmt_ipaddr,
|
||||
netmask => $mgmt_netmask,
|
||||
dns_nameservers => $dns_nameservers,
|
||||
}
|
||||
}
|
||||
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
|
||||
}
|
||||
### NETWORK/QUANTUM END ###
|
||||
|
||||
All of this assumes, of course, that you're using Quantum; if you're using
nova-network instead, only the nova-network values apply.
|
||||
|
||||
Defining the current cluster
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Fuel enables you to control multiple deployments simultaneously by setting an
|
||||
individual ``deployment_id``::
|
||||
|
||||
   # This parameter specifies the identifier of the current cluster. This is
   # needed in case of multiple environments per installation.
   # Each cluster requires a unique integer value.
   # Valid identifier range is 0 to 254
   $deployment_id = '79'
|
||||
|
||||
Enabling Cinder
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
This example also uses Cinder, and with
|
||||
some very specific variations from the default. Specifically, as we
|
||||
said before, while the Cinder scheduler will continue to run on the
|
||||
controllers, the actual storage takes place on the compute nodes, on
|
||||
the ``/dev/sdb1`` partition you created earlier. Cinder will be activated
|
||||
on any node that contains the specified block devices -- unless
|
||||
specified otherwise -- so let's look at what all of that means for the
|
||||
configuration. ::
|
||||
|
||||
|
||||
# Choose which nodes to install cinder onto
|
||||
# 'compute' -> compute nodes will run cinder
|
||||
# 'controller' -> controller nodes will run cinder
|
||||
# 'storage' -> storage nodes will run cinder
|
||||
# 'fuel-controller-XX' -> specify particular host(s) by hostname
|
||||
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
|
||||
# 'all' -> compute, controller, and storage nodes will run
|
||||
# cinder (excluding swift and proxy nodes)
|
||||
$cinder_nodes = ['controller']
|
||||
|
||||
We want Cinder to be on the controller nodes, so set this value to ``['controller']``. ::
|
||||
|
||||
   #Set this to true if you want cinder-volume to be installed on the host;
   #otherwise only the api and scheduler services will be installed
   $manage_volumes = true
|
||||
|
||||
# Setup network interface, which Cinder uses to export iSCSI targets.
|
||||
$cinder_iscsi_bind_addr = $internal_address
|
||||
|
||||
Here you can specify which network interface is used by Cinder for its own
traffic. For example, you could set up a fourth NIC at ``eth3``
and specify its address rather than ``$internal_address``. ::
|
||||
|
||||
# Below you can add physical volumes to cinder. Please replace values with the
|
||||
# actual names of devices.
|
||||
# This parameter defines which partitions to aggregate into cinder-volumes
|
||||
# or nova-volumes LVM VG
|
||||
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
|
||||
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
|
||||
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
|
||||
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG
|
||||
# by yourself
|
||||
$nv_physical_volume = ['/dev/sdb']
|
||||
|
||||
# Evaluate cinder node selection
|
||||
   if ($cinder) {
     if (member($cinder_nodes,'all')) {
       $is_cinder_node = true
     } elsif (member($cinder_nodes,$::hostname)) {
       $is_cinder_node = true
     } elsif (member($cinder_nodes,$internal_address)) {
       $is_cinder_node = true
     } elsif ($node[0]['role'] =~ /controller/) {
       $is_cinder_node = member($cinder_nodes, 'controller')
     } else {
       $is_cinder_node = member($cinder_nodes, $node[0]['role'])
     }
   } else {
     $is_cinder_node = false
   }
|
||||
|
||||
### CINDER/VOLUME END ###
|
||||
|
||||
We only want to allocate the ``/dev/sdb`` value for Cinder, so adjust
|
||||
``$nv_physical_volume`` accordingly. Note, however, that this is a global
|
||||
value; it will apply to all servers, including the controllers --
|
||||
unless we specify otherwise, which we will in a moment.
|
||||
|
||||
**Be careful** to not add block devices to the list which contain useful
|
||||
data (e.g. block devices on which your OS resides), as they will be
|
||||
destroyed after you allocate them for Cinder.
|
||||
|
||||
Now let's look at the other storage-based service: Swift.
|
||||
|
||||
Enabling Glance and Swift
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
There aren't many changes that you will need to make to the default
configuration in order to enable Swift to work properly in Swift Compact mode,
but you will need to adjust the configuration if you want to run Swift on physical partitions. ::
|
||||
|
||||
...
|
||||
### GLANCE and SWIFT ###
|
||||
|
||||
# Which backend to use for glance
|
||||
# Supported backends are "swift" and "file"
|
||||
$glance_backend = 'swift'
|
||||
|
||||
# Use loopback device for swift:
|
||||
# set 'loopback' or false
|
||||
# This parameter controls where swift partitions are located:
|
||||
# on physical partitions or inside loopback devices.
|
||||
$swift_loopback = loopback
|
||||
|
||||
The default value is ``loopback``, which tells Swift to use a loopback storage
device, which is basically a file that acts like a drive, rather than an actual
physical drive. You can also set this value to ``false``, which tells OpenStack
to use actual physical partitions instead. ::
|
||||
|
||||
# Which IP address to bind swift components to:
|
||||
# e.g., which IP swift-proxy should listen on
|
||||
$swift_local_net_ip = $internal_address
|
||||
|
||||
# IP node of controller used during swift installation
|
||||
# and put into swift configs
|
||||
$controller_node_public = $internal_virtual_ip
|
||||
|
||||
# Hash of proxies hostname|fqdn => ip mappings.
|
||||
# This is used by controller_ha.pp manifests for haproxy setup
|
||||
# of swift_proxy backends
|
||||
$swift_proxies = $controller_internal_addresses
|
||||
|
||||
Next, you're specifying the ``swift-master``::
|
||||
|
||||
# Set hostname of swift_master.
|
||||
# It tells on which swift proxy node to build
|
||||
# *ring.gz files. Other swift proxies/storages
|
||||
# will rsync them.
|
||||
if $node[0]['role'] == 'primary-controller' {
|
||||
$primary_proxy = true
|
||||
} else {
|
||||
$primary_proxy = false
|
||||
}
|
||||
if $node[0]['role'] == 'primary-controller' {
|
||||
$primary_controller = true
|
||||
} else {
|
||||
$primary_controller = false
|
||||
}
|
||||
$master_swift_proxy_nodes = filter_nodes($nodes,'role','primary-controller')
|
||||
$master_swift_proxy_ip = $master_swift_proxy_nodes[0]['internal_address']
|
||||
|
||||
In this case, there's no separate ``fuel-swiftproxy-01``, so the master
|
||||
controller will be the primary Swift controller.
|
||||
|
||||
Configuring OpenStack to use syslog
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To use the syslog server, adjust the corresponding variables in the
|
||||
``if $use_syslog`` clause::
|
||||
|
||||
$use_syslog = true
|
||||
if $use_syslog {
|
||||
class { "::rsyslog::client":
|
||||
log_local => true,
|
||||
log_auth_local => true,
|
||||
server => '127.0.0.1',
|
||||
port => '514'
|
||||
}
|
||||
}
|
||||
|
||||
For remote logging, use the IP or hostname of the server for the ``server``
value and set the ``port`` appropriately. For local logging, set ``log_local``
and ``log_auth_local`` to ``true``.
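
As a hedged variation of the block above, remote logging might look like the
following sketch (the server address is an assumption; substitute the address of
your own log collector)::

   $use_syslog = true
   if $use_syslog {
     class { "::rsyslog::client":
       log_local      => true,
       log_auth_local => true,
       server         => '10.0.0.100',  # assumed address of the remote rsyslog server
       port           => '514'
     }
   }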
|
||||
|
||||
Setting the version and mirror type
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can customize the various versions of OpenStack's components, though it's
|
||||
typical to use the latest versions::
|
||||
|
||||
### Syslog END ###
|
||||
case $::osfamily {
|
||||
"Debian": {
|
||||
$rabbitmq_version_string = '2.8.7-1'
|
||||
}
|
||||
"RedHat": {
|
||||
$rabbitmq_version_string = '2.8.7-2.el6'
|
||||
}
|
||||
}
|
||||
# OpenStack packages and customized component versions to be installed.
|
||||
# Use 'latest' to get the most recent ones or specify exact version if you need
|
||||
# to install custom version.
|
||||
$openstack_version = {
|
||||
'keystone' => 'latest',
|
||||
'glance' => 'latest',
|
||||
'horizon' => 'latest',
|
||||
'nova' => 'latest',
|
||||
'novncproxy' => 'latest',
|
||||
'cinder' => 'latest',
|
||||
'rabbitmq_version' => $rabbitmq_version_string,
|
||||
}
|
||||
|
||||
To tell Fuel to download packages from external repos provided by Mirantis and
|
||||
your distribution vendors, make sure the ``$mirror_type`` variable is set to ``default``::
|
||||
|
||||
# If you want to set up a local repository, you will need to manually adjust
|
||||
# mirantis_repos.pp, though it is NOT recommended.
|
||||
$mirror_type = 'default'
|
||||
$enable_test_repo = false
|
||||
$repo_proxy = 'http://10.0.0.100:3128'
|
||||
|
||||
Once again, the ``$mirror_type`` **must** be set to ``default``. If you set it
|
||||
correctly in ``config.yaml`` and ran ``openstack_system`` this will already be
|
||||
taken care of. Otherwise, **make sure** to set this value yourself.
|
||||
|
||||
Future versions of Fuel will enable you to use your own internal repositories.
|
||||
|
||||
Setting verbosity
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
You also have the option to determine how much information OpenStack provides
|
||||
when performing configuration::
|
||||
|
||||
# This parameter specifies the verbosity level of log messages
|
||||
# in openstack components config. Currently, it disables or enables debugging.
|
||||
$verbose = true
|
||||
|
||||
Configuring Rate-Limits
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
OpenStack has predefined limits on different HTTP queries for the nova-compute and
Cinder services. Sometimes (e.g. for big clouds or test scenarios) these limits
are too strict. (See
http://docs.openstack.org/folsom/openstack-compute/admin/content/configuring-compute-API.html )
In this case you can change them to more appropriate values.
|
||||
|
||||
There are two hashes describing these limits: ``$nova_rate_limits`` and
|
||||
``$cinder_rate_limits``. ::
|
||||
|
||||
# Rate Limits for cinder and Nova.
|
||||
# Cinder and Nova can rate-limit your requests to API services.
|
||||
# These limits can be reduced for your installation or usage scenario.
|
||||
# Change the following variables if you want. They are measured in requests per minute.
|
||||
$nova_rate_limits = {
|
||||
'POST' => 1000,
|
||||
'POST_SERVERS' => 1000,
|
||||
'PUT' => 1000, 'GET' => 1000,
|
||||
'DELETE' => 1000
|
||||
}
|
||||
$cinder_rate_limits = {
|
||||
'POST' => 1000,
|
||||
'POST_SERVERS' => 1000,
|
||||
'PUT' => 1000, 'GET' => 1000,
|
||||
'DELETE' => 1000
|
||||
}
|
||||
...
|
||||
|
||||
Enabling Horizon HTTPS/SSL mode
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Using the ``$horizon_use_ssl`` variable, you have the option to decide whether
|
||||
the OpenStack dashboard (Horizon) uses HTTP or HTTPS::
|
||||
|
||||
...
|
||||
# 'custom': require fileserver static mount point [ssl_certs] and hostname
|
||||
# based certificate existence
|
||||
$horizon_use_ssl = false
|
||||
|
||||
This variable accepts the following values:
|
||||
|
||||
* ``false``: In this mode, the dashboard uses HTTP with no encryption.
|
||||
* ``default``: In this mode, the dashboard uses keys supplied with the
|
||||
standard Apache SSL module package.
|
||||
* ``exist``: In this case, the dashboard assumes that the domain name-based
|
||||
certificate, or keys, are provisioned in advance. This can be a certificate
|
||||
signed by any authorized provider, such as Symantec/Verisign, Comodo, GoDaddy,
|
||||
and so on. The system looks for the keys in these locations:
|
||||
|
||||
* public ``/etc/pki/tls/certs/domain-name.crt``
|
||||
* private ``/etc/pki/tls/private/domain-name.key``
|
||||
|
||||
* ``custom``: This mode requires a static mount point on the fileserver for
|
||||
``[ssl_certs]`` and certificate pre-existence. To enable this mode, configure
|
||||
the puppet fileserver by editing ``/etc/puppet/fileserver.conf`` to add::
|
||||
|
||||
[ssl_certs]
|
||||
path /etc/puppet/templates/ssl
|
||||
allow *
|
||||
|
||||
From there, create the appropriate directory::
|
||||
|
||||
# mkdir -p /etc/puppet/templates/ssl
|
||||
|
||||
Add the certificates to this directory.
|
||||
(Reload the puppetmaster service for these changes to take effect.)
|
||||
|
||||
Now we just need to make sure that all of our nodes get the proper
|
||||
values.
|
||||
|
||||
Defining the node configurations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Now that we've set all of the global values, it's time to make sure that
the actual node definitions are correct. For example, by default all
nodes will enable Cinder on ``/dev/sdb``. If you didn't want that for all
controllers, you could set ``nv_physical_volume`` to ``null`` for a specific
node or nodes. ::
|
||||
|
||||
...
|
||||
class compact_controller (
|
||||
$quantum_network_node = $quantum_netnode_on_cnt
|
||||
) {
|
||||
class { 'openstack::controller_ha':
|
||||
controller_public_addresses => $controller_public_addresses,
|
||||
controller_internal_addresses => $controller_internal_addresses,
|
||||
internal_address => $internal_address,
|
||||
public_interface => $public_int,
|
||||
internal_interface => $internal_int,
|
||||
...
|
||||
use_unicast_corosync => $use_unicast_corosync,
|
||||
ha_provider => $ha_provider
|
||||
}
|
||||
class { 'swift::keystone::auth':
|
||||
password => $swift_user_password,
|
||||
public_address => $public_virtual_ip,
|
||||
internal_address => $internal_virtual_ip,
|
||||
admin_address => $internal_virtual_ip,
|
||||
}
|
||||
}
|
||||
...
|
||||
|
||||
Fortunately, as you can see here, Fuel includes a class for the controllers, so you don't
have to repeat the configuration for each individual controller. A little further down,
this class is used to specify the individual controllers and compute nodes::
|
||||
|
||||
...
|
||||
node /fuel-controller-[\d+]/ {
|
||||
include stdlib
|
||||
class { 'operatingsystem::checksupported':
|
||||
stage => 'setup'
|
||||
}
|
||||
|
||||
class {'::node_netconfig':
|
||||
mgmt_ipaddr => $::internal_address,
|
||||
mgmt_netmask => $::internal_netmask,
|
||||
public_ipaddr => $::public_address,
|
||||
public_netmask => $::public_netmask,
|
||||
stage => 'netconfig',
|
||||
}
|
||||
|
||||
class {'nagios':
|
||||
proj_name => $proj_name,
|
||||
services => [
|
||||
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
|
||||
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
|
||||
'glance-registry','horizon', 'rabbitmq', 'mysql', 'swift-proxy',
|
||||
'swift-account', 'swift-container', 'swift-object',
|
||||
],
|
||||
whitelist => ['127.0.0.1', $nagios_master],
|
||||
hostgroup => 'controller',
|
||||
}
|
||||
|
||||
class { compact_controller: }
|
||||
$swift_zone = $node[0]['swift_zone']
|
||||
|
||||
class { 'openstack::swift::storage_node':
|
||||
storage_type => $swift_loopback,
|
||||
swift_zone => $swift_zone,
|
||||
swift_local_net_ip => $internal_address,
|
||||
}
|
||||
|
||||
class { 'openstack::swift::proxy':
|
||||
swift_user_password => $swift_user_password,
|
||||
swift_proxies => $swift_proxies,
|
||||
...
|
||||
rabbit_ha_virtual_ip => $internal_virtual_ip,
|
||||
}
|
||||
}
|
||||
|
||||
Notice also that each controller has the ``swift_zone`` specified, so each
|
||||
of the three controllers can represent each of the three Swift zones.
|
||||
|
||||
Similarly, ``site.pp`` defines a class for the compute nodes.
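
As a rough, hedged sketch only (the exact classes and parameters come from the
template you chose, so treat this as illustrative rather than authoritative), a
compute node definition follows the same pattern as the controller definition::

   node /fuel-compute-[\d+]/ {
     include stdlib
     class { 'operatingsystem::checksupported':
       stage => 'setup'
     }

     class {'::node_netconfig':
       mgmt_ipaddr    => $::internal_address,
       mgmt_netmask   => $::internal_netmask,
       public_ipaddr  => $::public_address,
       public_netmask => $::public_netmask,
       stage          => 'netconfig',
     }

     class {'nagios':
       proj_name => $proj_name,
       services  => ['nova-compute', 'nova-network', 'libvirt'],
       whitelist => ['127.0.0.1', $nagios_master],
       hostgroup => 'compute',
     }

     class { 'openstack::compute':
       internal_address => $internal_address,
       ...
     }
   }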
|
||||
|
||||
Installing Nagios Monitoring using Puppet
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Fuel provides a way to deploy Nagios for monitoring your OpenStack cluster. It
|
||||
will require the installation of an agent on the controller, compute, and
|
||||
storage nodes, as well as having a master server for Nagios which will collect
|
||||
and display all the results. The agent, the Nagios NRPE addon, allows Nagios
to execute plugins on remote Linux/Unix machines. The main reason for
doing this is to monitor basic resources (such as CPU load, memory usage, etc.)
on the remote machines, as well as more advanced metrics.
|
||||
|
||||
Nagios Agent
|
||||
++++++++++++
|
||||
|
||||
In order to install Nagios NRPE on a compute or controller node, a node should
|
||||
have the following settings: ::
|
||||
|
||||
class {'nagios':
|
||||
proj_name => 'test',
|
||||
services => ['nova-compute','nova-network','libvirt'],
|
||||
whitelist => ['127.0.0.1', $nagios_master],
|
||||
hostgroup => 'compute',
|
||||
}
|
||||
|
||||
* ``proj_name``: The environment name used for nagios commands and the
  configuration directory (``/etc/nagios/test/``).
* ``services``: All services to be monitored by nagios.
* ``whitelist``: The array of IP addresses trusted by NRPE.
* ``hostgroup``: The group to be used in the nagios master (do not forget to create
  the group in the nagios master).
|
||||
|
||||
Nagios Server
|
||||
+++++++++++++
|
||||
|
||||
In order to install Nagios Master on any convenient node, a node should have the
|
||||
following applied: ::
|
||||
|
||||
class {'nagios::master':
|
||||
proj_name => 'test',
|
||||
templatehost => {'name' => 'default-host','check_interval' => '10'},
|
||||
templateservice => {'name' => 'default-service' ,'check_interval'=>'10'},
|
||||
hostgroups => ['compute','controller'],
|
||||
contactgroups => {'group' => 'admins', 'alias' => 'Admins'},
|
||||
contacts => {'user' => 'hotkey', 'alias' => 'Dennis Hoppe',
|
||||
'email' => 'nagios@%{domain}',
|
||||
'group' => 'admins'},
|
||||
}
|
||||
|
||||
* ``proj_name``: The environment for nagios commands and the directory
|
||||
(``/etc/nagios/test/``).
|
||||
* ``templatehost``: The group of checks and intervals parameters for hosts
|
||||
(as a Hash).
|
||||
* ``templateservice``: The group of checks and intervals parameters for services
|
||||
(as a Hash).
|
||||
* ``hostgroups``: All host groups present on the NRPE nodes (as an Array).
|
||||
* ``contactgroups``: The group of contacts (as a Hash).
|
||||
* ``contacts``: Contacts to receive error reports (as a Hash)
|
||||
|
||||
Health Checks
|
||||
+++++++++++++
|
||||
|
||||
You can see the complete definition of the available services to monitor and
|
||||
their health checks at ``deployment/puppet/nagios/manifests/params.pp``.
|
||||
|
||||
Here is the list: ::
|
||||
|
||||
$services_list = {
|
||||
'nova-compute' => 'check_nrpe_1arg!check_nova_compute',
|
||||
'nova-network' => 'check_nrpe_1arg!check_nova_network',
|
||||
'libvirt' => 'check_nrpe_1arg!check_libvirt',
|
||||
'swift-proxy' => 'check_nrpe_1arg!check_swift_proxy',
|
||||
'swift-account' => 'check_nrpe_1arg!check_swift_account',
|
||||
'swift-container' => 'check_nrpe_1arg!check_swift_container',
|
||||
'swift-object' => 'check_nrpe_1arg!check_swift_object',
|
||||
'swift-ring' => 'check_nrpe_1arg!check_swift_ring',
|
||||
'keystone' => 'check_http_api!5000',
|
||||
'nova-novncproxy' => 'check_nrpe_1arg!check_nova_novncproxy',
|
||||
'nova-scheduler' => 'check_nrpe_1arg!check_nova_scheduler',
|
||||
'nova-consoleauth' => 'check_nrpe_1arg!check_nova_consoleauth',
|
||||
'nova-cert' => 'check_nrpe_1arg!check_nova_cert',
|
||||
'cinder-scheduler' => 'check_nrpe_1arg!check_cinder_scheduler',
|
||||
'cinder-volume' => 'check_nrpe_1arg!check_cinder_volume',
|
||||
'haproxy' => 'check_nrpe_1arg!check_haproxy',
|
||||
'memcached' => 'check_nrpe_1arg!check_memcached',
|
||||
'nova-api' => 'check_http_api!8774',
|
||||
'cinder-api' => 'check_http_api!8776',
|
||||
'glance-api' => 'check_http_api!9292',
|
||||
'glance-registry' => 'check_nrpe_1arg!check_glance_registry',
|
||||
'horizon' => 'check_http_api!80',
|
||||
'rabbitmq' => 'check_rabbitmq',
|
||||
'mysql' => 'check_galera_mysql',
|
||||
'apt' => 'nrpe_check_apt',
|
||||
'kernel' => 'nrpe_check_kernel',
|
||||
'libs' => 'nrpe_check_libs',
|
||||
'load' => 'nrpe_check_load!5.0!4.0!3.0!10.0!6.0!4.0',
|
||||
'procs' => 'nrpe_check_procs!250!400',
|
||||
'zombie' => 'nrpe_check_procs_zombie!5!10',
|
||||
'swap' => 'nrpe_check_swap!20%!10%',
|
||||
'user' => 'nrpe_check_users!5!10',
|
||||
'host-alive' => 'check-host-alive',
|
||||
}
|
||||
|
||||
Node definitions
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
These are the node definitions generated for a Compact HA deployment. Other
|
||||
deployment configurations generate other definitions. For example, the
|
||||
``openstack/examples/site_openstack_full.pp`` template specifies the following
|
||||
nodes:
|
||||
|
||||
* fuel-controller-01
|
||||
* fuel-controller-02
|
||||
* fuel-controller-03
|
||||
* fuel-compute-[\d+]
|
||||
* fuel-swift-01
|
||||
* fuel-swift-02
|
||||
* fuel-swift-03
|
||||
* fuel-swiftproxy-[\d+]
|
||||
* fuel-quantum
|
||||
|
||||
Using this architecture, the system includes three stand-alone swift-storage
|
||||
servers, and one or more swift-proxy servers.
|
||||
|
||||
With ``site.pp`` prepared, you're ready to perform the actual installation.
|
@ -1,163 +0,0 @@
|
||||
Deploying OpenStack
|
||||
-------------------
|
||||
|
||||
You have two options for deploying OpenStack. The easier method is to use
orchestration, but you can also deploy your nodes manually.
|
||||
|
||||
.. _orchestration:
|
||||
|
||||
Deploying via orchestration
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Manually installing a handful of servers might be manageable, but repeatable
installations, or those that involve a large number of servers, require
automated orchestration. Now you can use orchestration with Fuel through the
|
||||
``astute`` script. This script is configured using the ``astute.yaml``
|
||||
file you created when you ran ``openstack_system``.
|
||||
|
||||
To confirm that your servers are ready for orchestration, execute the command::
|
||||
|
||||
# mco ping
|
||||
|
||||
You should see all three controllers, plus the compute node, answer the call::
|
||||
|
||||
fuel-compute-01 time=107.26 ms
|
||||
fuel-controller-01 time=120.14 ms
|
||||
fuel-controller-02 time=135.94 ms
|
||||
fuel-controller-03 time=139.33 ms
|
||||
|
||||
To run the orchestrator, log in to ``fuel-pm`` and execute::
|
||||
|
||||
# astute -f astute.yaml
|
||||
|
||||
You will see a message on ``fuel-pm`` stating that the installation has started
|
||||
on fuel-controller-01. To see what's going on on the target node, type::
|
||||
|
||||
# tail -f /var/log/messages
|
||||
|
||||
Note that Puppet will require several runs to install all the different roles,
|
||||
so the first time it runs, the orchestrator will show an error, but it just
|
||||
means that the installation isn't complete. Also, after the first run on each
|
||||
server, the orchestrator doesn't output messages on fuel-pm; when it's finished
|
||||
running, it will return you to the command prompt. In the meantime, you can see
|
||||
what's going on by watching the logs on each individual machine.
|
||||
|
||||
Installing OpenStack using Puppet directly
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If for some reason you don't wish to use orchestration (for example, if you
are adding a new node to an existing (non-HA) cluster), you have the option to
install on an individual node or nodes using Puppet directly.
|
||||
|
||||
Start by logging in to the target server (fuel-controller-01 to start, if you're
|
||||
starting from scratch) and running the Puppet agent.
|
||||
|
||||
One optional step would be to use the script command to log all of your output
|
||||
so you can check for errors if necessary::
|
||||
|
||||
# script agent-01.log
|
||||
# puppet agent --test
|
||||
|
||||
You will see a great number of messages and the
installation will take a significant amount of time. When the process
has completed, press CTRL-D to stop logging and grep for errors::
|
||||
|
||||
# grep err: agent-01.log
|
||||
|
||||
If you find any errors related to other nodes, ignore them for now.
|
||||
|
||||
Now you can run the same installation procedure on fuel-controller-02
|
||||
and fuel-controller-03, as well as on fuel-compute-01.
|
||||
|
||||
Note that controllers must be installed sequentially due to the
|
||||
nature of assembling a MySQL cluster based on Galera, which means that
|
||||
one must complete its installation before the next begins, but that
|
||||
compute nodes can be installed concurrently once the controllers are
|
||||
in place.
|
||||
|
||||
In some cases, you may find errors related to resources that are not
|
||||
yet available when the installation takes place. To solve that
|
||||
problem, simply re-run the puppet agent on the affected node after running the
|
||||
other controllers, and grep again for error messages.
|
||||
|
||||
When you see no errors on any of your nodes, your OpenStack cluster is
|
||||
ready to go.
|
||||
|
||||
Examples of OpenStack installation sequences
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When running Puppet manually, the exact sequence depends on what you're trying
to achieve. In most cases, you'll need to run Puppet more than once; with every
deployment pass Puppet collects any missing information, adds it to the
OpenStack configuration, stores it in PuppetDB, and applies the necessary changes.
|
||||
|
||||
**Note:** *Sequentially run* means you don't start the next node deployment
until the previous one is finished.
|
||||
|
||||
* **Example 1:** **Full OpenStack deployment with standalone storage nodes**
|
||||
|
||||
* Create necessary volumes on storage nodes as described in
|
||||
:ref:`create-the-XFS-partition`.
|
||||
* Sequentially run a deployment pass on every SwiftProxy node
|
||||
    (``fuel-swiftproxy-01 ... fuel-swiftproxy-xx``), starting with the
    ``primary-swift-proxy`` node. Node names are set by the ``$swift_proxies``
    variable in ``site.pp``. There are 2 Swift Proxies by default.
|
||||
* Sequentially run a deployment pass on every storage node
|
||||
(``fuel-swift-01`` ... ``fuel-swift-xx``).
|
||||
* Sequentially run a deployment pass on the controller nodes
|
||||
(``fuel-controller-01 ... fuel-controller-xx``) starting with the
|
||||
``primary-controller`` node.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the
|
||||
Quantum router.
|
||||
* Run a deployment pass on every compute node
|
||||
(``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these
|
||||
nodes may be deployed in parallel.
|
||||
* Run an additional deployment pass on Controller 1 only
|
||||
(``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
|
||||
* **Example 2:** **Compact OpenStack deployment with storage and swift-proxy
|
||||
combined with nova-controller on the same nodes**
|
||||
|
||||
* Create necessary volumes on controller nodes as described in
|
||||
:ref:`create-the-XFS-partition`
|
||||
* Sequentially run a deployment pass on the controller nodes
|
||||
    (``fuel-controller-01 ... fuel-controller-xx``), starting with the
    ``primary-controller`` node. Errors in Swift storage such as ::
|
||||
|
||||
/Stage[main]/Swift::Storage::Container/Ring_container_device[<device address>]:
|
||||
Could not evaluate: Device not found check device on <device address>
|
||||
|
||||
are expected during the deployment passes until the very final pass.
|
||||
|
||||
* Run an additional deployment pass on Controller 1 only
|
||||
(``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the
|
||||
Quantum router.
|
||||
* Run a deployment pass on every compute node
|
||||
(``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers these nodes
|
||||
may be deployed in parallel.
|
||||
|
||||
* **Example 3:** **OpenStack HA installation without Swift**
|
||||
|
||||
* Sequentially run a deployment pass on the controller nodes
|
||||
(``fuel-controller-01 ... fuel-controller-xx``), starting with the primary
|
||||
controller. No errors should appear during this deployment pass.
|
||||
* Run an additional deployment pass on the primary controller only
|
||||
(``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the
|
||||
Quantum router.
|
||||
* Run a deployment pass on every compute node
|
||||
(``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers these nodes
|
||||
may be deployed in parallel.
|
||||
|
||||
* **Example 4:** **The most simple OpenStack installation: Controller + Compute
|
||||
on the same node**
|
||||
|
||||
  * Set the ``node /fuel-controller-[\d+]/`` variable in ``site.pp`` to match
    the hostname of the node on which you are going to deploy OpenStack. Set the
    ``node /fuel-compute-[\d+]/`` variable to **mismatch** the node name (see the
    sketch after this list). Run a deployment pass on this node. No errors should
    appear during this deployment pass.
|
||||
* Set the ``node /fuel-compute-[\d+]/`` variable in ``site.pp`` to match the
|
||||
hostname of the node on which you are going to deploy OpenStack. Set the
|
||||
``node /fuel-controller-[\d+]/`` variable to **mismatch** the node name. Run a
|
||||
deployment pass on this node. No errors should appear during this deployment pass.
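
The following is purely a hypothetical sketch of the first step; the hostname
``allinone`` is invented for illustration, so substitute your real hostname::

   # Make the controller definition match the target host and leave the compute
   # definition mismatched, then run a deployment pass. Swap them for step two.
   node /allinone/ {             # was: node /fuel-controller-[\d+]/
     ...
   }
   node /fuel-compute-[\d+]/ {   # left unchanged so it does NOT match "allinone"
     ...
   }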
|
@ -1,73 +0,0 @@
|
||||
Testing OpenStack
|
||||
-----------------
|
||||
|
||||
Now that you've installed OpenStack, it's time to take your new
OpenStack cloud for a test drive. Follow these steps:
|
||||
|
||||
#. On the host machine, open your browser to http://192.168.0.10/
|
||||
(adjust this value to your own ``public_virtual_ip``) and login as nova/nova
|
||||
(unless you changed this information in ``site.pp``)
|
||||
|
||||
#. Click the ``Project`` tab in the left-hand column.
|
||||
|
||||
#. Under ``Manage Compute``, choose ``Access & Security`` to set security settings:
|
||||
|
||||
#. Click ``Create Keypair`` and enter a name for the new keypair. The private
|
||||
key should be downloaded automatically. Make sure to keep it safe.
|
||||
#. Click ``Access & Security`` again and click ``Edit Rules`` for the default
|
||||
Security Group. Add a new rule allowing TCP connections from port 22 to port
|
||||
22 for all IP addresses using a CIDR of 0.0.0.0/0. Click ``Add Rule`` to save
|
||||
the new rule.
|
||||
#. Add a second new rule allowing ICMP connections with a type and code of -1
|
||||
to the default Security Group and click ``Add Rule`` to save.
|
||||
|
||||
#. Click ``Allocate IP To Project`` and add two new floating IPs. Notice that
|
||||
they come from the pool specified in ``config.yaml`` and ``site.pp``.
|
||||
|
||||
#. Click ``Images & Snapshots``, then ``Create Image``. Enter a name and
|
||||
specify the ``Image Location`` as
|
||||
https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
|
||||
with a ``Format`` of QCOW2. Check the ``Public`` checkbox.
|
||||
|
||||
#. The next step is to upload an image to use for creating VMs, but
   OpenStack Horizon has a known bug that prevents you from doing this in the
   browser. Instead, log in to any of the controllers as ``root`` and execute
   the following commands::
|
||||
|
||||
# cd ~
|
||||
# source openrc
|
||||
# glance image-create \
|
||||
     --name cirros \
|
||||
--container-format bare \
|
||||
--disk-format qcow2 \
|
||||
--is-public yes \
|
||||
--location https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
|
||||
|
||||
#. Go back to the browser and refresh the page. Launch a new instance of this image
|
||||
using the tiny flavor. Click the ``Networking`` tab and choose the default
|
||||
``net04_ext`` network, then click the ``Launch`` button.
|
||||
|
||||
#. On the ``Instances`` page:
|
||||
|
||||
#. Click the ``New Instance`` and review settings.
|
||||
#. Click the ``Logs`` tab to look at the logs.
|
||||
#. Click the ``VNC`` tab to log in. If you see just a big black rectangle, the
|
||||
machine is in screensaver mode; click the grey area and press the space bar to
|
||||
wake it up, then login as ``cirros/cubswin:)``.
|
||||
#. At the command line, enter ``ifconfig -a | more`` and see the assigned IP
|
||||
address.
|
||||
#. Enter ``sudo fdisk -l`` to see that no volume has yet been assigned to this VM.
|
||||
|
||||
#. On the ``Instances`` page, click ``Assign Floating IP`` and assign an IP
|
||||
address to your instance. You can either choose from one of the existing created
|
||||
IPs by using the pulldown menu or click the plus sign (+) to choose a network
|
||||
and allocate a new IP address.
|
||||
#. From your host machine, ping the floating ip assigned to this VM.
|
||||
#. If that works, try to ``ssh cirros@floating-ip`` from the host machine.
|
||||
|
||||
#. Back in the browser, click ``Volumes`` and ``Create Volume``. Create the new
|
||||
volume, and attach it to the instance.
|
||||
#. Go back to the ``VNC`` tab and repeat ``fdisk -l`` and see the new
|
||||
unpartitioned disk attached.
|
||||
|
||||
From here, your new VM is ready to use.
|
@ -1,18 +0,0 @@
|
||||
---
|
||||
nodes:
|
||||
- status: provisioned
|
||||
role: controller
|
||||
uid: fuel-controller-01
|
||||
- status: provisioned
|
||||
role: controller
|
||||
uid: fuel-controller-02
|
||||
- status: provisioned
|
||||
role: controller
|
||||
uid: fuel-controller-03
|
||||
- status: provisioned
|
||||
role: compute
|
||||
uid: fuel-compute-01
|
||||
attributes:
|
||||
deployment_mode: ha_compute
|
||||
deployment_engine: simplepuppet
|
||||
task_uuid: deployment_task
|
@ -1,307 +0,0 @@
|
||||
common:
|
||||
orchestrator_common:
|
||||
attributes:
|
||||
deployment_mode: multinode_compute
|
||||
task_uuid: deployment_task
|
||||
|
||||
openstack_common:
|
||||
internal_virtual_ip: 10.49.63.127
|
||||
public_virtual_ip: 10.49.54.127
|
||||
create_networks: true
|
||||
fixed_range: 192.168.0.0/16
|
||||
floating_range: 10.49.54.0/24
|
||||
swift_loopback: loopback
|
||||
nv_physical_volumes:
|
||||
- /dev/sdz
|
||||
- /dev/sdy
|
||||
external_ip_info:
|
||||
public_net_router: 10.49.54.1
|
||||
ext_bridge: 10.49.54.15
|
||||
pool_start: 10.49.54.225
|
||||
pool_end: 10.49.54.239
|
||||
segment_range: 900:999
|
||||
tenant_network_type: vlan
|
||||
network_manager: nova.network.manager.FlatDHCPManager
|
||||
auto_assign_floating_ip: true
|
||||
quantum_netnode_on_cnt: true
|
||||
use_syslog: true
|
||||
syslog_server: 10.49.63.12
|
||||
mirror_type: default
|
||||
quantum: true
|
||||
internal_interface: eth0
|
||||
public_interface: eth1
|
||||
private_interface: eth2
|
||||
public_netmask: 255.255.255.0
|
||||
internal_netmask: 255.255.255.0
|
||||
default_gateway: 10.0.1.100
|
||||
nagios_master: fuel-controller-01.localdomain
|
||||
cinder: true
|
||||
cinder_nodes:
|
||||
- controller
|
||||
swift: true
|
||||
repo_proxy: http://10.0.0.100:3128
|
||||
deployment_id: '53'
|
||||
dns_nameservers:
|
||||
- 10.0.0.100
|
||||
- 8.8.8.8
|
||||
nodes:
|
||||
- name: fuel-cobbler
|
||||
role: cobbler
|
||||
internal_address: 10.0.0.100
|
||||
public_address: 10.0.1.100
|
||||
- name: fuel-controller-01
|
||||
role: primary-controller
|
||||
internal_address: 10.0.0.101
|
||||
public_address: 10.0.1.101
|
||||
swift_zone: 1
|
||||
mountpoints: 1 2\n 2 1
|
||||
storage_local_net_ip: 10.0.0.101
|
||||
- name: fuel-controller-02
|
||||
role: controller
|
||||
internal_address: 10.0.0.102
|
||||
public_address: 10.0.1.102
|
||||
swift_zone: 2
|
||||
mountpoints: 1 2\n 2 1
|
||||
storage_local_net_ip: 10.0.0.102
|
||||
- name: fuel-controller-03
|
||||
role: controller
|
||||
internal_address: 10.0.0.104
|
||||
public_address: 10.0.1.104
|
||||
swift_zone: 3
|
||||
mountpoints: 1 2\n 2 1
|
||||
storage_local_net_ip: 10.0.0.104
|
||||
- name: fuel-compute-01
|
||||
role: compute
|
||||
internal_address: 10.0.0.105
|
||||
public_address: 10.0.1.105
|
||||
- name: fuel-compute-02
|
||||
role: compute
|
||||
internal_address: 10.0.0.106
|
||||
public_address: 10.0.1.106
|
||||
- name: fuel-compute-03
|
||||
role: compute
|
||||
internal_address: 10.0.0.107
|
||||
public_address: 10.0.1.107
|
||||
- name: fuel-swift-01
|
||||
role: storage
|
||||
internal_address: 10.0.0.108
|
||||
public_address: 10.0.1.108
|
||||
swift_zone: 4
|
||||
mountpoints: 1 2\n 2 1
|
||||
storage_local_net_ip: 10.0.0.108
|
||||
- name: fuel-swift-02
|
||||
role: storage
|
||||
internal_address: 10.0.0.109
|
||||
public_address: 10.0.1.109
|
||||
swift_zone: 5
|
||||
mountpoints: 1 2\n 2 1
|
||||
storage_local_net_ip: 10.0.0.109
|
||||
- name: fuel-swift-03
|
||||
role: storage
|
||||
internal_address: 10.0.0.110
|
||||
public_address: 10.0.1.110
|
||||
swift_zone: 6
|
||||
mountpoints: 1 2\n 2 1
|
||||
storage_local_net_ip: 10.0.0.110
|
||||
- name: fuel-swiftproxy-01
|
||||
role: primary-swift-proxy
|
||||
internal_address: 10.0.0.111
|
||||
public_address: 10.0.1.111
|
||||
- name: fuel-swiftproxy-02
|
||||
role: swift-proxy
|
||||
internal_address: 10.0.0.112
|
||||
public_address: 10.0.1.112
|
||||
|
||||
cobbler_common:
|
||||
# for Centos
|
||||
profile: "centos64_x86_64"
|
||||
# for Ubuntu
|
||||
# profile: "ubuntu_1204_x86_64"
|
||||
netboot-enabled: "1"
|
||||
# for Ubuntu
|
||||
# ksmeta: "puppet_version=2.7.19-1puppetlabs2 \
|
||||
# for Centos
|
||||
name-servers: "10.0.0.100"
|
||||
name-servers-search: "localdomain"
|
||||
gateway: 10.0.0.100
|
||||
ksmeta: "puppet_version=2.7.19-1.el6 \
|
||||
puppet_auto_setup=1 \
|
||||
puppet_master=fuel-pm.localdomain \
|
||||
puppet_enable=0 \
|
||||
ntp_enable=1 \
|
||||
mco_auto_setup=1 \
|
||||
mco_pskey=un0aez2ei9eiGaequaey4loocohjuch4Ievu3shaeweeg5Uthi \
|
||||
mco_stomphost=10.0.0.100 \
|
||||
mco_stompport=61613 \
|
||||
mco_stompuser=mcollective \
|
||||
mco_stomppassword=AeN5mi5thahz2Aiveexo \
|
||||
mco_enable=1"
|
||||
|
||||
|
||||
fuel-controller-01:
|
||||
hostname: "fuel-controller-01"
|
||||
role: controller
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "52:54:00:0a:39:ec"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.101"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-controller-01.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "52:54:00:e6:dc:c9"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "52:54:00:ae:22:04"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
fuel-controller-02:
|
||||
# If you need create 'cinder-volumes' VG at install OS -- uncomment this line and move it above in middle of ksmeta section.
|
||||
# At this line you need describe list of block devices, that must come in this group.
|
||||
# cinder_bd_for_vg=/dev/sdb,/dev/sdc \
|
||||
hostname: "fuel-controller-02"
|
||||
role: controller
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "52:54:00:e4:46:5c"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.102"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-controller-02.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "52:54:00:b4:a5:25"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "52:54:00:28:f8:06"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
fuel-controller-03:
|
||||
# If you need create 'cinder-volumes' VG at install OS -- uncomment this line and move it above in middle of ksmeta section.
|
||||
# At this line you need describe list of block devices, that must come in this group.
|
||||
# cinder_bd_for_vg=/dev/sdb,/dev/sdc \
|
||||
hostname: "fuel-controller-03"
|
||||
role: controller
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "52:54:00:09:04:40"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.103"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-controller-03.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "52:54:00:78:23:b7"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "52:54:00:84:60:bf"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
fuel-quantum:
|
||||
hostname: "fuel-quantum"
|
||||
role: quantum
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "52:54:00:68:ff:9b"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.105"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-quantum.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "52:54:00:27:49:44"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "52:54:00:19:0d:56"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
fuel-compute-01:
|
||||
hostname: "fuel-compute-01"
|
||||
role: compute
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "52:54:00:68:ff:9b"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.110"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-compute-01.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "52:54:00:27:49:44"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "52:54:00:19:0d:56"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
||||
|
||||
fuel-compute-02:
|
||||
hostname: "fuel-compute-02"
|
||||
role: compute
|
||||
interfaces:
|
||||
eth0:
|
||||
mac: "52:54:00:68:ff:9b"
|
||||
static: "1"
|
||||
ip-address: "10.0.0.111"
|
||||
netmask: "255.255.255.0"
|
||||
dns-name: "fuel-compute-02.localdomain"
|
||||
management: "1"
|
||||
eth1:
|
||||
mac: "52:54:00:27:49:44"
|
||||
static: "0"
|
||||
eth2:
|
||||
mac: "52:54:00:19:0d:56"
|
||||
static: "1"
|
||||
interfaces_extra:
|
||||
eth0:
|
||||
peerdns: "no"
|
||||
eth1:
|
||||
peerdns: "no"
|
||||
eth2:
|
||||
promisc: "yes"
|
||||
userctl: "yes"
|
||||
peerdns: "no"
|
@ -1,49 +0,0 @@
|
||||
This document explains how to use Fuel to more easily create and
|
||||
maintain an OpenStack cloud infrastructure.
|
||||
|
||||
Fuel can be used to create virtually any OpenStack configuration, but the
|
||||
installation includes several pre-defined architectures. To simplify
|
||||
matters, the guide emphasises a single common reference architecture,
|
||||
the multi-node, high-availability configuration. It begins by explaining
|
||||
that architecture, then moves on to the details of creating that
|
||||
configuration in a development setting using VirtualBox. Finally, it
|
||||
gives you the information you need to know to create this and other
|
||||
OpenStack architectures in a production environment.
|
||||
|
||||
This document assumes that you are familiar with general Linux
|
||||
commands and administration concepts, as well as general networking
|
||||
concepts. You should have some familiarity with grid or virtualization
|
||||
systems such as Amazon Web Services or VMware, as well as OpenStack
|
||||
itself, but you don't need to be an expert.
|
||||
|
||||
The Fuel User's Guide is organized as follows:
|
||||
|
||||
* Section 1, :ref:`Introduction <Introduction>` (this section), explains what
|
||||
Fuel is and gives you a general idea of how it works.
|
||||
|
||||
* Section 2, :ref:`Reference Architecture <Reference-Architecture>`, provides a
|
||||
general look at the components that make up OpenStack, and describes the
|
||||
reference architecture to be instantiated in Section 3.
|
||||
|
||||
* Section 3, :ref:`Create a multi-node OpenStack cluster using Fuel <Create-Cluster>`,
|
||||
takes you step-by-step through the process of creating a high-availability
|
||||
OpenStack cluster.
|
||||
|
||||
* Section 4, :ref:`Production Considerations <Production>`, looks at the
|
||||
real-world questions and problems involved in creating an OpenStack cluster for
|
||||
production use. It discusses issues such as network layout and hardware
|
||||
requirements, and provides tips and tricks for creating a cluster of up to 100 nodes.
|
||||
|
||||
* Even with a utility as powerful as Fuel, creating an OpenStack cluster can be
|
||||
complex, and Section 5, :ref:`Frequently Asked Questions <FAQ>`, covers many of
|
||||
the issues that tend to arise during that process.
|
||||
|
||||
* Finally, the User's Guide assumes that you are taking advantage of certain
|
||||
shortcuts, such as using a pre-built Puppet master; if you prefer not to go that
|
||||
route, Appendix A, :ref:`Creating the Puppet master <Create-PM>`, explains how to create the Puppet master manually.
|
||||
|
||||
|
||||
Let's start by taking a look at Fuel itself. We'll begin by
|
||||
explaining what it is and how it works, and then get you set up and ready
|
||||
to start using it.
|
||||
|
@ -1,19 +0,0 @@
|
||||
What is Fuel?
|
||||
-----------------
|
||||
Fuel is a ready-to-install collection of all of the packages and
|
||||
scripts you need to create a robust, configurable, vendor-independent
|
||||
OpenStack cloud in your own environment.
|
||||
|
||||
A single OpenStack cloud consists of packages from many different open
|
||||
source projects, each with its own requirements, installation
|
||||
procedures, and configuration management. Fuel brings all of these
|
||||
projects together into a single open source distribution, with
|
||||
components that have been tested and are guaranteed to work together,
|
||||
all wrapped up using scripts to help you to work through a single
|
||||
installation rather than multiple smaller installations.
|
||||
|
||||
Simply put, Fuel is a way for you to easily configure and install an
|
||||
OpenStack-based infrastructure in your own environment.
|
||||
|
||||
.. image:: /pages/introduction/FuelSimpleDiagramv.gif
|
||||
:width: 800px
|
@ -1,31 +0,0 @@
|
||||
How Fuel Works
|
||||
--------------
|
||||
|
||||
Fuel works on the premise that rather than installing each of the
|
||||
myriad components that make up OpenStack directly, you can instead use
|
||||
a configuration management system such as Puppet to create scripts
|
||||
that can provide a configurable, repeatable, sharable installation
|
||||
process.
|
||||
|
||||
In practice, that means that the process of using Fuel looks like this:
|
||||
|
||||
#. First, use Fuel's automation tools and instructions to set up a master
|
||||
node with Puppet Master and Cobbler. This process only needs to be completed
|
||||
once per installation.
|
||||
|
||||
#. Next, use Fuel's snippets, kickstart files, and preseed files for Cobbler
|
||||
to boot the appropriate servers from bare metal and automatically install the
|
||||
appropriate operating systems. These virtual or physical servers boot up
|
||||
already prepared to call on the Puppet Master to receive their respective
|
||||
OpenStack components.
|
||||
|
||||
#. Finally, to complete the basic OpenStack install, use Fuel's puppet manifests
|
||||
to install OpenStack on the newly created servers. These manifests are
|
||||
completely customizable, enabling you to start with one of the included
|
||||
OpenStack architectures and adapt to your own situation as necessary.
|
||||
|
||||
.. image:: https://docs.google.com/drawings/pub?id=15vTTG2_575M7-kOzwsYyDmQrMgCPT2joLF2Cgiyzv7Q&w=678&h=617
|
||||
|
||||
Fuel comes with several pre-defined deployment configurations, some of which include
|
||||
additional options from which you can choose.
|
||||
|
@ -1,52 +0,0 @@
|
||||
Deployment Configurations Provided By Fuel
|
||||
------------------------------------------
|
||||
|
||||
One of the advantages of Fuel is that it comes with several pre-built
|
||||
deployment configurations that you can use to immediately build your own
|
||||
OpenStack cloud infrastructure. These are well-specified configurations of
|
||||
OpenStack and its constituent components tailored to one or more cloud use cases.
|
||||
Fuel provides the ability to create the following cluster types without requiring
|
||||
extensive customization:
|
||||
|
||||
**Single node**: Perfect for getting a basic feel for how OpenStack works, the
|
||||
Single-node installation is the simplest way to get OpenStack up and running.
|
||||
The Single-node installation provides an easy way to install an entire OpenStack
|
||||
cluster on a single physical or virtual machine.
|
||||
|
||||
**Multi-node (non-HA)**: The Multi-node (non-HA) installation enables you to try
|
||||
out additional OpenStack services such as Cinder, Quantum, and Swift without
|
||||
requiring the additional hardware involved in ensuring high availability.
|
||||
In addition to the ability to independently specify which services to activate,
|
||||
you also have the following options:
|
||||
|
||||
**Compact Swift**: When you choose this option, Swift will be installed on
|
||||
your controllers, reducing your hardware requirements by eliminating the
|
||||
need for additional Swift servers.
|
||||
|
||||
**Standalone Swift**: This option enables you to install independent Swift
|
||||
nodes, so that you can separate their operation from your controller nodes.
|
||||
|
||||
**Multi-node (HA)**: When you're ready to begin your move to production, the
|
||||
Multi-node (HA) configuration is a straightforward way to create an OpenStack
|
||||
cluster that provides high availability. With three controller nodes and the
|
||||
ability to individually specify services such as Cinder, Quantum, and Swift,
|
||||
Fuel provides the following variations of the Multi-node (HA) configuration:
|
||||
|
||||
**Compact Swift**: When you choose this variation, Swift will be installed
|
||||
on your controllers, reducing your hardware requirements by eliminating the
|
||||
need for additional Swift servers while still addressing high availability
|
||||
requirements.
|
||||
|
||||
**Standalone Swift**: This variation enables you to install independent Swift
|
||||
nodes, so that you can separate their operation from your controller nodes.
|
||||
|
||||
**Compact Quantum**: If you don't need the flexibility of a separate Quantum
|
||||
node, Fuel provides the option to combine your Quantum node with one of your
|
||||
controllers.
|
||||
|
||||
In addition to these configurations, Fuel is designed to be completely
|
||||
customizable. Upcoming editions of this guide discuss techniques for
|
||||
creating additional OpenStack deployment configurations.
|
||||
|
||||
To configure Fuel immediately for more extensive variations on these use cases,
|
||||
you can `contact Mirantis for further assistance <http://www.mirantis.com/contact/>`_.
|
@ -1,49 +0,0 @@
|
||||
Supported Software
|
||||
------------------
|
||||
|
||||
Fuel has been tested and is guaranteed to work with the following software
|
||||
components:
|
||||
|
||||
* Operating Systems
|
||||
* CentOS 6.4 (x86_64 architecture only)
|
||||
* RHEL 6.4 (x86_64 architecture only)
|
||||
|
||||
* Puppet (IT automation tool)
|
||||
* 2.7.19
|
||||
|
||||
* MCollective
|
||||
* 2.2.4
|
||||
|
||||
* Cobbler (bare-metal provisioning tool)
|
||||
* 2.2.3
|
||||
|
||||
* OpenStack
|
||||
* Grizzly release 2013.1
|
||||
|
||||
* Hypervisor
|
||||
* KVM
|
||||
|
||||
* Open vSwitch
|
||||
* 1.10.0
|
||||
|
||||
* HA Proxy
|
||||
* 1.4.19
|
||||
|
||||
* Galera
|
||||
* 23.2.2
|
||||
|
||||
* RabbitMQ
|
||||
* 2.8.7
|
||||
|
||||
* Pacemaker
|
||||
* 1.1.8
|
||||
|
||||
* Corosync
|
||||
* 1.4.3
|
||||
|
||||
* Keepalived
|
||||
* 1.2.4
|
||||
|
||||
* Nagios
|
||||
* 3.4.4
|
||||
|
@ -1,17 +0,0 @@
|
||||
Download Fuel
|
||||
-------------
|
||||
|
||||
The first step in installing Fuel is to download the version appropriate for
|
||||
your environment.
|
||||
|
||||
Fuel is available for Essex, Folsom and Grizzly OpenStack installations, and
|
||||
will be available for Havana shortly after Havana's release.
|
||||
|
||||
To make your installation easier, we also offer a pre-built ISO for installing
|
||||
the master node with Puppet Master and Cobbler. You can mount this ISO in a
|
||||
physical or VirtualBox machine in order to easily create your master node.
|
||||
(Instructions for performing this step without the ISO are given in
|
||||
:ref:`Appendix A <Create-PM>`.)
|
||||
|
||||
The master node ISO, along with other Fuel releases, is available in the
|
||||
`Downloads <http://fuel.mirantis.com/your-downloads/>`_ section of the Fuel portal.
|
@ -1,8 +0,0 @@
|
||||
Release Notes
|
||||
-------------
|
||||
|
||||
.. include:: /pages/introduction/release-notes/v3-0-grizzly.rst
|
||||
.. include:: /pages/introduction/release-notes/v2-1-folsom.rst
|
||||
.. include:: /pages/introduction/release-notes/v2-0-folsom.rst
|
||||
.. include:: /pages/introduction/release-notes/v1-0-essex.rst
|
||||
|
@ -1,13 +0,0 @@
|
||||
|
||||
v1.0-essex
|
||||
^^^^^^^^^^
|
||||
|
||||
* Features:
|
||||
|
||||
* Puppet manifests for deploying OpenStack Essex in HA mode
|
||||
* Active/Active HA architecture for Essex, based on RabbitMQ / MySQL Galera / HAProxy / keepalived
|
||||
* Cobbler-based bare-metal provisioning for CentOS 6.3 and RHEL 6.3
|
||||
* Access to the mirror with OpenStack packages
|
||||
* Configuration templates for different OpenStack cluster setups
|
||||
* User Guide
|
||||
|
@ -1,12 +0,0 @@
|
||||
|
||||
v2.0-folsom
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Features:
|
||||
|
||||
* Puppet manifests for deploying OpenStack Folsom in HA mode
|
||||
* Active/Active HA architecture for Folsom, based on RabbitMQ / MySQL Galera / HAProxy / keepalived
|
||||
* Added support for Ubuntu 12.04 in addition to CentOS 6.3 and RHEL 6.3 (includes bare metal provisioning, Puppet manifests, and OpenStack packages)
|
||||
* Supports deploying Folsom with Quantum/OVS
|
||||
* Supports deploying Folsom with Cinder
|
||||
* Supports Puppet 2.7 and 3.0
|
@ -1,24 +0,0 @@
|
||||
|
||||
v2.1-folsom
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Features
|
||||
|
||||
* Support deploying Quantum on controller nodes, as well as on a dedicated networking node
|
||||
* Active/Standby HA for Quantum with Pacemaker when Quantum is deployed on controller nodes
|
||||
* Logging: an option to send OpenStack logs to local and remote locations through syslog
|
||||
* Monitoring: deployment of Nagios, health checks for infrastructure components (OpenStack API, MySQL, RabbitMQ)
|
||||
* Installation of Puppet Master & Cobbler Server node from ISO
|
||||
* Deployment orchestration based on mcollective eliminates the need to run Puppet manually on each node
|
||||
* Recommended master node setup for mid-scale deployments, tested up to 100 nodes
|
||||
|
||||
* Improvements
|
||||
|
||||
* Support for multiple environments from a single Fuel master node
|
||||
* RabbitMQ service moved behind HAProxy to make controller failures transparent to the clients
|
||||
* Updated RabbitMQ to 2.8.7 to improve handling on expired HA queues under Ubuntu
|
||||
* Changed RabbitMQ init script to automatically reassemble RabbitMQ cluster after failures
|
||||
* Configurable HTTP vs. HTTPS for Horizon
|
||||
* Changed mirror type option to either be 'default' (installation from the internet) or 'custom' (installation from a local mirror containing packages)
|
||||
* Option to allow cinder-volume deployment on controller nodes as well as compute nodes
|
||||
|
@ -1,17 +0,0 @@
|
||||
|
||||
v2.2-folsom
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Features
|
||||
|
||||
* NIC Bonding support
|
||||
* New firewall (iptables) module
|
||||
* One-pass swift deployment
|
||||
* User choice on where to store Cinder volumes
|
||||
* Ability to plug in custom services in HA mode under HAProxy
|
||||
* Add controller and compute nodes without downtime
|
||||
* Remove controller and compute nodes without downtime *(caveat with Cinder on controllers)*
|
||||
|
||||
* Improvements
|
||||
|
||||
* CentOS 6.3 package repository moved to Mirantis mirror
|
@ -1,356 +0,0 @@
|
||||
Release Notes for Fuel™ and Fuel™ Web Version 3.0
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
June 6, 2013
|
||||
|
||||
Mirantis, Inc. is releasing version 3.0.1 of the Fuel™ Library and Fuel™ Web products. This is a `cumulative` maintenance release to the previously available version 3.0. It contains the complete distribution of version 3.0 as well as additional enhancements and defect fixes. Customers are strongly recommended to install version 3.0.1.
|
||||
|
||||
These release notes supplement the product documentation and list enhancements, resolved issues and known issues. Issues addressed specifically in version 3.0.1 will be clearly marked.
|
||||
|
||||
* :ref:`what-is-fuel`
|
||||
* :ref:`what-is-fuel-web`
|
||||
* :ref:`new-features`
|
||||
* :ref:`resolved-issues`
|
||||
* :ref:`known-issues`
|
||||
* :ref:`get-products`
|
||||
* :ref:`contact-support`
|
||||
|
||||
|
||||
.. _what-is-fuel:
|
||||
|
||||
|
||||
What is Fuel™?
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Fuel™ is the ultimate OpenStack Do-it-Yourself Kit. Purpose built to assimilate the hard-won experience of our services team, it contains the tooling, information, and support you need to accelerate time to production with OpenStack cloud. Fuel is designed to work with Puppet configuration management software, using Cobbler for bare metal provisioning. Fuel includes all core OpenStack components including Nova, Glance, Horizon, Swift, Keystone, Quantum and Cinder plus Open source packages for components required to support High Availability deployment configurations, including Galera, keepalived, and HA Proxy.
|
||||
|
||||
|
||||
.. _what-is-fuel-web:
|
||||
|
||||
|
||||
What is Fuel™ Web?
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Fuel™ Web is a simplified way to deploy OpenStack with the Fuel Library of scripts. If you are familiar with tools like Cobbler and Puppet and want maximum flexibility in your deployment, you can use the command-line capabilities of the Fuel Library to install OpenStack. However, if you want a streamlined, graphical console experience, you can install OpenStack using Fuel Web. It uses the exact same underlying scripts as the Fuel Library, but offers a more user-friendly experience for deploying and managing OpenStack environments.
|
||||
|
||||
|
||||
.. _new-features:
|
||||
|
||||
|
||||
New Features in Fuel and Fuel Web 3.0.x
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* Support for OpenStack Grizzly
|
||||
* Deployment improvements
|
||||
|
||||
* Fuel
|
||||
|
||||
* Deployment of CentOS 6.4
|
||||
* Deployment of Cinder on standalone nodes
|
||||
* User defined disk space allocation for the base OS, Cinder and VMs
|
||||
* Add new nodes without redeployment
|
||||
* Updated Oracle VirtualBox® deployment scripts
|
||||
|
||||
* Fuel Library Only
|
||||
|
||||
* Swift installation occurs in a single pass instead of multiple passes
|
||||
* Users may now choose where to store Cinder volumes
|
||||
|
||||
* Network configuration enhancements
|
||||
|
||||
* Fuel
|
||||
|
||||
* Partition networks across multiple network interface cards
|
||||
* Mapping of logical networks to physical interfaces
|
||||
* Define multiple IP ranges for public and floating networks
|
||||
* Security improvements
|
||||
|
||||
* Fuel Library Only
|
||||
|
||||
* Support for NIC bonding
|
||||
* Improved firewall module
|
||||
|
||||
**Support for OpenStack Grizzly**
|
||||
|
||||
`OpenStack Grizzly <http://www.openstack.org/software/grizzly/>`_ is the seventh release of the open source software for building public, private, and hybrid clouds. Fuel and Fuel Web now both feature support for deploying clusters using the Grizzly version of OpenStack, including deployment of a new nova-conductor service. Deployments can be done in a variety of configurations including High Availability (HA) mode.
|
||||
|
||||
For a list of known limitations, please refer to the Known Issues section below.
|
||||
|
||||
|
||||
.. _deployment-improvements:
|
||||
|
||||
|
||||
Deployment Improvements
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Deployment of CentOS 6.4**
|
||||
|
||||
CentOS 6.4 is now used as the base operating system for the Fuel master node as well as the deployed slave nodes when deploying via Fuel Web. It is also the Operating System included in the Fuel Library ISO.
|
||||
Red Hat Enterprise Linux continues to be an available choice when deploying through the Fuel Library CLI. Support for Ubuntu® is expected in a near future release.
|
||||
|
||||
**Deployment of Cinder from Fuel Web**
|
||||
|
||||
This release introduces the ability to deploy Cinder on a set of standalone nodes from Fuel Web.
|
||||
|
||||
**User defined disk space allocation**
|
||||
|
||||
Previously, deployments created using Fuel Web used all allocated space on a defined hard drive (virtual or physical). You may now in Fuel Web define the amount of disk space you want to use for each component on a given node. For example, you may wish to define that more space be utilized by Cinder and less space be used for the remaining needs of the base system.
|
||||
|
||||
**Ability to add new nodes without redeployment**
|
||||
|
||||
In previous releases of Fuel Web, to add a node you had to tear down the deployed OpenStack environment and rebuild it with the new configuration. Now, you can choose to add a new compute or Cinder node without having to redeploy the entire environment. The node will be deployed, it will automatically be pointed to RabbitMQ and MySQL and it will start receiving messages from scheduler. Please see the Known Issues section for limitations on this feature.
|
||||
|
||||
**Updated VirtualBox scripts**
|
||||
|
||||
The Fuel Web Virtualbox scripts provided for convenient creation of a small demo or POC cloud have been updated to more closely resemble a production environment. Each virtual machine created by the scripts will have 3 disks and 3 network cards, which can be then configured in Fuel Web.
|
||||
|
||||
**Swift Installation in a single pass**
|
||||
|
||||
During the deployment of Swift from the Fuel Library CLI, users were previously required to run Puppet against the Swift node several times to successfully complete a deployment. This requirement has been removed and you can now deploy Swift nodes in a single operation. This reduces the deployment time for High Availability configurations.
|
||||
|
||||
**User choice of Cinder deployment**
|
||||
|
||||
Previously, Cinder could only be deployed on a compute or controller node when utilizing the Fuel Library CLI. Now, you may choose to deploy Cinder as a standalone node or deployed with a compute or controller node.
|
||||
|
||||
|
||||
.. _net-config-improvements:
|
||||
|
||||
|
||||
Network Configuration Improvements
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Partition networks across multiple network interface cards**
|
||||
|
||||
Fuel Web now recognizes when multiple network interfaces are present on a node and enables you to use each NIC independently during network configuration.
|
||||
|
||||
**Mapping of logical networks to physical interfaces**
|
||||
|
||||
Already available through Fuel, mapping of logical networks allows you to specify that a given virtual network be run only on a chosen physical interface. This ability is now provided as an option within Fuel Web.
|
||||
|
||||
**Define multiple IP ranges for public and floating networks**
|
||||
|
||||
Previously Fuel Web assumed that the gateway is always the first IP in the public network. Users can now define multiple IP ranges for public and floating networks, and specify public gateway IP addresses. It is also possible to specify floating IPs one by one.
|
||||
|
||||
**Security improvements**
|
||||
|
||||
In the OpenStack settings tab, a user can provide an SSH public key for the nodes. In this case, remote access to slave nodes is restricted to SSH public key authentication only. In addition, the Fuel Web master node root password can be changed with the “passwd” command.
|
||||
|
||||
**NIC bonding**
|
||||
|
||||
NIC bonding is the ability to combine multiple network interfaces together to increase throughput beyond what a single connection could sustain, and to provide redundancy in case one of the links fails. This configuration is now supported by the Fuel Library. This enables, for example, use of switches that utilize the Link Aggregation Control Protocol (LACP). This is available through the Fuel Library CLI but not when using Fuel Web.
|
||||
|
||||
**Improved firewall module**
|
||||
|
||||
Fuel provides a basic firewall module during the deployment of an OpenStack environment. An upgraded module is now included that allows a greater capability to manage and configure IP tables. These configurations are done automatically by Fuel and do not require you to make any additional changes to the Fuel Library scripts to take advantage of this new module.
|
||||
|
||||
|
||||
.. _resolved-issues:
|
||||
|
||||
|
||||
Resolved Issues in Fuel and Fuel Web 3.0
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Ability to remove offline nodes**
|
||||
|
||||
In the previous release if a node was powered off, it was impossible to remove the entire environment or remove an offline node from it. This limitation is now resolved.
|
||||
|
||||
**Networks restricted to 8 bit netmasks**
|
||||
|
||||
Fuel and Fuel Web now work properly with networks utilizing a netmask larger or smaller than 8 bits (i.e. x.x.x.x/24).
|
||||
|
||||
**Duplicate entries in /var/lib/cobbler/cobbler_hosts**
|
||||
|
||||
When deploying nodes, an entry in /var/lib/cobbler/cobbler_hosts was created with a different IP address for each physical interface (regardless of whether a cable was connected or not). This caused deployment to fail because RabbitMQ appeared to be down on the controller (even though it was not), since the wrong IP was returned from DNS. This issue has been resolved.
|
||||
|
||||
**Log files grow too quickly**
|
||||
|
||||
In the previous release, each API call was logged to the same log file as all other messages. Node agents sent data to the API every minute or so, and these messages were also logged. Because of this, the log became unreadable and grew in size very quickly.
|
||||
|
||||
Fuel Web now separates log files - one for API calls, one for HTTP request/response, and another for static requests. This makes each log file more readable and keeps each log file from growing in size as quickly.
|
||||
|
||||
**Design IP ranges for public/floating nets instead of simple CIDR**
|
||||
|
||||
This issue has been resolved through the implementation of the more flexible IP parameters in Fuel Web.
|
||||
|
||||
**Deployment fails when nodes have drives greater than 2TB**
|
||||
|
||||
Previously, the Cobbler snippet for partitioning the disk did not properly set the disk label to GPT to support partitions greater than 2TB. This has now been corrected.
|
||||
|
||||
|
||||
.. _other-resolved-issues:
|
||||
|
||||
|
||||
Other resolved issues
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* A Cobbler error no longer occurs when deploying a previously removed node.
|
||||
* A better validation of puppet status has addressed a “Use failed_to_restart” error in the last_run_summary of a puppet run
|
||||
* Large RAM sizes (e.g. 1 TB) are now correctly handled
|
||||
* Removal of nodes is handled much better
|
||||
* Special characters are now correctly handled in OpenStack passwords
|
||||
* Corrected a situation where puppet would not attempt a retry after the error “Could not request certificate: Error 400 on SERVER: Could not find certificate request for [hostname].tld”
|
||||
* Fixed simultaneous operations to ensure that threads in astute are safe
|
||||
* Nodes with multiple NICs can now boot normally via cobbler
|
||||
|
||||
.. _resolved-in-301:
|
||||
|
||||
Resolved issues in Fuel and Fuel Web 3.0.1
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Support for CCISS controllers**
|
||||
|
||||
In some cases, the hard drives on target nodes were not detected during deployment because the nodes utilized a non-standard CCISS hard drive controller. This situation has been resolved. The target nodes can now use CCISS HD controllers and the hard drives will be recognized during deployment.
|
||||
|
||||
**Increased timeout during provisioning**
|
||||
|
||||
On occasion, the deployment would fail due to a timeout while deploying the OS, especially for Cinder and Compute nodes with high capacity hard drives. This is because the process to format the hard drives took longer than the timeout value. This has been corrected by increasing the timeout value.
|
||||
|
||||
**SSL certificate error**
|
||||
|
||||
Sometimes, puppet would produce an error stating “Failed to generate additional resources using 'eval_generate: Error 400 on SERVER”. This issue has been corrected.
|
||||
|
||||
**Recognizing network interfaces that start with em instead of eth**
|
||||
|
||||
When a NIC is embedded in the motherboard, some operating systems will use the prefix of ``em`` (meaning “embedded”) instead of ``eth``. Fuel previously had an issue installing onto systems where the NIC used a prefix of em. This has now been corrected.
|
||||
|
||||
**Installing Fuel Web onto a system with multiple CD drives**
|
||||
|
||||
The installation script for Fuel Web is designed to mount ``/dev/cdrom`` and copy files to the system. When multiple CD drives exist on a system, the ``/dev/cdrom`` symbolic link does not always point to the expected device. The scripts have been corrected to work properly in this scenario.
|
||||
|
||||
**Sufficient disk space for Glance when using defaults**
|
||||
|
||||
Previously in Fuel Web, if a controller node was deployed with the default disk configuration, only a small amount of space was allocated to the OS volume (28 GB on a 2 TB drive, for instance). This limited the number of images that could be stored in Glance. All available disk space is now allocated by default. This default can be changed by selecting the Disk Configuration button when viewing the details of a node prior to deployment.
|
||||
|
||||
**Logical volume for the base operating system properly allocated**
|
||||
|
||||
In previous releases, Fuel Web improperly allocated only a small percentage of the logical volume for the base operating system when a user requested that the entire volume be used for the base system. Previously, this situation had to be resolved manually. This issue has now been corrected and Fuel Web will properly allocate all of the available disk space for the base system when requested to do so.
|
||||
|
||||
**Creating a Cinder volume from a Glance image**
|
||||
|
||||
Previously, in a simple deployment you couldn’t create a Cinder volume from a Glance image. This was because the ``glance_host`` parameter was not set in ``cinder.conf`` and the default is ``localhost``. The ``glance_host`` parameter is now set to the controller IP.
|
||||
|
||||
**Auto-assigning floating IP addresses**
|
||||
|
||||
Previously in Fuel Web, even when a user enabled auto-assigning of floating IP addresses in the OpenStack settings tab, the feature still was not enabled and user had to manually associate floating IP addresses to instances. Fuel Web now correctly assigns the floating IP addresses to instances when the option is enabled.
|
||||
|
||||
**Floating IP address range**
|
||||
|
||||
In some isolated cases in the previous releases, Fuel would create only one floating IP address instead of a specified range defined by the user. This issue has been resolved and Fuel will now properly create all of the floating IP addresses in the requested range.
|
||||
|
||||
**Adding users to multiple projects in Horizon**
|
||||
|
||||
Previously, when adding a user to multiple projects in Horizon, only the first project was accessible. There was no drop-down for selecting the other assigned projects. This could lead to users, especially the admin user, being assigned to other projects as a member only - thus losing admin access to Horizon. This issue has now been resolved and all of the projects are now visible when adding a user in Horizon.
|
||||
|
||||
**Time synchronization issues no longer lead to error condition**
|
||||
|
||||
From time to time ntpd may fail to synchronize and when this happens, the offset gets progressively larger until it resets itself and starts the cycle again of getting further out of synchronization. This issue could lead to an error condition within mCollective. This issue has been addressed by increasing the Time-to-Live (TTL) value for mCollective and setting the panic threshold for NTP to zero.
|
||||
|
||||
**Deployment on small capacity hard drives using Fuel Web**
|
||||
|
||||
In previous releases, Fuel Web would produce an error when trying to deploy OpenStack components onto nodes with hard drives smaller than 13 GB. Fuel Web now calculates the minimum size based on multiple factors, including OS size, boot size, and swap size (which itself is calculated based on available RAM). However, Mirantis still recommends a minimum hard drive size of 15 GB if possible.
|
||||
|
||||
|
||||
|
||||
.. _known-issues:
|
||||
|
||||
|
||||
Known Issues in Fuel and Fuel Web 3.0.x
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Support for OpenStack Grizzly**
|
||||
|
||||
The following improvements in Grizzly are not currently supported directly by Fuel:
|
||||
|
||||
* Nova Compute
|
||||
|
||||
* Cells
|
||||
* Availability zones
|
||||
* Host aggregates
|
||||
|
||||
* Neutron (formerly Quantum)
|
||||
|
||||
* LBaaS (Load Balancer as a Service)
|
||||
* Multiple L3 and DHCP agents per cloud
|
||||
|
||||
* Keystone
|
||||
|
||||
* Multi-factor authentication
|
||||
* PKI authentication
|
||||
|
||||
* Swift
|
||||
|
||||
* Regions
|
||||
* Adjustable replica count
|
||||
* Cross-project ACLs
|
||||
|
||||
* Cinder
|
||||
|
||||
* Support for FCoE
|
||||
* Support for LIO as an iSCSI backend
|
||||
* Support for multiple backends on the same manager
|
||||
|
||||
* Ceilometer
|
||||
* Heat
|
||||
|
||||
It is expected that these capabilities will be supported in a future release of Fuel.
|
||||
|
||||
In addition, support for High Availability of Quantum on CentOS or Red Hat Enterprise Linux (RHEL) is not available due to a limitation within the CentOS kernel. It is expected that this issue will be addressed by a patch to CentOS and RHEL in late 2013.
|
||||
|
||||
**Ability to add new nodes without redeployment**
|
||||
|
||||
It’s possible to add new compute and Cinder nodes to an existing OpenStack environment. However, this capability cannot yet be used to deploy additional controller nodes in HA mode.
|
||||
|
||||
**Ability to map logical networks to physical interfaces**
|
||||
|
||||
It is not possible to map logical OpenStack networks to physical interfaces without using VLANs. Even if there is just one L3 network, you will still be required to use a VLAN. This limitation only applies to Fuel Web; the Fuel Library does not have any such limitation.
|
||||
|
||||
**Other Limitations:**
|
||||
|
||||
* Swift in High Availability mode must use loopback devices.
|
||||
* In Fuel Web, the size for Swift is hard-coded to be 10 GB. If you need to change this, please contact support; they can help modify this value.
|
||||
* When using Fuel Web, IP addresses for slave nodes (but not the master node) are assigned via DHCP during PXE booting from the master node. Because of this, even after installation, the Fuel Web master node must remain available and continue to act as a DHCP server.
|
||||
* When using Fuel Web, the floating VLAN and public networks must use the same L2 network. In the UI, these two networks are locked together, and can only run via the same physical interface on the server.
|
||||
* Fuel Web creates all networks on all servers, even if they are not required by a specific role (e.g. a Cinder node will have VLANs created and addresses obtained from the public network).
|
||||
* Some OpenStack services listen on all interfaces, which may be detected and reported by security audits or scans. Please discuss this issue with your security administrator if it is of concern in your organization.
|
||||
* The provided scripts that enable Fuel Web to be automatically installed on VirtualBox will create separated host interfaces. If a user associates logical networks to different physical interfaces on different nodes, it will lead to network connectivity issues between OpenStack components. Please check to see if this has happened prior to deployment by clicking on the “Verify Networks” button on the networking tab.
|
||||
* The networks tab was redesigned to allow the user to provide IP ranges instead of CIDRs; however, not all user input is properly verified. Entering a wrong value may cause failures in deployment.
|
||||
* Quantum Metadata API agents in High Availability mode are only supported for compact and minimal scenarios if network namespaces (netns) are not used.
|
||||
* The Quantum namespace metadata proxy is not supported unless netns is used.
|
||||
* Quantum multi-node balancing conflicts with pacemaker, so the two should not be used together in the same environment.
|
||||
* In order for virtual machines to have access to the Internet and/or external networks, you need to set the floating network prefix and public_address so that they do not intersect with the network of the external interface to which they belong. This is due to specifics of how Quantum sets Network Address Translation (NAT) rules and the lack of namespace support in CentOS 6.4.
|
||||
* The ``Total Space`` displayed in the ``Disk Configuration`` screen may be slightly larger than what is actually available. Either choose “use all unallocated space” or enter a number significantly lower than the displayed value when modifying volume groups.
|
||||
|
||||
.. _get-products:
|
||||
|
||||
|
||||
How to obtain the products
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
**Fuel**
|
||||
|
||||
The first step in installing Fuel is to download the version appropriate for your environment.
|
||||
|
||||
To make your installation easier, we also offer a pre-built ISO for installing the master node with Puppet Master and Cobbler. You can mount this ISO in a physical or VirtualBox machine in order to easily create your master node. (Instructions for performing this step without the ISO are given in Appendix A of the documentation.)
|
||||
|
||||
The master node ISO, along with other Fuel releases, is available in the `Downloads <http://fuel.mirantis.com/your-downloads>`_ section of the Fuel portal.
|
||||
|
||||
**Fuel Web**
|
||||
|
||||
Fuel Web is distributed as a self-contained ISO that, once downloaded, does not require Internet access to provision OpenStack nodes. This ISO is available in the `Fuel Web Download <http://fuel.mirantis.com/your-downloads>`_ section of the Fuel Portal. Here you will also find the Oracle VirtualBox scripts to enable quick and easy deployment of a multi-node OpenStack cloud for evaluation purposes.
|
||||
|
||||
|
||||
.. _contact-support:
|
||||
|
||||
|
||||
Contacting Support
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You can contact support online, through e-mail, or via phone. Instructions on how to use any of these contact options can be found here: https://mirantis.zendesk.com/home.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
To learn more about how Mirantis can help your business, please visit http://www.mirantis.com.
|
||||
|
||||
Mirantis, Fuel, the Mirantis logos and other Mirantis marks are trademarks or registered trademarks of Mirantis, Inc. in the U.S. and/or certain other countries. Red Hat Enterprise Linux is a registered trademark of Red Hat, Inc. Ubuntu is a registered trademark of Canonical Ltd. VirtualBox is a registered trademark of Oracle Corporation. All other registered trademarks or trademarks belong to their respective companies. © 2013 Mirantis, Inc. All rights reserved.
|
@ -1,25 +0,0 @@
|
||||
OpenStack is a very versatile and flexible cloud management platform.
|
||||
By exposing its portfolio of cloud infrastructure services – compute, storage,
|
||||
networking and other core resources — through REST APIs, it enables a wide range
|
||||
of control over these services, from the perspective of both integrated
|
||||
Infrastructure as a Service (IaaS) controlled by applications and automated
|
||||
manipulation of the infrastructure itself.
|
||||
|
||||
This architectural flexibility doesn’t set itself up magically, however. It asks
|
||||
you, the user and cloud administrator, to organize and manage a large array of
|
||||
configuration options. Consequently, getting the most out of your OpenStack
|
||||
cloud over time – in terms of flexibility, scalability, and manageability –
|
||||
requires a thoughtful combination of automation and configuration choices.
|
||||
|
||||
Mirantis Fuel for OpenStack was created to solve exactly this problem. This
|
||||
step-by-step guide takes you through the process of:
|
||||
|
||||
* Configuring OpenStack and its supporting components into a robust cloud
|
||||
architecture
|
||||
* Deploying that architecture through an effective, well-integrated automation
|
||||
package that sets up and maintains the components and their configurations
|
||||
* Providing access to a well-integrated, up-to-date set of components known to
|
||||
work together
|
||||
|
||||
|
||||
|
@ -1,2 +0,0 @@
|
||||
.. include:: /pages/production-considerations/0010-introduction.rst
|
||||
.. include:: /pages/production-considerations/0015-sizing-hardware.rst
|
@ -1,8 +0,0 @@
|
||||
One of the advantages of using Fuel is that it makes it easy to set up an
|
||||
OpenStack cluster so that you can feel your way around and get your feet wet.
|
||||
You can easily set up a cluster using test, or even virtual machines, but when
|
||||
you're ready to do an actual deployment there are a number of things you need to
|
||||
consider.
|
||||
|
||||
In this section, you'll find information such as how to size the hardware for
|
||||
your cloud and how to handle large-scale deployments.
|
@ -1,219 +0,0 @@
|
||||
Sizing Hardware
|
||||
---------------
|
||||
|
||||
One of the first questions that comes to mind when planning an OpenStack deployment
|
||||
is "what kind of hardware do I need?" Finding the answer is rarely simple, but
|
||||
getting some idea is not impossible.
|
||||
|
||||
Many factors contribute to decisions regarding hardware for an OpenStack cluster
|
||||
-- `contact Mirantis <http://www.mirantis.com/contact/>`_ for information on
|
||||
your specific situation -- but in general, you will want to consider the
|
||||
following four areas:
|
||||
|
||||
* CPU
|
||||
* Memory
|
||||
* Disk
|
||||
* Networking
|
||||
|
||||
Your needs in each of these areas are going to determine your overall hardware
|
||||
requirements.
|
||||
|
||||
CPU
|
||||
^^^
|
||||
|
||||
The basic consideration when it comes to CPU is how many GHz you're going to need.
|
||||
To determine that, think about how many VMs you plan to support, and the average
|
||||
speed you plan to provide, as well as the maximum you plan to provide for a single VM.
|
||||
For example, consider a situation in which you expect:
|
||||
|
||||
* 100 VMs
|
||||
* 2 EC2 compute units (2 GHz) average
|
||||
* 16 EC2 compute units (16 GHz) max
|
||||
|
||||
What does this mean? Well, to make it possible to provide the maximum CPU, you
|
||||
will need at least 5 cores (16 GHz/(2.4 GHz per core * 1.3 for hyperthreading))
|
||||
per machine, and at least 84 cores ((100 VMs * 2 GHz per VM)/2.4 GHz per core) in total.
|
||||
|
||||
If you were to choose the Intel E5 2650-70 8 core CPU, that means you need 10-11
|
||||
sockets (84 cores / 8 cores per socket).
|
||||
|
||||
All of this means you will need 5-6 dual-socket servers (11 sockets / 2 sockets
|
||||
per server), for a "packing density" of 17 VMs per server (100 VMs / 6 servers).
|
||||
|
||||
You will need to take into account a couple of additional notes:
|
||||
|
||||
* This model assumes you are not oversubscribing your CPU.
|
||||
* If you are considering Hyperthreading, count each core as 1.3, not 2.
|
||||
* Choose a CPU with a good price-to-performance ratio.
|
||||
|
||||
Memory
|
||||
^^^^^^
|
||||
|
||||
The process of determining memory requirements is similar to determining CPU.
|
||||
Start by deciding how much memory will be devoted to each VM. In this example,
|
||||
with 4 GB per VM and a maximum of 32 GB for a single VM, you will need 400 GB of RAM.
|
||||
|
||||
For cost reasons, you will want to use 8 GB or smaller DIMMs, so considering
|
||||
16 - 24 slots per server (or 128 GB at the low end) you will need 4 servers to
|
||||
meet your needs.
|
||||
|
||||
However, remember that you need 6 servers to meet your CPU requirements, so
|
||||
instead you can go with six 64 GB or 96 GB machines.
|
||||
|
||||
Again, you do not want to oversubscribe memory.
|
||||
|
||||
Disk Space
|
||||
^^^^^^^^^^
|
||||
|
||||
When it comes to disk space there are several types that you need to consider:
|
||||
|
||||
* Ephemeral (the local drive space for a VM)
|
||||
* Persistent (the remote volumes that can be attached to a VM)
|
||||
* Object Storage (such as images or other objects)
|
||||
|
||||
As far as local drive space that must reside on the compute nodes, in our example
|
||||
of 100 VMs, our assumptions are:
|
||||
|
||||
* 50 GB local space per VM
|
||||
* 5 TB total of local space (100 VMs * 50 GB per VM)
|
||||
* 500 GB of persistent volume space per VM
|
||||
* 50 TB total persistent storage
|
||||
|
||||
Again, you have 6 servers, so that means you're looking at 0.9 TB per server
|
||||
(5 TB / 6 servers) for local drive space.
|
||||
|
||||
Throughput
|
||||
~~~~~~~~~~
|
||||
|
||||
As far as throughput, that's going to depend on what kind of storage you choose.
|
||||
In general, you calculate IOPS based on the packing density (drive IOPS * drives
|
||||
in the server / VMs per server), but the actual drive IOPS will depend on what
|
||||
you choose. For example:
|
||||
|
||||
* 3.5" slow and cheap (100 IOPS per drive, with 2 mirrored drives)
|
||||
|
||||
* 100 IOPS * 2 drives / 17 VMs per server = 12 Read IOPS, 6 Write IOPS
|
||||
|
||||
* 2.5" 15K (200 IOPS, 4 600 GB drive, RAID 10)
|
||||
|
||||
* 200 IOPS * 4 drives / 17 VMs per server = 48 Read IOPS, 24 Write IOPS
|
||||
|
||||
* SSD (40K IOPS, 8 x 300 GB drives, RAID 10)
|
||||
|
||||
* 40K * 8 drives / 17 VMs per server = 19K Read IOPS, 9.5K Write IOPS
|
||||
|
||||
Clearly, SSD gives you the best performance, but the difference in cost between
|
||||
that and the lower end solution is going to be significant, to say the least.
|
||||
You'll need to decide based on your own situation.
|
||||
|
||||
Remote storage
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
IOPS will also be a factor in determining how you decide to handle persistent
|
||||
storage. For example, consider these options for laying out your 50 TB of
|
||||
remote volume space:
|
||||
|
||||
* 12 drive storage frame using 3 TB 3.5" drives mirrored
|
||||
|
||||
* 36 TB raw, or 18 TB usable space per 2U frame
|
||||
* 3 frames (50 TB / 18 TB per frame)
|
||||
* 12 slots x 100 IOPS per drive = 1200 Read IOPS, 600 Write IOPS per frame
|
||||
* 3 frames x 1200 IOPS per frame / 100 VMs = 36 Read IOPS, 18 Write IOPS per VM
|
||||
|
||||
* 24 drive storage frame using 1TB 7200 RPM 2.5" drives
|
||||
|
||||
* 24 TB raw, or 12 TB usable space per 2U frame
|
||||
* 5 frames (50 TB / 12 TB per frame)
|
||||
* 24 slots x 100 IOPS per drive = 2400 Read IOPS, 1200 Write IOPS per frame
|
||||
* 5 frames x 2400 IOPS per frame / 100 VMs = 120 Read IOPS, 60 Write IOPS per VM
|
||||
|
||||
You can accomplish the same thing with a single 36 drive frame using 3 TB drives,
|
||||
but this becomes a single point of failure in your cluster.
|
||||
|
||||
Object storage
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
When it comes to object storage, you will find that you need more space than you
|
||||
think. For instance, this example specifies 50 TB of object storage. Easy, right?
|
||||
|
||||
Well, no. Object storage uses a default of 3 times the required space for
|
||||
replication, which means you will need 150 TB. However, to accommodate two
|
||||
hand-off zones, you will need 5 times the required space, which means 250 TB.
|
||||
|
||||
But the calculations don't end there. You don't ever want to run out of space,
|
||||
so "full" should really be more like 75% of capacity, which means 333 TB, or a
|
||||
multiplication factor of 6.66.
|
||||
|
||||
Of course, that might be a bit much to start with; you might want to start with
|
||||
a happy medium of a multiplier of 4, then acquire more hardware as your drives
|
||||
begin to fill up. That means 200 TB in this example.
|
||||
|
||||
So how do you put that together? If you were to use 3 TB 3.5" drives, you could
|
||||
use a 12 drive storage frame, with 6 servers hosting 36 TB each (for a total of 216 TB).
|
||||
|
||||
You could also use a 36 drive storage frame, with just 2 servers hosting 108 TB
|
||||
each, but it's not recommended due to several factors, from the high cost of
|
||||
failure to replication and capacity issues.
|
||||
|
||||
Networking
|
||||
^^^^^^^^^^
|
||||
|
||||
Perhaps the most complex part of designing an OpenStack cluster is the networking.
|
||||
An OpenStack cluster can involve multiple networks even beyond the Public,
|
||||
Private, and Internal networks. Your cluster may involve tenant networks, storage
|
||||
networks, multiple tenant private networks, and so on. Many of these will be
|
||||
VLANs, and all of them will need to be planned out.
|
||||
|
||||
In terms of the example network, consider these assumptions:
|
||||
|
||||
* 100 Mbits/second per VM
|
||||
* HA architecture
|
||||
* Network Storage is not latency sensitive
|
||||
|
||||
In order to achieve this, you can use 2 1Gb links per server
|
||||
(2 x 1000 Mbits/second / 17 VMs = 118 Mbits/second). Using 2 links also helps with HA.
|
||||
|
||||
You can also increase throughput and decrease latency by using 2 x 10 Gb links,
|
||||
bringing the bandwidth per VM to 1 Gb/second, but if you're going to do that,
|
||||
you've got one more factor to consider.
|
||||
|
||||
Scalability and oversubscription
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
It is one of the ironies of networking that 1Gb Ethernet generally scales better
|
||||
than 10Gb Ethernet -- at least until 100Gb switches are more commonly available.
|
||||
It's possible to aggregate the 1Gb links in a 48 port switch, so that you have
|
||||
48 x 1Gb links down, but 4 x 10GB links up. Do the same thing with a 10Gb switch,
|
||||
however, and you have 48 x 10Gb links down and 4 x 100Gb links up, resulting in
|
||||
oversubscription.
|
||||
|
||||
Like many other issues in OpenStack, you can avoid this problem to a great extent
|
||||
with careful planning. Problems only arise when you are moving between racks,
|
||||
so plan to create "pods", each of which includes both storage and compute nodes.
|
||||
Generally, a pod is the size of a non-oversubscribed L2 domain.
|
||||
|
||||
Hardware for this example
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
In this example, you are looking at:
|
||||
|
||||
* 2 data switches (for HA), each with a minimum of 12 ports for data (2 x 1Gb
|
||||
links per server x 6 servers)
|
||||
* 1 x 1Gb switch for IPMI (1 port per server x 6 servers)
|
||||
* Optional Cluster Management switch, plus a second for HA
|
||||
|
||||
Because your network will in all likelihood grow, it's best to choose 48 port
|
||||
switches. Also, as your network grows, you will need to consider uplinks and
|
||||
aggregation switches.
|
||||
|
||||
Summary
|
||||
^^^^^^^
|
||||
|
||||
In general, your best bet is to choose a large multi-socket server, such as a 2
|
||||
socket server with a balance of I/O, CPU, memory, and disk. Look for a 1U low
|
||||
cost R-class or 2U high density C-class server. Some good alternatives for
|
||||
compute nodes include:
|
||||
|
||||
* Dell PowerEdge R620
|
||||
* Dell PowerEdge C6220 Rack Server
|
||||
* Dell PowerEdge R720XD (for high disk or IOPS requirements)
|
@ -1,98 +0,0 @@
|
||||
Redeploying an environment
|
||||
--------------------------
|
||||
|
||||
Because Puppet is additive only, there is no ability to revert changes as you
|
||||
would in a typical application deployment. If a change needs to be backed out,
|
||||
you must explicitly add a configuration to reverse it, check this configuration
|
||||
in, and promote it to production using the pipeline. This means that if a
|
||||
breaking change did get deployed into production, typically a manual fix was
|
||||
applied, with the proper fix subsequently checked into version control.
|
||||
|
||||
Fuel uses Puppet environments to let you isolate code changes during
development while minimizing the headaches associated with maintaining
multiple environments serviced by one Puppet server.
|
||||
|
||||
Environments
|
||||
^^^^^^^^^^^^
|
||||
|
||||
Puppet supports putting nodes into separate 'environments'. These environments
|
||||
can map cleanly to your development, QA and production life cycles, so it’s a
|
||||
way to hand out different code to different nodes.
|
||||
|
||||
* On the Master/Server Node
|
||||
|
||||
The Puppet Master tries to find modules using its ``modulepath`` setting,
|
||||
which is typically something like ``/etc/puppet/modules``. You usually just
|
||||
set this value once in your ``/etc/puppet/puppet.conf``. Environments expand
|
||||
on this idea and give you the ability to use different settings for different
|
||||
environments.
|
||||
|
||||
For example, you can specify several search paths. The following example
|
||||
dynamically sets the ``modulepath`` so Puppet will check a per-environment
|
||||
folder for a module before serving it from the main set::
|
||||
|
||||
[master]
|
||||
modulepath = $confdir/$environment/modules:$confdir/modules
|
||||
|
||||
[production]
|
||||
manifest = $confdir/manifests/site.pp
|
||||
|
||||
[development]
|
||||
manifest = $confdir/$environment/manifests/site.pp
|
||||
|
||||
* On the Agent Node
|
||||
|
||||
Once the agent node makes a request, the Puppet Master gets informed of its
|
||||
environment. If you don’t specify an environment, the agent uses the default
|
||||
``production`` environment.
|
||||
|
||||
To set an environment agent-side, just specify the environment setting in the
|
||||
``[agent]`` block of ``puppet.conf``::
|
||||
|
||||
[agent]
|
||||
environment = development
|
||||
|
||||
|
||||
Deployment pipeline
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* Deploy
|
||||
|
||||
In order to deploy multiple environments that don't interfere with each other,
|
||||
you should specify the ``$deployment_id`` option in
|
||||
``/etc/puppet/manifests/site.pp``. It should be an even integer value in the
|
||||
range of 2-254.
|
||||
|
||||
This value is used in dynamic environment-based tag generation. Fuel also applies
|
||||
that tag globally to all resources on each node. It is also used for the
|
||||
keepalived daemon, which evaluates a unique ``virtual_router_id``.
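  For example, a minimal sketch of what this might look like at the top of
  ``site.pp`` (the value ``42`` is purely illustrative; use any even integer
  between 2 and 254 that is unique among your environments)::

      # /etc/puppet/manifests/site.pp
      # Unique per environment; keepalived derives its virtual_router_id from it.
      $deployment_id = '42'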
|
||||
|
||||
* Clean/Revert
|
||||
|
||||
At this stage, you just need to make sure the environment is in its
|
||||
original/virgin state.
|
||||
|
||||
* Puppet node deactivate
|
||||
|
||||
This will ensure that any resources exported by that node will stop appearing
|
||||
in the catalogs served to the agent nodes::
|
||||
|
||||
puppet node deactivate <node>
|
||||
|
||||
where ``<node>`` is the fully qualified domain name as seen in
|
||||
``puppet cert list --all``.
|
||||
|
||||
You can deactivate nodes manually one by one, or execute the following command
|
||||
to automatically deactivate all nodes::
|
||||
|
||||
puppet cert list --all | awk '! /DNS:puppet/ { gsub(/"/, "", $2); print $2}' | xargs puppet node deactivate
|
||||
|
||||
* Redeploy
|
||||
|
||||
Fire up the puppet agent again to apply a desired node configuration.
|
||||
|
||||
Links
|
||||
^^^^^
|
||||
|
||||
* http://puppetlabs.com/blog/a-deployment-pipeline-for-infrastructure/
|
||||
* http://docs.puppetlabs.com/guides/environment.html
|
@ -1,65 +0,0 @@
|
||||
Large Scale Deployments
|
||||
-----------------------
|
||||
|
||||
When deploying large clusters (those of 100 nodes or more) there are two basic
|
||||
bottlenecks:
|
||||
|
||||
* Certificate signing requests and Puppet Master/Cobbler capacity
|
||||
* Downloading of Operating System and other software packages
|
||||
|
||||
Both of these bottlenecks can be mitigated with careful planning.
|
||||
|
||||
If you are deploying Fuel from the ISO, Fuel takes care of these problems by
|
||||
careful use of caching and orchestration, but it's good to have a sense of how
|
||||
to solve these problems.
|
||||
|
||||
Certificate signing requests and Puppet Master/Cobbler capacity
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When deploying a large cluster, you may find that Puppet Master begins to have
|
||||
difficulty when you start exceeding 20 or more simultaneous requests. Part of
|
||||
this problem is because the initial process of requesting and signing certificates
|
||||
involves \*.tmp files that can create conflicts. To solve this problem, you
|
||||
have two options: reduce the number of simultaneous requests, or increase the
|
||||
number of Puppet Master/Cobbler servers.
|
||||
|
||||
Reducing the number of simultaneous requests is a simple matter of staggering
|
||||
Puppet agent runs. Orchestration can provide a convenient way to accomplish this
|
||||
goal. You don't need extreme staggering (1 to 5 seconds will do) but if this
|
||||
method isn't practical, you can increase the number of Puppet Master/Cobbler servers.
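If you do decide to stagger the runs yourself, a minimal sketch (the node list,
delay, and use of SSH are illustrative assumptions; mcollective or any other
orchestration tool works just as well) looks like this::

    # Kick off agents one at a time with a short delay between launches
    for node in $(cat nodes.txt); do
        ssh "$node" 'puppet agent --onetime --no-daemonize' &
        sleep 2   # 1 to 5 seconds of stagger is enough
    done

The Puppet agent's built-in ``splay`` setting can achieve a similar effect by
adding a random delay before each scheduled run.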
|
||||
|
||||
If you're simply overwhelming the Puppet Master process and not running into file
|
||||
conflicts, one way to get around this problem is to use Puppet Master with Thin
|
||||
as a backend and nginx as a front end. This configuration will enable you to
|
||||
dynamically scale the number of Puppet Master processes up and down to accommodate load.
|
||||
|
||||
.. You can find sample configuration files for nginx and puppetmasterd at [CONTENT NEEDED HERE].
|
||||
|
||||
You can also increase the number of servers by creating a cluster of servers
|
||||
behind a round robin DNS managed by a service such as HAProxy. You will also
|
||||
need to ensure that these nodes are kept in sync. For Cobbler, that means a
|
||||
combination of the ``--replicate`` switch, XMLRPC for metadata, rsync for
|
||||
profiles and distributions. Similarly, Puppet Master and PuppetDB can be kept in
|
||||
sync with a combination of rsync (for modules, manifests, and SSL data) and
|
||||
database replication.
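As an illustrative sketch only (host names are placeholders, and the exact
``cobbler replicate`` flags vary between Cobbler releases), keeping a secondary
node in sync might look something like this::

    # On the secondary Cobbler node: pull distros and profiles from the primary
    cobbler replicate --master=cobbler-01.example.com --distros="*" --profiles="*"

    # Keep Puppet Master data in sync (modules, manifests, and SSL data)
    rsync -a puppet-01.example.com:/etc/puppet/modules/   /etc/puppet/modules/
    rsync -a puppet-01.example.com:/etc/puppet/manifests/ /etc/puppet/manifests/
    rsync -a puppet-01.example.com:/var/lib/puppet/ssl/   /var/lib/puppet/ssl/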
|
||||
|
||||
.. image:: /pages/production-considerations/cobbler-puppet-ha.png

Downloading of Operating System and other software packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Large deployments also suffer from a bottleneck when downloading software.
One way to avoid this problem is to bond multiple 1 Gbps interfaces
together. You might also want to consider 10 Gbps Ethernet, if the rest of
your architecture warrants it. (See "Sizing Hardware" for more information on
choosing networking equipment.)

Another option is to reduce the amount of data that must be downloaded in the
first place by using either apt-cacher, which acts as a repository cache, or a
private repository.

To use apt-cacher, the kickstarts provided by Cobbler to each node should specify
Cobbler's IP address and the apt-cacher port as the proxy server. This will
prevent all of the nodes from having to download the software individually.
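
For Ubuntu nodes installed via preseed, for example, this is a single line (a sketch; ``10.20.0.2`` stands in for your Cobbler server's address, and 3142 is apt-cacher's default port)::

    d-i mirror/http/proxy string http://10.20.0.2:3142
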
`Contact Mirantis <http://www.mirantis.com/contact/>`_ for information on creating
a private repository.
@ -1,121 +0,0 @@
Overview
--------

Before you install any hardware or software, you must know what it is
you're trying to achieve. This section looks at the basic components of
an OpenStack infrastructure and organizes them into one of the more
common reference architectures. You'll then use that architecture as a
basis for installing OpenStack in the next section.

As you know, OpenStack provides the following basic services:

* **Compute**: Compute servers are the workhorses of your installation;
  they're the servers on which your users' virtual machines are created.
  `Nova-scheduler` controls the life cycle of these VMs.

* **Networking**: Because an OpenStack cluster (virtually) always includes
  multiple servers, the ability for them to communicate with each other and
  with the outside world is crucial. Networking was originally handled by the
  `Nova-network` service, but it is slowly giving way to the newer `Quantum`
  networking service. Authentication and authorization for these transactions
  are handled by `Keystone`.

* **Storage**: OpenStack provides for two different types of storage: block
  storage and object storage. Block storage is traditional data storage, with
  small, fixed-size blocks that are mapped to locations on storage media. At
  its simplest level, OpenStack provides block storage using `nova-volume`, but
  it is common to use `Cinder`.

  Object storage, on the other hand, consists of single variable-size objects
  that are described by system-level metadata, and you can access this
  capability using `Swift`.

OpenStack storage is used for your users' objects, but it is also used for
storing the images used to create new VMs. This capability is handled by `Glance`.

These services can be combined in many different ways. Out of the box,
Fuel supports the following deployment configurations:

Single node deployment
^^^^^^^^^^^^^^^^^^^^^^

In a production environment, you will never have a single-node
deployment of OpenStack, partly because it forces you to make a number
of compromises as to the number and types of services that you can
deploy. It is, however, extremely useful if you just want to see how
OpenStack works from a user's point of view. In this case, all of the
essential services run on a single server:

.. image:: https://docs.google.com/drawings/d/1gGNYYayPAPPHgOYi98Dmebry4hP1SOGF2APXWzbnNo8/pub?w=767&h=413

Multi-node (non-HA) deployment (compact Swift)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

More commonly, your OpenStack installation will consist of multiple
servers. Exactly how many is up to you, of course, but the main idea
is that your controller(s) are separate from your compute servers, on
which your users' VMs will actually run. One arrangement that will
enable you to achieve this separation while still keeping your
hardware investment relatively modest is to house your storage on your
controller nodes.

Multi-node (non-HA) deployment (standalone Swift)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A more common arrangement is to provide separate servers for storage.
This has the advantage of reducing the number of controllers you must
provide; because Swift runs on its own servers, you can reduce the
number of controllers from three (or five, for a full Swift implementation) to
one, if desired:

.. image:: https://docs.google.com/drawings/d/1nVEtfpNLaLV4EBKJQleLxovqMVrDCRT7yFWTYUQASB0/pub?w=767&h=413

Multi-node (HA) deployment (Compact)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Production environments typically require high availability, which
involves several architectural requirements. Specifically, you will
need at least three controllers, and certain components will be
deployed in multiple locations to prevent single points of failure.
That's not to say, however, that you can't reduce hardware requirements
by combining your storage, network, and controller nodes:

.. image:: https://docs.google.com/drawings/d/1xLv4zog19j0MThVGV9gSYa4wh1Ma4MQYsBz-4vE1xvg/pub?w=767&h=413

Multi-node (HA) deployment (Compact Quantum)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Another way you can add functionality to your cluster without
increasing hardware requirements is to install Quantum on your
controller nodes. This architecture still provides high availability,
but avoids the need for a separate Quantum node:

.. image:: https://docs.google.com/drawings/d/1GYNM5yTJSlZe9nB5SHnlrqyMfVRdVh02OFLwXlz-itc/pub?w=767&h=413

Multi-node (HA) deployment (Standalone)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For larger production deployments, it's more common to provide
dedicated hardware for storage and networking. This architecture still
gives you the advantages of high availability, but this clean
separation makes your cluster more maintainable by separating storage,
networking, and controller functionality:

.. image:: https://docs.google.com/drawings/d/1rJEZi5-l9oemMmrkH5UPjitQQDVGuZQ1KS0pPWTuovY/pub?w=769&h=594

Where Fuel really shines is in the creation of more complex
architectures, so in this document you'll learn how to use Fuel to
easily create a multi-node HA OpenStack cluster. To reduce the amount
of hardware you'll need in order to follow the installation in section 3,
however, this guide focuses on the Multi-node HA Compact architecture.

Let's take a closer look at the details of this deployment configuration.
@ -1,25 +0,0 @@
A closer look at the Multi-node (HA) Compact deployment
--------------------------------------------------------

In this section, you'll learn more about the Multi-node (HA) Compact
deployment configuration and how it achieves high availability in preparation
for installing this cluster in section 3. As you may recall, this
configuration looks something like this:

.. image:: https://docs.google.com/drawings/d/1xLv4zog19j0MThVGV9gSYa4wh1Ma4MQYsBz-4vE1xvg/pub?w=767&h=413

OpenStack services are interconnected by RESTful HTTP-based APIs and
AMQP-based RPC messages. Redundancy for stateless OpenStack API
services is therefore implemented through a combination of Virtual IP (VIP)
management using keepalived and load balancing using HAProxy. Stateful
OpenStack components, such as the state database and messaging server,
rely on their respective active/active modes for high availability.
For example, RabbitMQ uses built-in clustering capabilities, while the
database uses MySQL/Galera replication.
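
A minimal keepalived sketch of that VIP arrangement (the interface name, VRRP router ID, and address below are illustrative placeholders) would look something like this on each controller::

    vrrp_instance openstack_vip {
        interface eth0
        virtual_router_id 51
        priority 101          # use a lower priority on the other controllers
        virtual_ipaddress {
            10.20.0.10
        }
    }
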
.. image:: https://docs.google.com/drawings/pub?id=1PzRBUaZEPMG25488mlb42fRdlFS3BygPwbAGBHudnTM&w=750&h=491

Let's take a closer look at what an OpenStack deployment looks like, and
what it will take to achieve high availability for an OpenStack
deployment.
@ -1,80 +0,0 @@
Logical Setup
^^^^^^^^^^^^^

An OpenStack HA cluster involves, at a minimum, three types of nodes:
controller nodes, compute nodes, and storage nodes.

Controller Nodes
++++++++++++++++

The first order of business in achieving high availability (HA) is
redundancy, so the first step is to provide multiple controller nodes.
You must keep in mind, however, that the database uses Galera to
achieve HA, and Galera is a quorum-based system. That means that you must provide
at least 3 controller nodes.

.. image:: https://docs.google.com/drawings/pub?id=1aftE8Yes7CdVSZgZD1A82T_2GqL2SMImtRYU914IMyQ&w=869&h=855

Every OpenStack controller runs keepalived, which manages a single
Virtual IP (VIP) for all controller nodes, and HAProxy, which manages
HTTP and TCP load balancing of requests going to OpenStack API
services, RabbitMQ, and MySQL.
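
As a sketch of how one such stateless service is fronted (the VIP, ports, and controller addresses are illustrative values, not the configuration Fuel generates), an HAProxy section for nova-api might look like this::

    listen nova-api
        bind 10.20.0.10:8774
        balance roundrobin
        option httpchk
        server controller-1 192.168.0.3:8774 check
        server controller-2 192.168.0.4:8774 check
        server controller-3 192.168.0.5:8774 check
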
When an end user accesses the OpenStack cloud using Horizon or makes a
request to the REST API for services such as nova-api, glance-api,
keystone-api, quantum-api, nova-scheduler, MySQL, or RabbitMQ, the
request goes to the live controller node currently holding the VIP,
and the connection is terminated by HAProxy. When the next request
comes in, HAProxy handles it, and may send it to the original
controller or another in the cluster, depending on load conditions.

Each of the services housed on the controller nodes has its own
mechanism for achieving HA:

* nova-api, glance-api, keystone-api, quantum-api and nova-scheduler are
  stateless services that do not require any special attention besides load balancing.
* Horizon, as a typical web application, requires sticky sessions to be enabled
  at the load balancer.
* RabbitMQ provides active/active high availability using mirrored queues.
* MySQL high availability is achieved through Galera active/active multi-master
  deployment.

Compute Nodes
+++++++++++++

OpenStack compute nodes are, in many ways, the foundation of your
cluster; they are the servers on which your users will create their
Virtual Machines (VMs) and host their applications. Compute nodes need
to talk to controller nodes and reach out to essential services such
as RabbitMQ and MySQL. They use the same approach that provides
redundancy to the end users of Horizon and REST APIs, reaching out to
controller nodes using the VIP and going through HAProxy.

.. image:: https://docs.google.com/drawings/pub?id=16gsjc81Ptb5SL090XYAN8Kunrxfg6lScNCo3aReqdJI&w=873&h=801

Storage Nodes
+++++++++++++

In this OpenStack cluster reference architecture, shared storage acts
as a backend for Glance, so that multiple Glance instances running on
controller nodes can store and retrieve images from it. To
achieve this, you are going to deploy Swift. This enables you to use
it not only for storing VM images, but also for any other objects such
as user files.
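
A hypothetical ``glance-api.conf`` excerpt pointing Glance at Swift as its backend might look like this (the option names follow the Glance releases of this era, and the endpoint, tenant, and password values are placeholders)::

    default_store = swift
    swift_store_auth_address = http://10.20.0.10:5000/v2.0/
    swift_store_user = services:glance
    swift_store_key = <glance-password>
    swift_store_create_container_on_put = True
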
.. image:: https://docs.google.com/drawings/pub?id=1Xd70yy7h5Jq2oBJ12fjnPWP8eNsWilC-ES1ZVTFo0m8&w=777&h=778
@ -1,51 +0,0 @@
Cluster Sizing
^^^^^^^^^^^^^^

This reference architecture is well suited for production-grade
OpenStack deployments at medium and large scale, when you can afford
to allocate several servers for your OpenStack controller nodes in
order to build a fully redundant and highly available environment.

The absolute minimum requirement for a highly-available OpenStack
deployment is to allocate 4 nodes:

* 3 controller nodes, combined with storage
* 1 compute node

.. image:: https://docs.google.com/drawings/pub?id=19Dk1qD5V50-N0KX4kdG_0EhGUBP7D_kLi2dU6caL9AM&w=767&h=413

If you want to run storage separately from the controllers, you can do that as
well by raising the bar to 7 nodes:

* 3 controller nodes
* 3 storage nodes
* 1 compute node

.. image:: https://docs.google.com/drawings/pub?id=1xmGUrk2U-YWmtoS77xqG0tzO3A47p6cI3mMbzLKG8tY&w=769&h=594

Of course, you are free to choose how to deploy OpenStack based on the
amount of available hardware and on your goals (such as whether you
want a compute-oriented or storage-oriented cluster).

For a typical OpenStack compute deployment, you can use this table as
high-level guidance to determine the number of controller, compute,
and storage nodes you should have:

============= =========== ======= ==============
# of Machines Controllers Compute Storage
============= =========== ======= ==============
4-10          3           1-7     on controllers
11-40         3           5-34    3 (separate)
41-100        4           31-90   6 (separate)
>100          5           >86     9 (separate)
============= =========== ======= ==============
@ -1,78 +0,0 @@
Network Architecture
^^^^^^^^^^^^^^^^^^^^

The current architecture assumes the presence of 3 NICs in each node,
but it can be customized for a different number of NICs (fewer or more).
In this case, let's consider a typical example of 3 NICs.
They're utilized as follows:

* **eth0**: the internal management network, used for communication with Puppet
  & Cobbler
* **eth1**: the public network, and floating IPs assigned to VMs
* **eth2**: the private network, for communication between OpenStack VMs, and
  the bridge interface (VLANs)

In the multi-host networking mode, you can choose between the
FlatDHCPManager and VlanManager network managers in OpenStack. The
figure below illustrates the relevant nodes and networks.

.. image:: https://docs.google.com/drawings/pub?id=11KtrvPxqK3ilkAfKPSVN5KzBjnSPIJw-jRDc9fiYhxw&w=810&h=1060

Let's take a closer look at each network and how it's used within the cluster.

Public Network
++++++++++++++

This network allows inbound connections to VMs from the outside world
(allowing users to connect to VMs from the Internet). It also allows
outbound connections from VMs to the outside world.

For security reasons, the public network is usually isolated from the
private network and the internal (management) network. Typically, it's a
single Class C network from your globally routed or private network
range.

To enable Internet access to VMs, the public network provides the
address space for the floating IPs assigned to individual VM instances
by the project administrator. Nova-network or Quantum services can
then configure this address on the public network interface of the
Network controller node. If the cluster uses nova-network, nova-network
uses iptables to create a Destination NAT from this address to
the fixed IP of the corresponding VM instance through the appropriate
virtual bridge interface on the Network controller node.

In the other direction, the public network provides connectivity to
the globally routed address space for VMs. The IP address from the
public network that has been assigned to a compute node is used as the
source for the Source NAT performed for traffic going from VM
instances on the compute node to the Internet.
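
Conceptually, the resulting rules are ordinary iptables NAT entries along these lines (the addresses and chain names here are illustrative, not the exact rules nova-network generates)::

    # Inbound: traffic to the floating IP is DNATed to the instance's fixed IP
    iptables -t nat -A nova-network-PREROUTING -d 172.16.0.100 \
        -j DNAT --to-destination 10.0.0.5

    # Outbound: traffic from the instance is SNATed to a public address
    iptables -t nat -A nova-network-float-snat -s 10.0.0.5 \
        -j SNAT --to-source 172.16.0.100
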
The public network also provides VIPs for Endpoint nodes, which are
used to connect to OpenStack service APIs.

Internal (Management) Network
+++++++++++++++++++++++++++++

The internal network connects all OpenStack nodes in the cluster. All
components of an OpenStack cluster communicate with each other using
this network. This network must be isolated from both the private and
public networks for security reasons.

The internal network can also be used for serving iSCSI protocol
exchanges between Compute and Storage nodes.

This network is usually a single Class C network from your private,
non-globally-routed IP address range.

Private Network
+++++++++++++++

The private network facilitates communication between each tenant's
VMs. Private network address spaces are part of the enterprise network
address space. Fixed IPs of virtual instances are directly accessible
from the rest of the enterprise network.

The private network can be segmented into separate, isolated VLANs,
which are managed by nova-network or Quantum services.
@ -1,3 +0,0 @@
.. include:: /pages/reference-architecture/0050-technical-considerations-overview.rst
.. include:: /pages/reference-architecture/0060-quantum-vs-nova-network.rst
@ -1,7 +0,0 @@
Technical Considerations
------------------------

Before performing any installations, you'll need to make a number of
decisions about which services to deploy, but from a general
architectural perspective, it's important to think about how you want
to handle both networking and block storage.
@ -1,23 +0,0 @@
Quantum vs. nova-network
^^^^^^^^^^^^^^^^^^^^^^^^

Quantum is a service that provides networking-as-a-service
functionality in OpenStack. It has a rich tenant-facing API for
defining network connectivity and addressing in the cloud, and gives
operators the ability to leverage different networking technologies to
power their cloud networking.

There are various deployment use cases for Quantum. Fuel
supports the most common of them, called Provider Router with Private
Networks. It provides each tenant with one or more private networks,
which can communicate with the outside world via a Quantum router.

Quantum is not, however, required in order to run an OpenStack
cluster; if you don't need (or want) this added functionality, it's
perfectly acceptable to continue using nova-network.

In order to deploy Quantum, you need to enable it in the Fuel
configuration. Fuel will then set up an additional node in the
OpenStack installation to act as an L3 router or, depending on the
configuration options you've chosen, install Quantum on the controllers.