Moving .rst format files to main admin-guide-cloud folder
This change moves the .rst files into the main admin-guide-cloud folder now that the conversion is complete. Changes to the project configuration and to the openstack-manuals tools to stop syncing the .xml files are also needed.
Change-Id: I498e8d6ac3cb80da413e23b14a0959abd58e7d79
Implements: blueprint reorganise-user-guides
parent 910bfef86d
commit 8e9507bf9a
.tx/config (13 changed lines)
@@ -1,13 +1,6 @@
[main]
host = https://www.transifex.com

[openstack-manuals-i18n.admin-guide-cloud]
file_filter = doc/admin-guide-cloud/locale/<lang>.po
minimum_perc = 75
source_file = doc/admin-guide-cloud/locale/admin-guide-cloud.pot
source_lang = en
type = PO

[openstack-manuals-i18n.cli-reference]
file_filter = doc/cli-reference/locale/<lang>.po
minimum_perc = 75
@@ -85,9 +78,9 @@ source_file = doc/common-rst/source/locale/common-rst.pot
source_lang = en
type = PO

[openstack-manuals-i18n.admin-guide-cloud-rst]
file_filter = doc/admin-guide-cloud-rst/source/locale/<lang>/LC_MESSAGES/admin-guide-cloud-rst.po
[openstack-manuals-i18n.admin-guide-cloud]
file_filter = doc/admin-guide-cloud/source/locale/<lang>/LC_MESSAGES/admin-guide-cloud.po
minimum_perc = 75
source_file = doc/admin-guide-cloud-rst/source/locale/admin-guide-cloud-rst.pot
source_file = doc/admin-guide-cloud/source/locale/admin-guide-cloud.pot
source_lang = en
type = PO
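For illustration only (not part of this change), translators typically fetch a resource with the Transifex client; a hypothetical invocation against the renamed resource would be:

# Hypothetical example: pull the German translation of the renamed resource
tx pull -r openstack-manuals-i18n.admin-guide-cloud -l de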
|
||||
|
@@ -35,8 +35,7 @@ declare -A SPECIAL_BOOKS=(
["networking-guide"]="RST"
["user-guide"]="RST"
["user-guide-admin"]="RST"
# In process of migration to RST
["admin-guide-cloud-rst"]="RST"
["admin-guide-cloud"]="RST"
# Skip guide while it's created
["contributor-guide"]="skip"
# This needs special handling, handle it with the RST tools.
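A minimal bash sketch (not the actual build script) of how a SPECIAL_BOOKS map like this is typically consumed; the echo commands are placeholders for the real build steps:

for book in "${!SPECIAL_BOOKS[@]}"; do
    case "${SPECIAL_BOOKS[$book]}" in
        RST)  echo "Build $book with the Sphinx/RST tool chain" ;;
        skip) echo "Skip $book" ;;
    esac
done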
|
||||
|
@@ -1,179 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<book xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="openstack-compute-admin-manual">
|
||||
<title>OpenStack Cloud Administrator Guide</title>
|
||||
<?rax title.font.size="28px" subtitle.font.size="28px"?>
|
||||
<titleabbrev>Cloud Administrator Guide</titleabbrev>
|
||||
<info>
|
||||
<author>
|
||||
<personname>
|
||||
<firstname/>
|
||||
<surname/>
|
||||
</personname>
|
||||
<affiliation>
|
||||
<orgname>OpenStack Foundation</orgname>
|
||||
</affiliation>
|
||||
</author>
|
||||
<copyright>
|
||||
<year>2013</year>
|
||||
<year>2014</year>
|
||||
<year>2015</year>
|
||||
<holder>OpenStack Foundation</holder>
|
||||
</copyright>
|
||||
<releaseinfo>current</releaseinfo>
|
||||
<productname>OpenStack</productname>
|
||||
<pubdate/>
|
||||
<legalnotice role="apache2">
|
||||
<annotation>
|
||||
<remark>Copyright details are filled in by the template.</remark>
|
||||
</annotation>
|
||||
</legalnotice>
|
||||
<legalnotice role="cc-by">
|
||||
<annotation>
|
||||
<remark>Remaining licensing details are filled in by the template.</remark>
|
||||
</annotation>
|
||||
</legalnotice>
|
||||
<abstract>
|
||||
<para>OpenStack offers open source software for cloud
|
||||
administrators to manage and troubleshoot an OpenStack
|
||||
cloud.</para>
|
||||
<para>This guide documents OpenStack Kilo, OpenStack
|
||||
Juno, and OpenStack Icehouse releases.</para>
|
||||
</abstract>
|
||||
<revhistory>
|
||||
<!-- ... continue adding more revisions here as you change this document using the markup shown below... -->
|
||||
<revision>
|
||||
<date>2015-02-20</date>
|
||||
<revdescription>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
For the Kilo release, the guide has been
updated with a new Measurements section
in the Telemetry chapter. The tables list,
for each collected meter, the release in
which it was introduced in the module. In
addition, the Orchestration chapter has
been added to the guide. It describes in
detail the Orchestration module, which has
been available in OpenStack since the
Havana release.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2014-10-15</date>
|
||||
<revdescription>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
For the Juno release, the guide has been
|
||||
updated with a new Telemetry chapter.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2014-07-21</date>
|
||||
<revdescription>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Updated variables to use correct formatting.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2014-04-17</date>
|
||||
<revdescription>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>For the Icehouse release, the guide was organized with system
|
||||
administration and system architecture sections. Also, how-to
|
||||
sections were moved to this guide instead of the
|
||||
<citetitle>OpenStack Configuration Reference</citetitle>.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2013-11-12</date>
|
||||
<revdescription>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Adds options for tuning operational status synchronization in the
|
||||
NSX plug-in.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2013-10-17</date>
|
||||
<revdescription>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Havana release.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2013-09-05</date>
|
||||
<revdescription>
|
||||
<itemizedlist spacing="compact">
|
||||
<listitem>
|
||||
<para>Moves object storage monitoring section to this guide.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Removes redundant object storage information.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
<revision>
|
||||
<date>2013-09-03</date>
|
||||
<revdescription>
|
||||
<itemizedlist spacing="compact">
|
||||
<listitem>
|
||||
<para>Moved all but configuration and installation information from
|
||||
these component guides to create the new guide:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>OpenStack Compute Administration Guide</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>OpenStack Networking Administration Guide</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>OpenStack Object Storage Administration Guide</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>OpenStack Block Storage Service Administration
|
||||
Guide</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</revdescription>
|
||||
</revision>
|
||||
</revhistory>
|
||||
</info>
|
||||
<!-- Chapters are referred from the book file through these include statements. You can add additional chapters using these types of statements. -->
|
||||
<xi:include href="../common/ch_preface.xml"/>
|
||||
<xi:include href="../common/ch_getstart.xml"/>
|
||||
<xi:include href="ch_identity_mgmt.xml"/>
|
||||
<xi:include href="ch_dashboard.xml"/>
|
||||
<xi:include href="ch_compute.xml"/>
|
||||
<xi:include href="ch_objectstorage.xml"/>
|
||||
<xi:include href="ch_blockstorage.xml"/>
|
||||
<xi:include href="ch_networking.xml"/>
|
||||
<xi:include href="ch_telemetry.xml"/>
|
||||
<xi:include href="ch_database.xml"/>
|
||||
<xi:include href="ch_orchestration.xml"/>
|
||||
<xi:include href="../common/app_support.xml"/>
|
||||
</book>
|
@@ -1,275 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="backup-block-storage-disks">
|
||||
<title>Back up Block Storage service disks</title>
|
||||
<para>While you can use LVM snapshots to create snapshots, you
can also use them to back up your volumes. By using LVM
snapshots, you reduce the size of the backup; only existing
data is backed up instead of the entire volume.</para>
|
||||
<para>To back up a volume, you must create a snapshot of it. An
|
||||
LVM snapshot is the exact copy of a logical volume, which
|
||||
contains data in a frozen state. This prevents data
|
||||
corruption, because data cannot be manipulated during the
|
||||
volume creation process. Remember that the volumes created
|
||||
through a <command>nova volume-create</command> command exist
|
||||
in an LVM logical volume.</para>
|
||||
<para>You must also make sure that the operating system is not
|
||||
using the volume, and that all data has been flushed on the
|
||||
guest file systems. This usually means that those file systems
|
||||
have to be unmounted during the snapshot creation. They can be
|
||||
mounted again as soon as the logical volume snapshot has been
|
||||
created.</para>
|
||||
<para>Before you create the snapshot, you must have enough space
|
||||
to save it. As a precaution, you should have at least twice as
|
||||
much space as the potential snapshot size. If insufficient
|
||||
space is available, the snapshot might become
|
||||
corrupted.</para>
|
||||
<para>For this example, assume that a 100 GB volume named
|
||||
<literal>volume-00000001</literal> was created for an
|
||||
instance while only 4 GB are used. This example uses these
|
||||
commands to back up only those 4 GB:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><command>lvm2</command> command. Directly
|
||||
manipulates the volumes.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><command>kpartx</command> command. Discovers the
|
||||
partition table created inside the instance.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><command>tar</command> command. Creates a
|
||||
minimum-sized backup.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><command>sha1sum</command> command. Calculates the
|
||||
backup checksum to check its consistency.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>You can apply this process to volumes of any size.</para>
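<para>The detailed procedure below reduces to the following condensed
command sequence. This is only a sketch that reuses the example names
from above (<literal>volume-00000001</literal>, <filename>/mnt</filename>);
the <command>tar</command> invocation here archives the mounted contents,
so adapt the paths to your environment:</para>
<screen><prompt>#</prompt> <userinput>lvcreate --size 10G --snapshot --name volume-00000001-snapshot /dev/cinder-volumes/volume-00000001</userinput>
<prompt>#</prompt> <userinput>kpartx -av /dev/cinder-volumes/volume-00000001-snapshot</userinput>
<prompt>#</prompt> <userinput>mount /dev/mapper/cinder--volumes-volume--00000001--snapshot1 /mnt</userinput>
<prompt>$</prompt> <userinput>tar --exclude="lost+found" -czf volume-00000001.tar.gz -C /mnt/ .</userinput>
<prompt>$</prompt> <userinput>sha1sum volume-00000001.tar.gz > volume-00000001.checksum</userinput>
<prompt>#</prompt> <userinput>umount /mnt</userinput>
<prompt>#</prompt> <userinput>kpartx -dv /dev/cinder-volumes/volume-00000001-snapshot</userinput>
<prompt>#</prompt> <userinput>lvremove -f /dev/cinder-volumes/volume-00000001-snapshot</userinput></screen>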
|
||||
<procedure>
|
||||
<title>To back up Block Storage service disks</title>
|
||||
<step>
|
||||
<title>Create a snapshot of a used volume</title>
|
||||
<substeps>
|
||||
<step>
|
||||
<para>Use this command to list all volumes:</para>
|
||||
<screen><prompt>#</prompt> <userinput>lvdisplay</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create the snapshot; you can do this while
|
||||
the volume is attached to an instance:</para>
|
||||
<screen><prompt>#</prompt> <userinput>lvcreate --size 10G --snapshot --name volume-00000001-snapshot /dev/cinder-volumes/volume-00000001</userinput></screen>
|
||||
<para>Use the <parameter>--snapshot</parameter>
|
||||
configuration option to tell LVM that you want
|
||||
a snapshot of an already existing volume. The
|
||||
command includes the size of the space
|
||||
reserved for the snapshot volume, the name of
|
||||
the snapshot, and the path of an already
|
||||
existing volume. Generally, this path is
|
||||
<filename>/dev/cinder-volumes/<replaceable>VOLUME_NAME</replaceable></filename>.</para>
|
||||
<para>The size does not have to be the same as the
|
||||
volume of the snapshot. The
|
||||
<parameter>--size</parameter> parameter
|
||||
defines the space that LVM reserves for the
|
||||
snapshot volume. As a precaution, the size
|
||||
should be the same as that of the original
|
||||
volume, even if the whole space is not
|
||||
currently used by the snapshot.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Run the <command>lvdisplay</command> command
|
||||
again to verify the snapshot:</para>
|
||||
<programlisting>--- Logical volume ---
|
||||
LV Name /dev/cinder-volumes/volume-00000001
|
||||
VG Name cinder-volumes
|
||||
LV UUID gI8hta-p21U-IW2q-hRN1-nTzN-UC2G-dKbdKr
|
||||
LV Write Access read/write
|
||||
LV snapshot status source of
|
||||
/dev/cinder-volumes/volume-00000026-snap [active]
|
||||
LV Status available
|
||||
# open 1
|
||||
LV Size 15,00 GiB
|
||||
Current LE 3840
|
||||
Segments 1
|
||||
Allocation inherit
|
||||
Read ahead sectors auto
|
||||
- currently set to 256
|
||||
Block device 251:13
|
||||
|
||||
--- Logical volume ---
|
||||
LV Name /dev/cinder-volumes/volume-00000001-snap
|
||||
VG Name cinder-volumes
|
||||
LV UUID HlW3Ep-g5I8-KGQb-IRvi-IRYU-lIKe-wE9zYr
|
||||
LV Write Access read/write
|
||||
LV snapshot status active destination for /dev/cinder-volumes/volume-00000026
|
||||
LV Status available
|
||||
# open 0
|
||||
LV Size 15,00 GiB
|
||||
Current LE 3840
|
||||
COW-table size 10,00 GiB
|
||||
COW-table LE 2560
|
||||
Allocated to snapshot 0,00%
|
||||
Snapshot chunk size 4,00 KiB
|
||||
Segments 1
|
||||
Allocation inherit
|
||||
Read ahead sectors auto
|
||||
- currently set to 256
|
||||
Block device 251:14</programlisting>
|
||||
</step>
|
||||
</substeps>
|
||||
</step>
|
||||
<step>
|
||||
<title>Partition table discovery</title>
|
||||
<substeps>
|
||||
<step>
|
||||
<para>To exploit the snapshot with the
|
||||
<command>tar</command> command, mount your
|
||||
partition on the Block Storage service
|
||||
server.</para>
|
||||
<para>The <command>kpartx</command> utility
discovers and maps the partition table. You
can use it to view the partitions that were created
inside the instance. Without mapping the
partitions created inside the instance, you
cannot see their contents or create efficient
backups.</para>
|
||||
<screen><prompt>#</prompt> <userinput>kpartx -av /dev/cinder-volumes/volume-00000001-snapshot</userinput></screen>
|
||||
<note os="debian">
|
||||
<para>On a Debian-based distribution, you can
|
||||
use the <command>apt-get install kpartx</command>
|
||||
command to install
|
||||
<command>kpartx</command>.</para>
|
||||
</note>
|
||||
<para>If the tools successfully find and map the
|
||||
partition table, no errors are
|
||||
returned.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>To check the partition table map, run this
|
||||
command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>ls /dev/mapper/nova*</userinput></screen>
|
||||
<para>You can see the
|
||||
<literal>cinder--volumes-volume--00000001--snapshot1</literal>
|
||||
partition.</para>
|
||||
<para>If you created more than one partition on
|
||||
that volume, you see several partitions; for
|
||||
example:
|
||||
<literal>cinder--volumes-volume--00000001--snapshot2</literal>,
|
||||
<literal>cinder--volumes-volume--00000001--snapshot3</literal>,
|
||||
and so on.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Mount your partition:</para>
|
||||
<screen><prompt>#</prompt> <userinput>mount /dev/mapper/cinder--volumes-volume--00000001--snapshot1 /mnt</userinput></screen>
|
||||
<para>If the partition mounts successfully, no
|
||||
errors are returned.</para>
|
||||
<para>You can directly access the data inside the
|
||||
instance. If a message prompts you for a
|
||||
partition or you cannot mount it, determine
|
||||
whether enough space was allocated for the
|
||||
snapshot or the <command>kpartx</command>
|
||||
command failed to discover the partition
|
||||
table.</para>
|
||||
<para>Allocate more space to the snapshot and try
|
||||
the process again.</para>
|
||||
</step>
|
||||
</substeps>
|
||||
</step>
|
||||
<step>
|
||||
<title>Use the <command>tar</command> command to create
|
||||
archives</title>
|
||||
<para>Create a backup of the volume:</para>
|
||||
<screen><prompt>$</prompt> <userinput>tar --exclude="lost+found" --exclude="some/data/to/exclude" -czf volume-00000001.tar.gz -C /mnt/ /backup/destination</userinput></screen>
|
||||
<para>This command creates a <filename>tar.gz</filename>
|
||||
file that contains the data, <emphasis role="italic"
|
||||
>and data only</emphasis>. This ensures that you
|
||||
do not waste space by backing up empty sectors.</para>
|
||||
</step>
|
||||
<step>
|
||||
<title>Checksum calculation I</title>
|
||||
<para>You should always have the checksum for your backup
|
||||
files. When you transfer the same file over the
|
||||
network, you can run a checksum calculation to ensure
|
||||
that your file was not corrupted during its transfer.
|
||||
The checksum is a unique ID for a file. If the
|
||||
checksums are different, the file is corrupted.</para>
|
||||
<para>Run this command to run a checksum for your file and
|
||||
save the result to a file:</para>
|
||||
<screen><prompt>$</prompt> <userinput>sha1sum volume-00000001.tar.gz > volume-00000001.checksum</userinput></screen>
|
||||
<note>
|
||||
<para>Use the <command>sha1sum</command> command
|
||||
carefully because the time it takes to complete
|
||||
the calculation is directly proportional to the
|
||||
size of the file.</para>
|
||||
<para>For files larger than around 4 to 6 GB, and
|
||||
depending on your CPU, the process might take a
|
||||
long time.</para>
|
||||
</note>
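<para>As a complementary check that is not part of the original
procedure, you can later verify the archive against the saved
checksum; <command>sha1sum -c</command> understands the
<filename>volume-00000001.checksum</filename> file created above and
reports <literal>OK</literal> when the file is intact:</para>
<screen><prompt>$</prompt> <userinput>sha1sum -c volume-00000001.checksum</userinput></screen>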
|
||||
</step>
|
||||
<step>
|
||||
<title>Clean up after the backup</title>
<para>Now that you have an efficient and consistent
backup, use these commands to clean up the file
system:</para>
|
||||
<substeps>
|
||||
<step>
|
||||
<para>Unmount the volume:</para>
|
||||
<screen><prompt>#</prompt> <userinput>umount /mnt</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Delete the partition table:</para>
|
||||
<screen><prompt>#</prompt> <userinput>kpartx -dv /dev/cinder-volumes/volume-00000001-snapshot</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Remove the snapshot:</para>
|
||||
<screen><prompt>#</prompt> <userinput>lvremove -f /dev/cinder-volumes/volume-00000001-snapshot</userinput></screen>
|
||||
</step>
|
||||
</substeps>
|
||||
<para>Repeat these steps for all your volumes.</para>
|
||||
</step>
|
||||
<step>
|
||||
<title>Automate your backups</title>
|
||||
<para>Because more and more volumes might be allocated to
|
||||
your Block Storage service, you might want to automate
|
||||
your backups. The <link
|
||||
xlink:href="https://github.com/Razique/BashStuff/blob/master/SYSTEMS/OpenStack/SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh"
|
||||
>SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh</link>
|
||||
script assists you with this task. The script performs
|
||||
the operations from the previous example, but also
|
||||
provides a mail report and runs the backup based on
|
||||
the <option>backups_retention_days</option>
|
||||
setting.</para>
|
||||
<para>Launch this script from the server that runs the
|
||||
Block Storage service.</para>
|
||||
<para>This example shows a mail report:</para>
|
||||
<programlisting>Backup Start Time - 07/10 at 01:00:01
|
||||
Current retention - 7 days
|
||||
|
||||
The backup volume is mounted. Proceed...
|
||||
Removing old backups... : /BACKUPS/EBS-VOL/volume-00000019/volume-00000019_28_09_2011.tar.gz
|
||||
/BACKUPS/EBS-VOL/volume-00000019 - 0 h 1 m and 21 seconds. Size - 3,5G
|
||||
|
||||
The backup volume is mounted. Proceed...
|
||||
Removing old backups... : /BACKUPS/EBS-VOL/volume-0000001a/volume-0000001a_28_09_2011.tar.gz
|
||||
/BACKUPS/EBS-VOL/volume-0000001a - 0 h 4 m and 15 seconds. Size - 6,9G
|
||||
---------------------------------------
|
||||
Total backups size - 267G - Used space : 35%
|
||||
Total execution time - 1 h 75 m and 35 seconds</programlisting>
|
||||
<para>The script also enables you to SSH to your instances
|
||||
and run a <command>mysqldump</command> command on
them. To make this work, enable the connection to the
|
||||
Compute project keys. If you do not want to run the
|
||||
<command>mysqldump</command> command, you can add
|
||||
<literal>enable_mysql_dump=0</literal> to the
|
||||
script to turn off this functionality.</para>
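<para>If you want the backups to run unattended, a hypothetical
<systemitem>cron</systemitem> entry could launch the script nightly.
The path and schedule below are examples only, not part of the
script itself:</para>
<programlisting># /etc/cron.d/cinder-volumes-backup (hypothetical path and schedule)
0 1 * * * root /usr/local/sbin/cinder-volumes-backup.sh >> /var/log/cinder-volumes-backup.log 2>&1</programlisting>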
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
@@ -1,257 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="consistency-groups">
|
||||
<title>Consistency Groups</title>
|
||||
<para>Consistency group support is available in OpenStack Block Storage.
It adds support for creating snapshots of consistency groups.
This feature leverages storage-level consistency technology:
snapshots of multiple volumes in the same consistency group
are taken at the same point in time to ensure data consistency.
Consistency group operations can be performed using the Block Storage
command line.</para>
|
||||
<note><para>Only Block Storage V2 API supports consistency groups. You can
|
||||
specify <literal>--os-volume-api-version 2</literal> when using Block
|
||||
Storage command line for consistency group operations.</para></note>
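<para>For example, assuming standard <command>cinder</command> client
behavior, you can pass the flag on each command or export it once for
the session:</para>
<screen><prompt>$</prompt> <userinput>cinder --os-volume-api-version 2 consisgroup-list</userinput>
<prompt>$</prompt> <userinput>export OS_VOLUME_API_VERSION=2</userinput></screen>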
|
||||
<para>Before using consistency groups, make sure the Block Storage driver
|
||||
that you are running has consistency group support by reading the
|
||||
Block Storage manual or consulting the driver maintainer. There are
|
||||
a small number of drivers that have implemented this feature. The
|
||||
default LVM driver does not support consistency groups yet because
|
||||
the consistency technology is not available at the storage level.</para>
|
||||
<para>Before using consistency groups, you must change policies for the
|
||||
consistency group APIs in the <filename>/etc/cinder/policy.json
|
||||
</filename> file. By default, the consistency group APIs are disabled.
|
||||
Enable them before running consistency group operations.</para>
|
||||
<para>Here are existing policy entries for consistency groups:</para>
|
||||
<programlisting language="ini">"consistencygroup:create": "group:nobody",
|
||||
"consistencygroup:delete": "group:nobody",
|
||||
"consistencygroup:get": "group:nobody",
|
||||
"consistencygroup:get_all": "group:nobody",
|
||||
"consistencygroup:create_cgsnapshot" : "group:nobody",
|
||||
"consistencygroup:delete_cgsnapshot": "group:nobody",
|
||||
"consistencygroup:get_cgsnapshot": "group:nobody",
|
||||
"consistencygroup:get_all_cgsnapshots": "group:nobody",</programlisting>
|
||||
<para>Change them to the following by removing <literal>group:nobody
|
||||
</literal> to enable these APIs:</para>
|
||||
<programlisting language="ini">"consistencygroup:create": "",
|
||||
"consistencygroup:delete": "",
|
||||
"consistencygroup:update": "",
|
||||
"consistencygroup:get": "",
|
||||
"consistencygroup:get_all": "",
|
||||
"consistencygroup:create_cgsnapshot" : "",
|
||||
"consistencygroup:delete_cgsnapshot": "",
|
||||
"consistencygroup:get_cgsnapshot": "",
|
||||
"consistencygroup:get_all_cgsnapshots": "",</programlisting>
|
||||
<para>Restart the Block Storage API service after changing policies.</para>
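<para>The API service name varies by distribution; the following are
typical examples (the first form on RHEL-family and SUSE systems, the
second on Ubuntu and Debian), so verify the name used by your
packages:</para>
<screen><prompt>#</prompt> <userinput>service openstack-cinder-api restart</userinput>
<prompt>#</prompt> <userinput>service cinder-api restart</userinput></screen>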
|
||||
<para>The following consistency group operations are supported:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Create a consistency group, given volume types.</para>
|
||||
<note><para>A consistency group can support more than
|
||||
one volume type. The scheduler is responsible for finding
|
||||
a back end that can support all given volume types.</para>
|
||||
</note>
|
||||
<note><para>A consistency group can only contain volumes
|
||||
hosted by the same back end.</para></note>
|
||||
<note><para>A consistency group is empty upon its creation.
|
||||
Volumes need to be created and added to it later.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Show a consistency group.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>List consistency groups.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create a volume and add it to a consistency group,
|
||||
given volume type and consistency group id.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create a snapshot for a consistency group.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Show a snapshot of a consistency group.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>List consistency group snapshots.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Delete a snapshot of a consistency group.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Delete a consistency group.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Modify a consistency group.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create a consistency group from the snapshot of another
|
||||
consistency group.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The following operations are not allowed if a volume
|
||||
is in a consistency group:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Volume migration.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Volume retype.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Volume deletion.</para>
|
||||
<note><para>A consistency group has to be
|
||||
deleted as a whole with all the volumes.
|
||||
</para></note>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The following operations are not allowed if a
|
||||
volume snapshot is in a consistency group snapshot:
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Volume snapshot deletion.</para>
|
||||
<note><para>A consistency group snapshot has
|
||||
to be deleted as a whole with all the
|
||||
volume snapshots.</para></note>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The details of consistency group operations are shown in the
|
||||
following.</para>
|
||||
<para>Create a consistency group:</para>
|
||||
<screen>cinder consisgroup-create
|
||||
[--name name]
|
||||
[--description description]
|
||||
[--availability-zone availability-zone]
|
||||
volume-types</screen>
|
||||
<note><para>The parameter <literal>volume-types</literal> is required.
|
||||
It can be a list of names or UUIDs of volume types separated by
|
||||
commas without spaces in between. For example, <literal>
|
||||
volumetype1,volumetype2,volumetype3</literal>.</para></note>
|
||||
<screen><prompt>$</prompt> <userinput>cinder consisgroup-create --name bronzeCG2 volume_type_1</userinput></screen>
|
||||
<para></para>
|
||||
<screen><computeroutput>+-------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+-------------------+--------------------------------------+
|
||||
| availability_zone | nova |
|
||||
| created_at | 2014-12-29T12:59:08.000000 |
|
||||
| description | None |
|
||||
| id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 |
|
||||
| name | bronzeCG2 |
|
||||
| status | creating |
|
||||
+-------------------+--------------------------------------+</computeroutput></screen>
|
||||
<para>Show a consistency group:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder consisgroup-show 1de80c27-3b2f-47a6-91a7-e867cbe36462</userinput>
|
||||
<computeroutput>+-------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+-------------------+--------------------------------------+
|
||||
| availability_zone | nova |
|
||||
| created_at | 2014-12-29T12:59:08.000000 |
|
||||
| description | None |
|
||||
| id | 2a6b2bda-1f43-42ce-9de8-249fa5cbae9a |
|
||||
| name | bronzeCG2 |
|
||||
| status | available |
|
||||
+-------------------+--------------------------------------+</computeroutput></screen>
|
||||
<para>List consistency groups:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder consisgroup-list</userinput>
|
||||
<computeroutput>+--------------------------------------+-----------+-----------+
|
||||
| ID | Status | Name |
|
||||
+--------------------------------------+-----------+-----------+
|
||||
| 1de80c27-3b2f-47a6-91a7-e867cbe36462 | available | bronzeCG2 |
|
||||
| 3a2b3c42-b612-479a-91eb-1ed45b7f2ad5 | error | bronzeCG |
|
||||
+--------------------------------------+-----------+-----------+</computeroutput></screen>
|
||||
<para>Create a volume and add it to a consistency group:</para>
|
||||
<note><para>When creating a volume and adding it to a consistency
|
||||
group, a volume type and a consistency group id must be provided.
|
||||
This is because a consistency group can support more than one
|
||||
volume type.</para></note>
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --volume-type volume_type_1 --name cgBronzeVol --consisgroup-id 1de80c27-3b2f-47a6-91a7-e867cbe36462 1</userinput>
|
||||
<computeroutput>+---------------------------------------+----------------------------------------+
|
||||
| Property | Value |
|
||||
+---------------------------------------+----------------------------------------+
|
||||
| attachments | [] |
|
||||
| availability_zone | nova |
|
||||
| bootable | false |
|
||||
| consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 |
|
||||
| created_at | 2014-12-29T13:16:47.000000 |
|
||||
| description | None |
|
||||
| encrypted | False |
|
||||
| id | 5e6d1386-4592-489f-a56b-9394a81145fe |
|
||||
| metadata | {} |
|
||||
| name | cgBronzeVol |
|
||||
| os-vol-host-attr:host | server-1@backend-1#pool-1 |
|
||||
| os-vol-mig-status-attr:migstat | None |
|
||||
| os-vol-mig-status-attr:name_id | None |
|
||||
| os-vol-tenant-attr:tenant_id | 1349b21da2a046d8aa5379f0ed447bed |
|
||||
| os-volume-replication:driver_data | None |
|
||||
| os-volume-replication:extended_status | None |
|
||||
| replication_status | disabled |
|
||||
| size | 1 |
|
||||
| snapshot_id | None |
|
||||
| source_volid | None |
|
||||
| status | creating |
|
||||
| user_id | 93bdea12d3e04c4b86f9a9f172359859 |
|
||||
| volume_type | volume_type_1 |
|
||||
+---------------------------------------+----------------------------------------+</computeroutput></screen>
|
||||
<para>Create a snapshot for a consistency group:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder cgsnapshot-create 1de80c27-3b2f-47a6-91a7-e867cbe36462</userinput>
|
||||
<computeroutput>+---------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+---------------------+--------------------------------------+
|
||||
| consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 |
|
||||
| created_at | 2014-12-29T13:19:44.000000 |
|
||||
| description | None |
|
||||
| id | d4aff465-f50c-40b3-b088-83feb9b349e9 |
|
||||
| name | None |
|
||||
| status | creating |
|
||||
+---------------------+--------------------------------------+</computeroutput></screen>
|
||||
<para>Show a snapshot of a consistency group:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder cgsnapshot-show d4aff465-f50c-40b3-b088-83feb9b349e9</userinput></screen>
|
||||
<para>List consistency group snapshots:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder cgsnapshot-list</userinput>
|
||||
<computeroutput>+--------------------------------------+-----------+------+
| ID                                   | Status    | Name |
+--------------------------------------+-----------+------+
| 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 | available | None |
| aa129f4d-d37c-4b97-9e2d-7efffda29de0 | available | None |
| bb5b5d82-f380-4a32-b469-3ba2e299712c | available | None |
| d4aff465-f50c-40b3-b088-83feb9b349e9 | available | None |
+--------------------------------------+-----------+------+</computeroutput></screen>
|
||||
<para>Delete a snapshot of a consistency group:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder cgsnapshot-delete d4aff465-f50c-40b3-b088-83feb9b349e9</userinput></screen>
|
||||
<para>Delete a consistency group:</para>
|
||||
<note><para>The force flag is needed when there are volumes in the
|
||||
consistency group.</para></note>
|
||||
<screen><prompt>$</prompt> <userinput>cinder consisgroup-delete --force 1de80c27-3b2f-47a6-91a7-e867cbe36462</userinput></screen>
|
||||
<para>Modify a consistency group:</para>
|
||||
<screen>cinder consisgroup-update
|
||||
[--name <replaceable>NAME</replaceable>]
|
||||
[--description <replaceable>DESCRIPTION</replaceable>]
|
||||
[--add-volumes <replaceable>UUID1,UUID2,......</replaceable>]
|
||||
[--remove-volumes <replaceable>UUID3,UUID4,......</replaceable>]
|
||||
<replaceable>CG</replaceable></screen>
|
||||
<para>The parameter <replaceable>CG</replaceable> is required.
|
||||
It can be a name or UUID of a consistency group.
|
||||
<replaceable>UUID1,UUID2,......</replaceable> are
|
||||
UUIDs of one or more volumes to be added to the
|
||||
consistency group, separated by commas. Default is None.
|
||||
<replaceable>UUID3,UUID4,......</replaceable> are
|
||||
UUIDs of one or more volumes to be removed from the
|
||||
consistency group, separated by commas. Default is None.
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder consisgroup-update --name 'new name' --description 'new description' --add-volumes 0b3923f5-95a4-4596-a536-914c2c84e2db,1c02528b-3781-4e32-929c-618d81f52cf3 --remove-volumes 8c0f6ae4-efb1-458f-a8fc-9da2afcc5fb1,a245423f-bb99-4f94-8c8c-02806f9246d8 1de80c27-3b2f-47a6-91a7-e867cbe36462</userinput></screen>
|
||||
<para>Create a consistency group from the snapshot of another consistency
|
||||
group:
|
||||
</para>
|
||||
<screen>cinder consisgroup-create-from-src
|
||||
[--cgsnapshot <replaceable>CGSNAPSHOT</replaceable>]
|
||||
[--name <replaceable>NAME</replaceable>]
|
||||
[--description <replaceable>DESCRIPTION</replaceable>]</screen>
|
||||
<para>The parameter <replaceable>CGSNAPSHOT</replaceable> is a name
|
||||
or UUID of a snapshot of a consistency group.</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder consisgroup-create-from-src --cgsnapshot 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 --name 'new cg' --description 'new cg from cgsnapshot'</userinput></screen>
|
||||
</section>
|
@@ -1,515 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="driver_filter_weighing">
|
||||
<title>
|
||||
Configure and use driver filter and weighing for scheduler
|
||||
</title>
|
||||
|
||||
<para>
|
||||
OpenStack Block Storage enables you to choose a volume back
|
||||
end based on back-end specific properties by using the
|
||||
DriverFilter and GoodnessWeigher for the scheduler. The
|
||||
driver filter and weigher scheduling can help ensure that the
|
||||
scheduler chooses the best back end based on requested volume
|
||||
properties as well as various back-end specific properties.
|
||||
</para>
|
||||
|
||||
<simplesect>
|
||||
<title>
|
||||
What the driver filter and weigher are and when to use them
|
||||
</title>
|
||||
<para>
|
||||
The driver filter and weigher give you the ability
to control more finely how the OpenStack Block Storage
scheduler chooses the best back end to use when handling
a volume request.

One example scenario where the driver filter and
weigher are useful is a back end that uses
thin provisioning. The default filters use the "free capacity"
property to determine the best back end, but that is not
always perfect. If a back end can provide a
more accurate back-end specific value, you can use that as
part of the weighing.

Another example where the driver filter and weigher
prove useful is a back end with a hard
limit of 1000 volumes and a maximum volume size of
500 GB, whose performance degrades once 75% of the
total space is occupied. The driver filter and
weigher provide a way to check for such limits.
|
||||
</para>
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>Enable driver filter and weighing</title>
|
||||
<para>
|
||||
To enable the driver filter, set the <option>
|
||||
scheduler_default_filters</option> option in the <filename>
|
||||
cinder.conf</filename> file to <literal>DriverFilter
|
||||
</literal> or add it to the list if other filters are
|
||||
already present.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To enable the goodness filter as a weigher, set the <option>
|
||||
scheduler_default_weighers</option> option in the <filename>
|
||||
cinder.conf</filename> file to <literal>GoodnessWeigher
|
||||
</literal> or add it to the list if other weighers are
|
||||
already present.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can choose to use the <literal>DriverFilter</literal>
|
||||
without the <literal>GoodnessWeigher</literal> or
|
||||
vice-versa. The filter and weigher working together,
|
||||
however, create the most benefits when helping the
|
||||
scheduler choose an ideal back end.
|
||||
</para>
|
||||
|
||||
<important>
|
||||
<para>
|
||||
The support for the <literal>DriverFilter</literal> and
|
||||
<literal>GoodnessWeigher</literal> is optional for back
|
||||
ends. If you are using a back end that does not support
|
||||
the filter and weigher functionality you may not get the
|
||||
full benefit.
|
||||
</para>
|
||||
</important>
|
||||
|
||||
<para>
|
||||
Example <filename>cinder.conf</filename> configuration file:
|
||||
</para>
|
||||
<programlisting language="ini">scheduler_default_filters = DriverFilter
|
||||
scheduler_default_weighers = GoodnessWeigher</programlisting>
|
||||
|
||||
<note>
|
||||
<para>
|
||||
It is useful to use the other filters and weighers
|
||||
available in OpenStack in combination with these custom
|
||||
ones. For example, the <literal>CapacityFilter</literal>
|
||||
and <literal>CapacityWeigher</literal> can be combined
|
||||
with these.
|
||||
</para>
|
||||
</note>
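<para>For example, a configuration that keeps the standard
capacity-based scheduling alongside the custom filter and weigher
could look like the following sketch; adjust the filter list to your
deployment:</para>
<programlisting language="ini">scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,DriverFilter
scheduler_default_weighers = CapacityWeigher,GoodnessWeigher</programlisting>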
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>Defining your own filter and goodness functions</title>
|
||||
<para>
|
||||
You can define your own filter and goodness functions
|
||||
through the use of various properties that OpenStack
|
||||
Block Storage has exposed. Properties exposed include
|
||||
information about the volume request being made, volume_type
|
||||
settings, and back-end specific information about drivers.
|
||||
All of these allow for a lot of control over how the ideal
|
||||
back end for a volume request will be decided.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <option>filter_function</option> option is a string
|
||||
defining an equation that will determine whether a back end
|
||||
should be considered as a potential candidate in the
|
||||
scheduler.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <option>goodness_function</option> option is a string
|
||||
defining an equation that will rate the quality of the
|
||||
potential host (0 to 100, 0 lowest, 100 highest).
|
||||
</para>
|
||||
|
||||
<important>
|
||||
<para>
|
||||
Default values for the filter and goodness functions will
|
||||
be used for each back end if you do not define them
|
||||
yourself. If complete control is desired then a filter
|
||||
and goodness function should be defined for each of the
|
||||
back ends in the <filename>cinder.conf</filename> file.
|
||||
</para>
|
||||
</important>
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>
|
||||
Supported operations in filter and goodness functions
|
||||
</title>
|
||||
<para>
|
||||
The following table lists the operations that you can
currently use in custom filter and goodness functions:
|
||||
</para>
|
||||
|
||||
<informaltable>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Operations</th>
|
||||
<th>Type</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><para>+, -, *, /, ^</para></td>
|
||||
<td><para>standard math</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para>not, and, or, &, |, !</para></td>
|
||||
<td><para>logic</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>
|
||||
>, >=, <, <=, ==, <>, !=
|
||||
</para>
|
||||
</td>
|
||||
<td><para>equality</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para>+, -</para></td>
|
||||
<td><para>sign</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para>x ? a : b</para></td>
|
||||
<td><para>ternary</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para>abs(x), max(x, y), min(x, y)</para></td>
|
||||
<td><para>math helper functions</para></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</informaltable>
|
||||
|
||||
<caution>
|
||||
<para>
|
||||
Syntax errors in filter or goodness strings defined by you
|
||||
will cause errors to be thrown at volume request time.
|
||||
</para>
|
||||
</caution>
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>
|
||||
Available properties when creating custom functions
|
||||
</title>
|
||||
<para>
|
||||
There are various properties that can be used in either the
|
||||
<option>filter_function</option> or the <option>
|
||||
goodness_function</option> strings. The properties allow
|
||||
access to volume info, qos settings, extra specs, and so on.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here is a list of the properties and their sub-properties
|
||||
currently available for use:
|
||||
</para>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>stats</literal>—These are the host stats
|
||||
for a back end.
|
||||
</para>
|
||||
<variablelist>
|
||||
<varlistentry><term>host</term>
|
||||
<listitem><para>The host's name.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>volume_backend_name</term>
|
||||
<listitem><para>The volume back end name.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>vendor_name</term>
|
||||
<listitem><para>The vendor name.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>driver_version</term>
|
||||
<listitem><para>The driver version.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>storage_protocol</term>
|
||||
<listitem><para>The storage protocol.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>QoS_support</term>
|
||||
<listitem><para>Boolean signifying whether QoS is
|
||||
supported.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>total_capacity_gb</term>
|
||||
<listitem><para>The total capacity in GB.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>allocated_capacity_gb</term>
|
||||
<listitem><para>The allocated capacity in GB.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>reserved_percentage</term>
|
||||
<listitem><para>The reserved storage percentage.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>capabilities</literal>—These are the
|
||||
capabilities specific to a back end.
|
||||
</para>
|
||||
<para>
|
||||
The properties available here are determined by the
|
||||
specific back end you are creating filter and goodness
|
||||
functions for. Some back ends may not have any
|
||||
properties available here.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>volume</literal>—The requested volume
|
||||
properties.
|
||||
</para>
|
||||
<variablelist>
|
||||
<varlistentry><term>status</term>
|
||||
<listitem><para>Status for the requested volume.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>volume_type_id</term>
|
||||
<listitem><para>The volume type ID.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>display_name</term>
|
||||
<listitem><para>The display name of the volume.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>volume_metadata</term>
|
||||
<listitem><para>Any metadata the volume has.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>reservations</term>
|
||||
<listitem><para>Any reservations the volume has.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>user_id</term>
|
||||
<listitem><para>The volume's user ID.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>attach_status</term>
|
||||
<listitem><para>The attach status for the volume.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>display_description</term>
|
||||
<listitem><para>The volume's display description.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>id</term>
|
||||
<listitem><para>The volume's ID.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>replication_status</term>
|
||||
<listitem><para>The volume's replication status.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>snapshot_id</term>
|
||||
<listitem><para>The volume's snapshot ID.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>encryption_key_id</term>
|
||||
<listitem><para>The volume's encryption key ID.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>source_volid</term>
|
||||
<listitem><para>The source volume ID.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>volume_admin_metadata</term>
|
||||
<listitem><para>Any admin metadata for this volume.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>source_replicaid</term>
|
||||
<listitem><para>The source replication ID.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>consistencygroup_id</term>
|
||||
<listitem><para>The consistency group ID.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>size</term>
|
||||
<listitem><para>The size of the volume in GB.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry><term>metadata</term>
|
||||
<listitem><para>General metadata.</para></listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
|
||||
<para>
|
||||
The property most used from here will most likely be the
|
||||
<literal>size</literal> sub-property.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>extra</literal>—The extra specs for the
|
||||
requested volume type.
|
||||
</para>
|
||||
<para>
|
||||
View the available properties for volume types by
|
||||
running: <screen><prompt>$</prompt> <userinput>cinder extra-specs-list</userinput></screen>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>qos</literal>—The current QoS specs for
|
||||
the requested volume type.
|
||||
</para>
|
||||
<para>
|
||||
View the available properties for volume types by
|
||||
running: <screen><prompt>$</prompt> <userinput>cinder qos-list</userinput></screen>
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
|
||||
<para>
|
||||
In order to access these properties in a custom string use
|
||||
the following format:
|
||||
</para>
|
||||
<para>
|
||||
<command><property>.<sub_property></command>
|
||||
</para>
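<para>For instance, combining properties from the list above,
illustrative filter and goodness strings (the thresholds are examples
only) might be:</para>
<programlisting language="ini">filter_function = "(stats.total_capacity_gb - stats.allocated_capacity_gb) > 2 * volume.size"
goodness_function = "(volume.size < 20) ? 100 : 50"</programlisting>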
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>Driver filter and weigher usage examples</title>
|
||||
<para>
|
||||
Below are examples for using the filter and weigher separately, together,
|
||||
and using driver-specific properties.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Example <filename>cinder.conf</filename> file configuration
|
||||
for customizing the filter function:
|
||||
</para>
|
||||
<programlisting language="ini">[default]
|
||||
scheduler_default_filters = DriverFilter
|
||||
enabled_backends = lvm-1, lvm-2
|
||||
|
||||
[lvm-1]
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
|
||||
volume_backend_name = sample_LVM
|
||||
filter_function = "volume.size < 10"
|
||||
|
||||
[lvm-2]
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
|
||||
volume_backend_name = sample_LVM
|
||||
filter_function = "volume.size >= 10"</programlisting>
|
||||
<para>
|
||||
The above example will filter volumes to different back ends
|
||||
depending on the size of the requested volume. Default
|
||||
OpenStack Block Storage scheduler weighing is done. Volumes
|
||||
with a size less than 10 GB are sent to lvm-1 and
|
||||
volumes with a size greater than or equal to 10 GB are
|
||||
sent to lvm-2.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Example <filename>cinder.conf</filename> file configuration
|
||||
for customizing the goodness function:
|
||||
</para>
|
||||
<programlisting language="ini">[default]
|
||||
scheduler_default_weighers = GoodnessWeigher
|
||||
enabled_backends = lvm-1, lvm-2
|
||||
|
||||
[lvm-1]
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
|
||||
volume_backend_name = sample_LVM
|
||||
goodness_function = "(volume.size < 5) ? 100 : 50"
|
||||
|
||||
[lvm-2]
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
|
||||
volume_backend_name = sample_LVM
|
||||
goodness_function = "(volume.size >= 5) ? 100 : 25"</programlisting>
|
||||
<para>
|
||||
The above example will determine the goodness rating of a
|
||||
back end based on the requested volume's size. Default
|
||||
OpenStack Block Storage scheduler filtering is done. The
|
||||
example shows how the ternary if statement can be used in a
|
||||
filter or goodness function. If a requested volume is of
|
||||
size 10 GB then lvm-1 is rated as 50 and lvm-2 is rated
|
||||
as 100. In this case lvm-2 wins. If a requested volume is
|
||||
of size 3 GB then lvm-1 is rated 100 and lvm-2 is rated
|
||||
25. In this case lvm-1 would win.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Example <filename>cinder.conf</filename> file configuration
|
||||
for customizing both the filter and goodness functions:
|
||||
</para>
|
||||
<programlisting language="ini">[default]
|
||||
scheduler_default_filters = DriverFilter
|
||||
scheduler_default_weighers = GoodnessWeigher
|
||||
enabled_backends = lvm-1, lvm-2
|
||||
|
||||
[lvm-1]
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
|
||||
volume_backend_name = sample_LVM
|
||||
filter_function = "stats.total_capacity_gb < 500"
|
||||
goodness_function = "(volume.size < 25) ? 100 : 50"
|
||||
|
||||
[lvm-2]
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
|
||||
volume_backend_name = sample_LVM
|
||||
filter_function = "stats.total_capacity_gb >= 500"
|
||||
goodness_function = "(volume.size >= 25) ? 100 : 75"</programlisting>
|
||||
|
||||
<para>
|
||||
The above example combines the techniques from the first
|
||||
two examples. The best back end is now decided based on
the total capacity of the back end and the requested
|
||||
volume's size.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Example <filename>cinder.conf</filename> file configuration
|
||||
for accessing driver specific properties:
|
||||
</para>
|
||||
<programlisting language="ini">[default]
|
||||
scheduler_default_filters = DriverFilter
|
||||
scheduler_default_weighers = GoodnessWeigher
|
||||
enabled_backends = lvm-1,lvm-2,lvm-3
|
||||
|
||||
[lvm-1]
|
||||
volume_group = stack-volumes-lvmdriver-1
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name = lvmdriver-1
|
||||
filter_function = "volume.size < 5"
|
||||
goodness_function = "(capabilities.total_volumes < 3) ? 100 : 50"
|
||||
|
||||
[lvm-2]
|
||||
volume_group = stack-volumes-lvmdriver-2
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name = lvmdriver-2
|
||||
filter_function = "volumes.size < 5"
|
||||
goodness_function = "(capabilities.total_volumes < 8) ? 100 : 50"
|
||||
|
||||
[lvm-3]
|
||||
volume_group = stack-volumes-lvmdriver-3
|
||||
volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name = lvmdriver-3
|
||||
goodness_function = "55"</programlisting>
|
||||
|
||||
<para>
|
||||
The above is an example of how back-end specific properties
|
||||
can be used in the filter and goodness functions. In this
|
||||
example the LVM driver's 'total_volumes' capability is being
|
||||
used to determine which host gets used during a volume
|
||||
request.
|
||||
|
||||
In the above example, lvm-1 and lvm-2 will handle volume
|
||||
requests for all volumes with a size less than 5 GB.
|
||||
The lvm-1 host will have priority until it contains three
|
||||
or more volumes. After that, lvm-2 will have priority until
it contains eight or more volumes. The lvm-3 back end collects
all volumes of 5 GB or larger, as well as all
volumes once lvm-1 and lvm-2 lose priority.
|
||||
</para>
|
||||
</simplesect>
|
||||
</section>
|
@@ -1,219 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="glusterfs_backend">
|
||||
<title>Configure a GlusterFS back end</title>
|
||||
<para>This section explains how to configure OpenStack Block Storage to use GlusterFS as a back
|
||||
end. You must be able to access the GlusterFS shares from the server that hosts the
|
||||
<systemitem class="service">cinder</systemitem> volume service.</para>
|
||||
<note>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">The <systemitem class="service"
|
||||
>cinder</systemitem> volume service is named
|
||||
<literal>openstack-cinder-volume</literal> on the following distributions:</para>
|
||||
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>CentOS</para>
|
||||
</listitem>
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>Fedora</para>
|
||||
</listitem>
|
||||
<listitem os="opensuse;sles">
|
||||
<para>openSUSE</para>
|
||||
</listitem>
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>Red Hat Enterprise Linux</para>
|
||||
</listitem>
|
||||
<listitem os="opensuse;sles">
|
||||
<para>SUSE Linux Enterprise</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>In Ubuntu and Debian distributions, the <systemitem class="service"
|
||||
>cinder</systemitem> volume service is named
|
||||
<literal>cinder-volume</literal>.</para>
|
||||
</note>
|
||||
<para>Mounting GlusterFS volumes requires utilities and libraries from the
|
||||
<package>glusterfs-fuse</package> package. This package must be installed on all systems
|
||||
that will access volumes backed by GlusterFS.</para>
|
||||
<note os="ubuntu;debian">
|
||||
<para>The utilities and libraries required for mounting GlusterFS volumes on Ubuntu and
|
||||
Debian distributions are available from the <package>glusterfs-client</package> package
|
||||
instead.</para>
|
||||
</note>
|
||||
<para>For information on how to install and configure GlusterFS, refer to the <link
|
||||
xlink:href="http://gluster.org/community/documentation/index.php/Main_Page"
|
||||
>Gluster Documentation</link> page.</para>
|
||||
<procedure>
|
||||
<title>Configure GlusterFS for OpenStack Block Storage</title>
|
||||
<para>The GlusterFS server must also be configured accordingly in order to allow OpenStack
|
||||
Block Storage to use GlusterFS shares:</para>
|
||||
<step>
|
||||
<para>Log in as <systemitem>root</systemitem> to the GlusterFS server.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set each Gluster volume to use the same UID and GID as the
|
||||
<systemitem>cinder</systemitem> user:</para>
|
||||
<screen><prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> storage.owner-uid <replaceable>CINDER_UID</replaceable></userinput>
|
||||
<prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> storage.owner-gid <replaceable>CINDER_GID</replaceable></userinput></screen>
|
||||
<para>Where:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><replaceable>VOL_NAME</replaceable> is the Gluster volume name.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><replaceable>CINDER_UID</replaceable> is the UID of the
|
||||
<systemitem>cinder</systemitem> user.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><replaceable>CINDER_GID</replaceable> is the GID of the
|
||||
<systemitem>cinder</systemitem> user.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<note>
|
||||
<para>The default UID and GID of the <systemitem>cinder</systemitem> user is
|
||||
<literal>165</literal> on most distributions.</para>
|
||||
</note>
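<para>For example, for a hypothetical Gluster volume named
<literal>cindervol</literal> and the typical <systemitem>cinder</systemitem>
UID and GID of <literal>165</literal>, the commands would be:</para>
<screen><prompt>#</prompt> <userinput>gluster volume set cindervol storage.owner-uid 165</userinput>
<prompt>#</prompt> <userinput>gluster volume set cindervol storage.owner-gid 165</userinput></screen>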
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure each Gluster volume to accept <systemitem>libgfapi</systemitem>
|
||||
connections. To do this, set each Gluster volume to allow insecure ports:</para>
|
||||
<screen><prompt>#</prompt> <userinput>gluster volume set <replaceable>VOL_NAME</replaceable> server.allow-insecure on</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Enable client connections from unprivileged ports. To do this, add the following
|
||||
line to <filename>/etc/glusterfs/glusterd.vol</filename>:</para>
|
||||
<programlisting>option rpc-auth-allow-insecure on</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart the <systemitem>glusterd</systemitem> service:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service glusterd restart</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>Configure Block Storage to use a GlusterFS back end</title>
|
||||
<para>After you configure the GlusterFS service, complete these steps:</para>
|
||||
<step>
|
||||
<para>Log in as <systemitem>root</systemitem> to the system hosting the Block Storage
|
||||
service.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create a text file named <filename>glusterfs</filename> in
|
||||
<filename>/etc/cinder/</filename>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Add an entry to <filename>/etc/cinder/glusterfs</filename> for each GlusterFS
|
||||
share that OpenStack Block Storage should use for back end storage. Each entry
|
||||
should be a separate line, and should use the following format:</para>
|
||||
<programlisting><replaceable>HOST</replaceable>:/<replaceable>VOL_NAME</replaceable></programlisting>
|
||||
<para>Where:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
<replaceable>HOST</replaceable> is the IP address or host name of the GlusterFS server.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<replaceable>VOL_NAME</replaceable> is the name of an existing and accessible volume on the GlusterFS server.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>Optionally, if your environment requires additional mount options for a share, you
|
||||
can add them to the share's entry:</para>
|
||||
<programlisting><replaceable>HOST</replaceable>:/<replaceable>VOL_NAME</replaceable> -o <replaceable>OPTIONS</replaceable></programlisting>
|
||||
<para>Replace <replaceable>OPTIONS</replaceable> with a comma-separated list of mount
|
||||
options.</para>
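<para>For example, assuming a hypothetical GlusterFS server at
<literal>192.168.1.200</literal> that exports a volume named
<literal>cindervol</literal>, the entry would be:</para>
<programlisting>192.168.1.200:/cindervol</programlisting>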
|
||||
</step>
|
||||
<step>
|
||||
<para>Set <filename>/etc/cinder/glusterfs</filename> to be owned by the
|
||||
<systemitem>root</systemitem> user and the <systemitem>cinder</systemitem>
|
||||
group.</para>
|
||||
<screen><prompt>#</prompt> <userinput>chown root:cinder /etc/cinder/glusterfs</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set <filename>/etc/cinder/glusterfs</filename> to be readable by members of the
|
||||
<systemitem>cinder</systemitem> group:</para>
|
||||
<screen><prompt>#</prompt> <userinput>chmod 0640 /etc/cinder/glusterfs</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure OpenStack Block Storage to use the
|
||||
<filename>/etc/cinder/glusterfs</filename> file created earlier. To do so, open
|
||||
the <filename>/etc/cinder/cinder.conf</filename> configuration file and set the
|
||||
<literal>glusterfs_shares_config</literal> configuration key to
|
||||
<filename>/etc/cinder/glusterfs</filename>.</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On distributions that include
|
||||
<application>openstack-config</application>, you can configure this by running
|
||||
the following command instead:</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT glusterfs_shares_config /etc/cinder/glusterfs</userinput></screen>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">The following distributions include
|
||||
<application>openstack-config</application>:</para>
|
||||
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>CentOS</para>
|
||||
</listitem>
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>Fedora</para>
|
||||
</listitem>
|
||||
<listitem os="opensuse;sles">
|
||||
<para>openSUSE</para>
|
||||
</listitem>
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>Red Hat Enterprise Linux</para>
|
||||
</listitem>
|
||||
<listitem os="opensuse;sles">
|
||||
<para>SUSE Linux Enterprise</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure OpenStack Block Storage to use the correct volume driver, namely
<literal>cinder.volume.drivers.glusterfs.GlusterfsDriver</literal>. To do so, open the
<filename>/etc/cinder/cinder.conf</filename> configuration file and set the
<literal>volume_driver</literal> configuration key to
<literal>cinder.volume.drivers.glusterfs.GlusterfsDriver</literal>.</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On distributions that include
|
||||
<application>openstack-config</application>, you can configure this by running
|
||||
the following command instead:</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver</userinput></screen>
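<para>Taken together, the resulting entries in the <literal>[DEFAULT]</literal>
section of <filename>/etc/cinder/cinder.conf</filename> look like this:</para>
<programlisting language="ini">[DEFAULT]
# values from the steps in this procedure
volume_driver = cinder.volume.drivers.glusterfs.GlusterfsDriver
glusterfs_shares_config = /etc/cinder/glusterfs</programlisting>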
|
||||
</step>
|
||||
<step>
|
||||
<para>You can now restart the service to apply the configuration.</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">To restart the <systemitem class="service"
|
||||
>cinder</systemitem> volume service on CentOS, Fedora, openSUSE, Red Hat
|
||||
Enterprise Linux, or SUSE Linux Enterprise, run:</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>service openstack-cinder-volume restart</userinput></screen>
|
||||
<para os="debian;ubuntu">To restart the <systemitem class="service">cinder</systemitem>
|
||||
volume service on Ubuntu or Debian, run:</para>
|
||||
<screen os="debian;ubuntu"><prompt>#</prompt> <userinput>service cinder-volume restart</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>OpenStack Block Storage is now configured to use a GlusterFS back end.</para>
|
||||
<note>
|
||||
<para>In <filename>/etc/cinder/cinder.conf</filename>, the
|
||||
<literal>glusterfs_sparsed_volumes</literal> configuration key determines whether
|
||||
volumes are created as sparse files and grown as needed or fully allocated up front. The
|
||||
default and recommended value of this key is <literal>true</literal>, which ensures
|
||||
volumes are initially created as sparse files.</para>
|
||||
<para>Setting <literal>glusterfs_sparsed_volumes</literal> to <literal>false</literal> will
|
||||
result in volumes being fully allocated at the time of creation. This leads to increased
|
||||
delays in volume creation.</para>
|
||||
<para>However, should you choose to set <literal>glusterfs_sparsed_volumes</literal> to
|
||||
<literal>false</literal>, you can do so directly in
|
||||
<filename>/etc/cinder/cinder.conf</filename>.</para>
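<para>For example, the line to add to the <literal>[DEFAULT]</literal> section would be:</para>
<programlisting language="ini">[DEFAULT]
glusterfs_sparsed_volumes = false</programlisting>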
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On distributions that include
|
||||
<application>openstack-config</application>, you can configure this by running the
|
||||
following command instead:</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT glusterfs_sparsed_volumes false</userinput></screen>
|
||||
</note>
|
||||
<important>
|
||||
<para>If a client host has SELinux enabled, the <systemitem>virt_use_fusefs</systemitem>
|
||||
Boolean should also be enabled if the host requires access to GlusterFS volumes on an
|
||||
instance. To enable this Boolean, run the following command as the
|
||||
<systemitem>root</systemitem> user:</para>
|
||||
<screen><prompt>#</prompt> <userinput>setsebool -P virt_use_fusefs on</userinput></screen>
|
||||
<para>This command also makes the Boolean persistent across reboots. Run this command on all
|
||||
client hosts that require access to GlusterFS volumes on an instance. This includes all
|
||||
compute nodes.</para>
|
||||
</important>
|
||||
</section>
|
@ -1,36 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_gluster_removal_gracefully">
|
||||
<title>Gracefully remove a GlusterFS volume from usage</title>
|
||||
<para>Configuring the <systemitem>cinder</systemitem> volume
|
||||
service to use GlusterFS involves creating a shares file (for
|
||||
example, <filename>/etc/cinder/glusterfs</filename>). This
|
||||
shares file lists each GlusterFS volume (with its
|
||||
corresponding storage server) that the
|
||||
<systemitem>cinder</systemitem> volume service can use for
|
||||
back end storage.</para>
|
||||
<para>To remove a GlusterFS volume from usage as a back end,
|
||||
delete the volume's corresponding entry from the shares file.
|
||||
After doing so, restart the Block Storage services.</para>
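<para>For example, to stop using a hypothetical share
<literal>192.168.1.201:/oldvol</literal>, delete its line from
<filename>/etc/cinder/glusterfs</filename> so that only the shares that
should remain in use are listed:</para>
<programlisting>192.168.1.200:/cindervol</programlisting>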
|
||||
<para os="rhel;centos;fedora;opensuse;sles">To restart the
|
||||
Block Storage services on CentOS, Fedora, openSUSE, Red Hat
|
||||
Enterprise Linux, or SUSE Linux Enterprise, run:</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>for i in api scheduler volume; do service openstack-cinder-$i restart; done</userinput></screen>
|
||||
<para os="debian;ubuntu">To restart the Block Storage services
|
||||
on Ubuntu or Debian, run:</para>
|
||||
<screen os="debian;ubuntu"><prompt>#</prompt> <userinput>for i in api scheduler volume; do service cinder-${i} restart; done</userinput></screen>
|
||||
<para>Restarting the Block Storage services will prevent
|
||||
the <systemitem>cinder</systemitem> volume service from
|
||||
exporting the deleted GlusterFS volume. This will prevent
|
||||
any instances from mounting the volume from that point
|
||||
onwards.</para>
|
||||
<para>However, the removed GlusterFS volume might still be
|
||||
mounted on an instance at this point. Typically, this is the
|
||||
case when the volume was already mounted while its entry was
|
||||
deleted from the shares file. Whenever this occurs, you
|
||||
will have to unmount the volume as normal after the Block
|
||||
Storage services are restarted.</para>
|
||||
</section>
|
@ -1,37 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="cinder-api-throughput">
|
||||
<title>Increase Block Storage API service throughput</title>
|
||||
<para>By default, the Block Storage API service runs in one process.
|
||||
This limits the number of API requests that the Block Storage
|
||||
service can process at any given time. In a production
|
||||
environment, you should increase the Block Storage API throughput
|
||||
by allowing the Block Storage API service to run in as many
|
||||
processes as the machine capacity allows.</para>
|
||||
<note>
|
||||
<para>The Block Storage API service is named
|
||||
<literal>openstack-cinder-api</literal> on the following
|
||||
distributions: CentOS, Fedora, openSUSE, Red Hat Enterprise
|
||||
Linux, and SUSE Linux Enterprise. In Ubuntu and Debian
|
||||
distributions, the Block Storage API service is named
|
||||
<literal>cinder-api</literal>.</para>
|
||||
</note>
|
||||
<para>To do so, use the Block Storage API service option
|
||||
<option>osapi_volume_workers</option>. This option allows
|
||||
you to specify the number of API service workers (or OS processes)
|
||||
to launch for the Block Storage API service.</para>
|
||||
<para>To configure this option, open the
|
||||
<filename>/etc/cinder/cinder.conf</filename> configuration file
|
||||
and set the <literal>osapi_volume_workers</literal> configuration
|
||||
key to the number of CPU cores/threads on a machine.</para>
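<para>For example, on a hypothetical machine with eight CPU cores, the
setting would be:</para>
<programlisting language="ini">[DEFAULT]
# example value; use the number of CPU cores/threads on your machine
osapi_volume_workers = 8</programlisting>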
|
||||
<para>On distributions that
|
||||
include <application>openstack-config</application>, you can
|
||||
configure this by running the following command instead:</para>
|
||||
<screen><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT osapi_volume_workers <replaceable>CORES</replaceable></userinput></screen>
|
||||
<para>Replace <replaceable>CORES</replaceable> with the number of CPU
|
||||
cores/threads on a machine.</para>
|
||||
</section>
|
@ -1,172 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="multi_backend">
|
||||
<title>Configure multiple-storage back ends</title>
|
||||
<para>When you configure multiple-storage back ends, you can
|
||||
create several back-end storage solutions that serve the same
|
||||
OpenStack Compute configuration; one <systemitem
class="service">cinder-volume</systemitem> service is launched for each
|
||||
back-end storage or back-end storage pool.</para>
|
||||
<para>In a multiple-storage back-end configuration, each back end has a name
|
||||
(<literal>volume_backend_name</literal>). Several back
|
||||
ends can have the same name. In that case, the scheduler
properly decides which back end to create the volume in.</para>
|
||||
<para>The name of the back end is declared as an
|
||||
extra-specification of a volume type (for example,
|
||||
<literal>volume_backend_name=LVM_iSCSI</literal>). When a
|
||||
volume is created, the scheduler chooses an appropriate back
|
||||
end to handle the request, according to the volume type
|
||||
specified by the user.</para>
|
||||
<simplesect>
|
||||
<title>Enable multiple-storage back ends</title>
|
||||
<para>To enable multiple-storage back ends, you must set
|
||||
the <option>enabled_backends</option> flag in the
|
||||
<filename>cinder.conf</filename> file. This flag
|
||||
defines the names (separated by a comma) of the
|
||||
configuration groups for the different back ends: one name
|
||||
is associated with one configuration group for a back end
(for example, <literal>[lvmdriver-1]</literal>).</para>
|
||||
<note>
|
||||
<para>The configuration group name is not related to the
|
||||
<literal>volume_backend_name</literal>.</para>
|
||||
</note>
|
||||
<note>
|
||||
<para>After setting the <option>enabled_backends</option> flag
|
||||
on an existing cinder service, and restarting the Block Storage
|
||||
services, the original <literal>host</literal>
|
||||
service is replaced with a new host service. The new service
|
||||
appears with a name like <literal>host@backend</literal>. Use:
|
||||
<screen><prompt>$</prompt> <userinput>cinder-manage volume update_host --currentname <replaceable>CURRENTNAME</replaceable> --newname <replaceable>CURRENTNAME</replaceable>@<replaceable>BACKEND</replaceable></userinput></screen>
|
||||
to convert current block devices to the new hostname.</para>
|
||||
</note>
|
||||
<para>The options for a configuration group must be defined in
|
||||
the group (or default options are used). All the standard
|
||||
Block Storage configuration options
|
||||
(<literal>volume_group</literal>,
|
||||
<literal>volume_driver</literal>, and so on) might be
|
||||
used in a configuration group. Configuration values in the
|
||||
<literal>[DEFAULT]</literal> configuration group are
|
||||
not used.</para>
|
||||
<para>These examples show three back ends:</para>
|
||||
<programlisting language="ini">enabled_backends=lvmdriver-1,lvmdriver-2,lvmdriver-3
|
||||
[lvmdriver-1]
|
||||
volume_group=cinder-volumes-1
|
||||
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name=LVM_iSCSI
|
||||
[lvmdriver-2]
|
||||
volume_group=cinder-volumes-2
|
||||
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name=LVM_iSCSI
|
||||
[lvmdriver-3]
|
||||
volume_group=cinder-volumes-3
|
||||
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name=LVM_iSCSI_b</programlisting>
|
||||
<para>In this configuration, <literal>lvmdriver-1</literal>
|
||||
and <literal>lvmdriver-2</literal> have the same
|
||||
<literal>volume_backend_name</literal>. If a volume
|
||||
creation requests the <literal>LVM_iSCSI</literal> back
|
||||
end name, the scheduler uses the capacity filter scheduler
|
||||
to choose the most suitable driver, which is either
|
||||
<literal>lvmdriver-1</literal> or
|
||||
<literal>lvmdriver-2</literal>. The capacity filter
|
||||
scheduler is enabled by default. The next section provides
|
||||
more information. In addition, this example presents a
|
||||
<literal>lvmdriver-3</literal> back end.</para>
|
||||
<note>
|
||||
<para>For Fibre Channel drivers that support multipath, the configuration group
requires the <literal>use_multipath_for_image_xfer=true</literal> option.
The example below shows details for HP 3PAR and EMC Fibre Channel
drivers.</para>
|
||||
<programlisting language="ini">[3par]
|
||||
use_multipath_for_image_xfer = true
|
||||
volume_driver = cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
|
||||
volume_backend_name = 3parfc
|
||||
|
||||
[emc]
|
||||
use_multipath_for_image_xfer = true
|
||||
volume_driver = cinder.volume.drivers.emc.emc_smis_fc.EMCSMISFCDriver
|
||||
volume_backend_name = emcfc</programlisting>
|
||||
</note>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Configure Block Storage scheduler multi back
|
||||
end</title>
|
||||
<para>You must enable the <option>filter_scheduler</option>
|
||||
option to use multiple-storage back ends. The filter scheduler:</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>Filters the available back
|
||||
ends. By default,
|
||||
<literal>AvailabilityZoneFilter</literal>,
|
||||
<literal>CapacityFilter</literal> and
|
||||
<literal>CapabilitiesFilter</literal> are
|
||||
enabled.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Weights the previously
|
||||
filtered back ends. By default, the
|
||||
<option>CapacityWeigher</option> option is
|
||||
enabled. When this option is enabled, the filter
|
||||
scheduler assigns the highest weight to back ends
|
||||
with the most available capacity.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
<para>The scheduler uses filters and weights to pick the best
|
||||
back end to handle the request. The scheduler uses volume
|
||||
types to explicitly create volumes on specific back
|
||||
ends.</para>
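<para>As a sketch only (verify the exact option name and class path against
the configuration reference for your release), the filter scheduler is
typically selected in <filename>cinder.conf</filename> with a line such as:</para>
<programlisting language="ini">[DEFAULT]
# assumed option name and value; check your release's configuration reference
scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler</programlisting>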
|
||||
<!-- TODO: when filter/weighing scheduler documentation will be up, a ref should be added here -->
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Volume type</title>
|
||||
<para>Before a volume type can be used, it must be declared to
Block Storage by running the following
command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder --os-username admin --os-tenant-name admin type-create lvm</userinput></screen>
|
||||
<para>Then, an extra-specification has to be created to link
|
||||
the volume type to a back end name. Run this
|
||||
command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder --os-username admin --os-tenant-name admin type-key lvm set volume_backend_name=LVM_iSCSI</userinput></screen>
|
||||
<para>This example creates an <literal>lvm</literal> volume
|
||||
type with <literal>volume_backend_name=LVM_iSCSI</literal>
|
||||
as extra-specifications.</para>
|
||||
<para>Create another volume type:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder --os-username admin --os-tenant-name admin type-create lvm_gold</userinput></screen>
|
||||
<screen><prompt>$</prompt> <userinput>cinder --os-username admin --os-tenant-name admin type-key lvm_gold set volume_backend_name=LVM_iSCSI_b</userinput></screen>
|
||||
<para>This second volume type is named
|
||||
<literal>lvm_gold</literal> and has
|
||||
<literal>LVM_iSCSI_b</literal> as back end
|
||||
name.</para>
|
||||
<note>
|
||||
<para>To list the extra-specifications, use this
|
||||
command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder --os-username admin --os-tenant-name admin extra-specs-list</userinput></screen>
|
||||
</note>
|
||||
<note>
|
||||
<para>If a volume type points to a
|
||||
<literal>volume_backend_name</literal> that does
|
||||
not exist in the Block Storage configuration, the
|
||||
<literal>filter_scheduler</literal> returns an
|
||||
error that it cannot find a valid host with the
|
||||
suitable back end.</para>
|
||||
</note>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Usage</title>
|
||||
<para>When you create a volume, you must specify the volume
|
||||
type. The extra-specifications of the volume type are used
|
||||
to determine which back end has to be used.
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --volume_type lvm --display_name test_multi_backend 1</userinput></screen>
|
||||
Considering the <literal>cinder.conf</literal> described
|
||||
previously, the scheduler creates this volume on
|
||||
<literal>lvmdriver-1</literal> or
|
||||
<literal>lvmdriver-2</literal>.</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --volume_type lvm_gold --display_name test_multi_backend 1</userinput></screen>
|
||||
<para>This second volume is created on
|
||||
<literal>lvmdriver-3</literal>.</para>
|
||||
</simplesect>
|
||||
</section>
|
@ -1,214 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="nfs_backend">
|
||||
<title>Configure an NFS storage back end</title>
|
||||
<para>This section explains how to configure OpenStack Block
|
||||
Storage to use NFS storage. You must be able to access the NFS
|
||||
shares from the server that hosts the
|
||||
<systemitem class="service">cinder</systemitem> volume service.
|
||||
</para>
|
||||
<note>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">The
|
||||
<systemitem class="service">cinder</systemitem> volume service
|
||||
is named <literal>openstack-cinder-volume</literal> on the
|
||||
following distributions:</para>
|
||||
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
|
||||
<listitem os="rhel;centos;fedora"><para>CentOS</para></listitem>
|
||||
<listitem os="rhel;centos;fedora"><para>Fedora</para></listitem>
|
||||
<listitem os="opensuse;sles"><para>openSUSE</para></listitem>
|
||||
<listitem os="rhel;centos;fedora"><para>Red Hat Enterprise
|
||||
Linux</para></listitem>
|
||||
<listitem os="opensuse;sles"><para>SUSE Linux Enterprise
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
<para>In Ubuntu and Debian distributions, the
|
||||
<systemitem class="service">cinder</systemitem> volume
|
||||
service is named <literal>cinder-volume</literal>.</para>
|
||||
</note>
|
||||
<procedure>
|
||||
<title>Configure Block Storage to use an NFS storage back end</title>
|
||||
<step>
|
||||
<para>Log in as <systemitem>root</systemitem> to the system
|
||||
hosting the <systemitem>cinder</systemitem> volume
|
||||
service.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>
|
||||
Create a text file named <filename>nfsshares</filename> in
|
||||
<filename>/etc/cinder/</filename>.
|
||||
</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>
|
||||
Add an entry to <filename>/etc/cinder/nfsshares</filename>
|
||||
for each NFS share that the
|
||||
<systemitem class="service">cinder</systemitem>
|
||||
volume service should use for back end storage. Each entry
|
||||
should be a separate line, and should use the following
|
||||
format:
|
||||
</para>
|
||||
<programlisting><replaceable>HOST</replaceable>:<replaceable>SHARE</replaceable></programlisting>
|
||||
<para>Where:</para>
|
||||
<itemizedlist>
|
||||
<listitem><para><replaceable>HOST</replaceable> is the IP
|
||||
address or host name of the NFS server.</para></listitem>
|
||||
<listitem><para><replaceable>SHARE</replaceable> is the
|
||||
absolute path to an existing and accessible NFS share.
|
||||
</para></listitem>
|
||||
</itemizedlist>
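<para>For example, for a hypothetical NFS server at
<literal>192.168.1.210</literal> that exports
<filename>/export/cinder</filename>, the entry would be:</para>
<programlisting>192.168.1.210:/export/cinder</programlisting>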
|
||||
</step>
|
||||
<step>
|
||||
<para>
|
||||
Set <filename>/etc/cinder/nfsshares</filename> to be
|
||||
owned by the <systemitem>root</systemitem> user and the
|
||||
<systemitem>cinder</systemitem> group:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>chown root:cinder /etc/cinder/nfsshares</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>
|
||||
Set <filename>/etc/cinder/nfsshares</filename> to be
|
||||
readable by members of the <systemitem>cinder</systemitem>
|
||||
group:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>chmod 0640 /etc/cinder/nfsshares</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure the
|
||||
<systemitem class="service">cinder</systemitem>
|
||||
volume service to use the
|
||||
<filename>/etc/cinder/nfsshares</filename> file created
|
||||
earlier. To do so, open the
|
||||
<filename>/etc/cinder/cinder.conf</filename> configuration
|
||||
file and set the
|
||||
<literal>nfs_shares_config</literal> configuration key to
|
||||
<filename>/etc/cinder/nfsshares</filename>.
|
||||
</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On
|
||||
distributions that include
|
||||
<application>openstack-config</application>, you can
|
||||
configure this by running the following command instead:
|
||||
</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT nfs_shares_config /etc/cinder/nfsshares</userinput></screen>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">The following
|
||||
distributions include
|
||||
<application>openstack-config</application>:</para>
|
||||
<itemizedlist os="rhel;centos;fedora;opensuse;sles">
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>CentOS</para>
|
||||
</listitem>
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>Fedora</para>
|
||||
</listitem>
|
||||
<listitem os="opensuse;sles">
|
||||
<para>openSUSE</para>
|
||||
</listitem>
|
||||
<listitem os="rhel;centos;fedora">
|
||||
<para>Red Hat Enterprise Linux</para>
|
||||
</listitem>
|
||||
<listitem os="opensuse;sles">
|
||||
<para>SUSE Linux Enterprise</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</step>
|
||||
<step>
|
||||
<para>
|
||||
Optionally, provide any additional NFS mount options
|
||||
required in your environment in the
|
||||
<literal>nfs_mount_options</literal> configuration key of
|
||||
<filename>/etc/cinder/cinder.conf</filename>.
|
||||
If your NFS shares do not require any additional mount
|
||||
options (or if you are unsure), skip this step.
|
||||
</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On
|
||||
distributions that include
|
||||
<application>openstack-config</application>, you can
|
||||
configure this by running the following command instead:
|
||||
</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT nfs_mount_options <replaceable>OPTIONS</replaceable></userinput></screen>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">
|
||||
Replace <replaceable>OPTIONS</replaceable> with the mount
|
||||
options to be used when accessing NFS shares. See the
|
||||
manual page for NFS for more information on available
|
||||
mount options (<command>man nfs</command>).
|
||||
</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure the
|
||||
<systemitem class="service">cinder</systemitem>
|
||||
volume service to use the correct volume driver, namely
|
||||
<literal>cinder.volume.drivers.nfs.NfsDriver</literal>. To
|
||||
do so, open the
|
||||
<filename>/etc/cinder/cinder.conf</filename>
|
||||
configuration file and set the
|
||||
<literal>volume_driver</literal> configuration key to
|
||||
<literal>cinder.volume.drivers.nfs.NfsDriver</literal>.
|
||||
</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On
|
||||
distributions that include
|
||||
<application>openstack-config</application>, you can
|
||||
configure this by running the following command instead:
|
||||
</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver</userinput></screen>
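<para>Taken together, the resulting entries in the
<literal>[DEFAULT]</literal> section of
<filename>/etc/cinder/cinder.conf</filename> look like this (add
<literal>nfs_mount_options</literal> only if your shares require it):</para>
<programlisting language="ini">[DEFAULT]
# values from the steps in this procedure
nfs_shares_config = /etc/cinder/nfsshares
volume_driver = cinder.volume.drivers.nfs.NfsDriver</programlisting>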
|
||||
</step>
|
||||
<step><para>
|
||||
You can now restart the service to apply the configuration.
|
||||
</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">To restart the
|
||||
<systemitem class="service">cinder</systemitem> volume service
|
||||
on CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, or SUSE
|
||||
Linux Enterprise, run:</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>service openstack-cinder-volume restart</userinput></screen>
|
||||
<para os="debian;ubuntu">To restart the
|
||||
<systemitem>cinder</systemitem> volume service on Ubuntu or
|
||||
Debian, run:</para>
|
||||
<screen os="debian;ubuntu"><prompt>#</prompt> <userinput>service cinder-volume restart</userinput></screen></step>
|
||||
</procedure>
|
||||
<note>
|
||||
<para>
|
||||
The <literal>nfs_sparsed_volumes</literal> configuration
|
||||
key determines whether volumes are created as sparse files
|
||||
and grown as needed or fully allocated up front. The
|
||||
default and recommended value is <literal>true</literal>,
|
||||
which ensures volumes are initially created as sparse
|
||||
files.
|
||||
</para>
|
||||
<para>
|
||||
Setting <literal>nfs_sparsed_volumes</literal> to
|
||||
<literal>false</literal> will result in volumes
|
||||
being fully allocated at the time of creation. This
|
||||
leads to increased delays in volume creation.
|
||||
</para>
|
||||
<para>
|
||||
However, should you choose to set
|
||||
<literal>nfs_sparsed_volumes</literal> to
|
||||
<literal>false</literal>, you can do so directly in
|
||||
<filename>/etc/cinder/cinder.conf</filename>.</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On
|
||||
distributions that include
|
||||
<application>openstack-config</application>, you can
|
||||
configure this by running the following command instead:
|
||||
</para>
|
||||
<screen os="rhel;centos;fedora;opensuse;sles"><prompt>#</prompt> <userinput>openstack-config --set /etc/cinder/cinder.conf \
|
||||
DEFAULT nfs_sparsed_volumes false</userinput></screen>
|
||||
</note>
|
||||
<important>
|
||||
<para>If a client host has SELinux enabled, the
|
||||
<systemitem>virt_use_nfs</systemitem> Boolean should also be
|
||||
enabled if the host requires access to NFS volumes on an
|
||||
instance. To enable this Boolean, run the following command
|
||||
as the <systemitem>root</systemitem> user:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>setsebool -P virt_use_nfs on</userinput></screen>
|
||||
<para>This command also makes the Boolean persistent across
|
||||
reboots. Run this command on all client hosts that require
|
||||
access to NFS volumes on an instance. This includes all Compute
|
||||
nodes.</para>
|
||||
</important>
|
||||
</section>
|
@ -1,105 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="over_subscription">
|
||||
<title>Oversubscription in thin provisioning</title>
|
||||
<para>OpenStack Block Storage enables you to choose a volume back
|
||||
end based on virtual capacities for thin provisioning using the
|
||||
oversubscription ratio.
|
||||
</para>
|
||||
<para>A reference implementation is provided for the default LVM driver.
|
||||
The illustration below uses the LVM driver as an example.
|
||||
</para>
|
||||
<simplesect><title>Configure oversubscription settings</title>
|
||||
<para>To support oversubscription in thin provisioning, a flag
|
||||
<option>max_over_subscription_ratio</option> is introduced into
|
||||
<filename>cinder.conf</filename>. This is a float representation
|
||||
of the oversubscription ratio when thin provisioning is involved.
|
||||
The default ratio is 20.0, meaning provisioned capacity can be 20
times the total physical capacity. A ratio of 10.5 means
provisioned capacity can be 10.5 times the total physical
|
||||
capacity. A ratio of 1.0 means provisioned capacity cannot exceed
|
||||
the total physical capacity. A ratio lower than 1.0 is
|
||||
ignored and the default value is used instead.
|
||||
</para>
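<para>For example, to cap a back end such as <literal>lvmdriver-1</literal>
at 10.5 times its physical capacity, its configuration group in
<filename>cinder.conf</filename> could contain:</para>
<programlisting language="ini">[lvmdriver-1]
# example ratio; the default is 20.0
max_over_subscription_ratio = 10.5</programlisting>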
|
||||
<note><para><option>max_over_subscription_ratio</option> can be
|
||||
configured for each back end when multiple-storage back ends are
|
||||
enabled. It is provided as a reference implementation and is used
|
||||
by the LVM driver. However, it is not a requirement for a driver
|
||||
to use this option from <filename>cinder.conf</filename>.
|
||||
<option>max_over_subscription_ratio</option> is for configuring
|
||||
a back end. For a driver that supports multiple pools per back
|
||||
end, it can report this ratio for each pool. The LVM driver does
|
||||
not support multiple pools.
|
||||
</para></note>
|
||||
<para>The existing <option>reserved_percentage</option> flag is used
|
||||
to prevent over provisioning. This flag represents the percentage
|
||||
of the back-end capacity that is reserved.</para>
|
||||
<note><para>The way <option>reserved_percentage
</option> is used has changed. In the past, it was measured against the
free capacity; now it is measured against the total capacity.
|
||||
</para></note>
|
||||
</simplesect>
|
||||
<simplesect><title>Capabilities</title>
|
||||
<para>Drivers can report the following capabilities for a back end or
|
||||
a pool:
|
||||
</para>
|
||||
<programlisting language="ini">thin_provisioning_support=True(or False)
|
||||
thick_provisioning_support=True(or False)
|
||||
provisioned_capacity_gb=<replaceable>PROVISIONED_CAPACITY</replaceable>
|
||||
max_over_subscription_ratio=<replaceable>MAX_RATIO</replaceable></programlisting>
|
||||
<para>Where <replaceable>PROVISIONED_CAPACITY</replaceable> is the
|
||||
apparent allocated space indicating how much capacity has been
|
||||
provisioned and <replaceable>MAX_RATIO</replaceable> is the
|
||||
maximum oversubscription ratio. For the LVM driver, it is
|
||||
<option>max_over_subscription_ratio</option> in <filename>
|
||||
cinder.conf</filename>.
|
||||
</para>
|
||||
<para>Two capabilities are added here to allow a back end or pool to
|
||||
claim support for thin provisioning, or thick provisioning,
|
||||
or both.
|
||||
</para>
|
||||
<para>The LVM driver reports <option>thin_provisioning_support=True
|
||||
</option> and <option>thick_provisioning_support=False</option>
|
||||
if the <option>lvm_type</option> flag in <filename>cinder.conf
|
||||
</filename> is <literal>thin</literal>. Otherwise it reports
|
||||
<option>thin_provisioning_support=False</option> and <option>
|
||||
thick_provisioning_support=True</option>.
|
||||
</para>
|
||||
</simplesect>
|
||||
<simplesect><title>Volume type extra specs</title>
|
||||
<para>If volume type is provided as part of the volume creation
|
||||
request, it can have the following extra specs defined:</para>
|
||||
<programlisting language="ini">'capabilities:thin_provisioning_support': '<is> True' or '<is> False'
|
||||
'capabilities:thick_provisioning_support': '<is> True' or '<is> False'</programlisting>
|
||||
<note><para><literal>capabilities</literal> scope key before
|
||||
<literal>thin_provisioning_support</literal> and <literal>
|
||||
thick_provisioning_support</literal> is not required. So the
|
||||
following works too:</para></note>
|
||||
<programlisting language="ini">'thin_provisioning_support': '<is> True' or '<is> False'
|
||||
'thick_provisioning_support': '<is> True' or '<is> False'</programlisting>
|
||||
<para>The above extra specs are used by the scheduler to find a back
|
||||
end that supports thin provisioning, thick provisioning, or both to
|
||||
match the needs of a specific volume type.
|
||||
</para>
|
||||
</simplesect>
|
||||
<simplesect><title>Capacity filter</title>
|
||||
<para>In the capacity filter, <literal>max_over_subscription_ratio
|
||||
</literal> is used when choosing a back end if <literal>
|
||||
thin_provisioning_support</literal> is True and <option>
|
||||
max_over_subscription_ratio</option> is greater than 1.0.
|
||||
</para>
|
||||
</simplesect>
|
||||
<simplesect><title>Capacity weigher</title>
|
||||
<para>In the capacity weigher, virtual free capacity is used for
|
||||
ranking if <literal>thin_provisioning_support</literal> is True.
|
||||
Otherwise, real free capacity will be used as before.
|
||||
</para>
|
||||
</simplesect>
|
||||
</section>
|
@ -1,47 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="ratelimit-volume-copy-bandwidth">
|
||||
<title>Rate-limit volume copy bandwidth</title>
|
||||
<para>When you create a new volume from an image or an existing
|
||||
volume, or when you upload a volume image to the Image Service,
|
||||
the large data copy can stress disk and network bandwidth.
To mitigate slowdowns in data access from the instances,
|
||||
OpenStack Block Storage supports rate-limiting of volume data
|
||||
copy bandwidth.</para>
|
||||
<simplesect>
|
||||
<title>Configure volume copy bandwidth limit</title>
|
||||
<para>To configure the volume copy bandwidth limit, set the
|
||||
<option>volume_copy_bps_limit</option> option in the
|
||||
configuration groups for each back end in the
|
||||
<filename>cinder.conf</filename> file. This option takes
|
||||
the maximum bandwidth allowed for volume data
copy, as an integer in bytes per second. If this option is set to
|
||||
<literal>0</literal>, the rate-limit is disabled.</para>
|
||||
<para>While multiple volume data copy operations are running
|
||||
in the same back end, the specified bandwidth is divided among
the copies.</para>
|
||||
<para>Example <filename>cinder.conf</filename> configuration file
|
||||
to limit volume copy bandwidth of <literal>lvmdriver-1</literal>
|
||||
up to 100 MiB/s:</para>
|
||||
<programlisting language="ini">[lvmdriver-1]
|
||||
volume_group=cinder-volumes-1
|
||||
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name=LVM_iSCSI
|
||||
volume_copy_bps_limit=104857600</programlisting>
|
||||
<note>
|
||||
<para>This feature requires libcgroup to set up blkio cgroup
|
||||
for the disk I/O bandwidth limit. libcgroup is provided
|
||||
by the <package>cgroup-bin</package> package in Debian
|
||||
and Ubuntu, or by the <package>libcgroup-tools</package>
|
||||
package in Fedora, Red Hat Enterprise Linux, CentOS, openSUSE, and
|
||||
SUSE Linux Enterprise.</para>
|
||||
</note>
|
||||
<note>
|
||||
<para>Some back ends that use remote file systems, such as NFS,
are not supported by this feature.</para>
|
||||
</note>
|
||||
</simplesect>
|
||||
</section>
|
@ -1,51 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_HTTP_bad_req_in_cinder_vol_log">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Failed to attach volume after detaching</title>
|
||||
<section
|
||||
xml:id="section_ts_HTTP_bad_req_in_cinder_vol_log_problem">
|
||||
<title>Problem</title>
|
||||
<para>These errors appear in the
|
||||
<filename>cinder-volume.log</filename> file.</para>
|
||||
<screen><?db-font-size 75%?><computeroutput>2013-05-03 15:16:33 INFO [cinder.volume.manager] Updating volume status
|
||||
2013-05-03 15:16:33 DEBUG [hp3parclient.http]
|
||||
REQ: curl -i https://10.10.22.241:8080/api/v1/cpgs -X GET -H "X-Hp3Par-Wsapi-Sessionkey: 48dc-b69ed2e5
|
||||
f259c58e26df9a4c85df110c-8d1e8451" -H "Accept: application/json" -H "User-Agent: python-3parclient"
|
||||
|
||||
2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP:{'content-length': 311, 'content-type': 'text/plain',
|
||||
'status': '400'}
|
||||
|
||||
2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP BODY:Second simultaneous read on fileno 13 detected.
|
||||
Unless you really know what you're doing, make sure that only one greenthread can read any particular socket.
|
||||
Consider using a pools.Pool. If you do know what you're doing and want to disable this error,
|
||||
call eventlet.debug.hub_multiple_reader_prevention(False)
|
||||
|
||||
2013-05-03 15:16:33 ERROR [cinder.manager] Error during VolumeManager._report_driver_status: Bad request (HTTP 400)
|
||||
Traceback (most recent call last):
|
||||
File "/usr/lib/python2.7/dist-packages/cinder/manager.py", line 167, in periodic_tasks task(self, context)
|
||||
File "/usr/lib/python2.7/dist-packages/cinder/volume/manager.py", line 690, in _report_driver_status volume_stats =
|
||||
self.driver.get_volume_stats(refresh=True)
|
||||
File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_fc.py", line 77, in get_volume_stats stats =
|
||||
self.common.get_volume_stats(refresh, self.client)
|
||||
File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_common.py", line 421, in get_volume_stats cpg =
|
||||
client.getCPG(self.config.hp3par_cpg)
|
||||
File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 231, in getCPG cpgs = self.getCPGs()
|
||||
File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 217, in getCPGs response, body = self.http.get('/cpgs')
|
||||
File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 255, in get return self._cs_request(url, 'GET', **kwargs)
|
||||
File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 224, in _cs_request **kwargs)
|
||||
File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 198, in _time_request resp, body = self.request(url, method, **kwargs)
|
||||
File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 192, in request raise exceptions.from_response(resp, body)
|
||||
HTTPBadRequest: Bad request (HTTP 400)</computeroutput></screen>
|
||||
</section>
|
||||
<section
|
||||
xml:id="section_ts_HTTP_bad_req_in_cinder_vol_log_solution">
|
||||
<title>Solution</title>
|
||||
<para>You need to update your copy of the
|
||||
<filename>hp_3par_fc.py</filename> driver to a version that
contains the synchronization code.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,189 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_cinder_config">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title xml:id="ts_block_config">Troubleshoot the Block Storage
|
||||
configuration</title>
|
||||
<para>Most Block Storage errors are caused by incorrect volume
|
||||
configurations that result in volume creation failures. To resolve
|
||||
these failures, review these logs:</para>
|
||||
<itemizedlist>
|
||||
<listitem><para><systemitem class="service">cinder-api</systemitem>
|
||||
log (<filename>/var/log/cinder/api.log</filename>)</para></listitem>
|
||||
<listitem><para><systemitem class="service">cinder-volume</systemitem>
|
||||
log (<filename>/var/log/cinder/volume.log</filename>)</para></listitem>
|
||||
</itemizedlist>
|
||||
<para>The <systemitem class="service">cinder-api</systemitem> log
|
||||
is useful for determining if you have endpoint or connectivity
|
||||
issues. If you send a request to create a volume and it fails,
|
||||
review the <systemitem class="service" >cinder-api</systemitem>
|
||||
log to determine whether the request made it to the Block Storage
|
||||
service. If the request is logged and you see no errors or
|
||||
trace-backs, check the
|
||||
<systemitem class="service">cinder-volume</systemitem> log for
|
||||
errors or trace-backs.</para>
|
||||
<note>
|
||||
<para>Create commands are listed in the <systemitem
|
||||
class="service">cinder-api</systemitem> log.</para>
|
||||
</note>
|
||||
<para>These entries in the
|
||||
<filename>cinder.openstack.common.log</filename> file can
|
||||
be used to assist in troubleshooting your block storage
|
||||
configuration.</para>
|
||||
<programlisting language="ini">
|
||||
# Print debugging output (set logging level to DEBUG instead
|
||||
# of default WARNING level). (boolean value)
|
||||
#debug=false
|
||||
|
||||
# Print more verbose output (set logging level to INFO instead
|
||||
# of default WARNING level). (boolean value)
|
||||
#verbose=false
|
||||
|
||||
# Log output to standard error (boolean value)
|
||||
#use_stderr=true
|
||||
|
||||
# Default file mode used when creating log files (string
|
||||
# value)
|
||||
#logfile_mode=0644
|
||||
|
||||
# format string to use for log messages with context (string
|
||||
# value)
|
||||
#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
|
||||
|
||||
# format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d

# prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s

# list of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN

# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s]"

# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s]"

# A logging.Formatter log message format string which may use
# any of the available logging.LogRecord attributes. Default:
# %(default)s (string value)
#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s

# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S

# (Optional) Name of log file to output to. If not set,
# logging will go to stdout. (string value)
#log_file=<None>

# (Optional) The directory to keep log files in (will be
# prepended to --log-file) (string value)
#log_dir=<None>

# If this option is specified, the logging configuration file
# specified is used and overrides any other logging options
# specified. Please see the Python logging module
# documentation for details on logging configuration files.
# (string value)
#log_config=<None>

# Use syslog for logging. (boolean value)
#use_syslog=false

# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER</programlisting>
|
||||
<para>These common issues might occur during configuration. To
|
||||
correct, use these suggested solutions.</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Issues with <literal>state_path</literal> and
|
||||
<literal>volumes_dir</literal> settings.</para>
|
||||
<para>OpenStack Block Storage uses <command>tgtd</command>
|
||||
as the default iSCSI helper and implements persistent targets.
|
||||
This means that in the case of a tgt restart or even a
|
||||
node reboot your existing volumes on that node will be
|
||||
restored automatically with their original IQN.</para>
|
||||
<para>In order to make this possible the iSCSI target
|
||||
information needs to be stored in a file on creation
|
||||
that can be queried in case of restart of the tgt
|
||||
daemon. By default, Block Storage uses a
|
||||
<literal>state_path</literal> variable, which if
|
||||
installing with Yum or APT should be set to
|
||||
<filename>/var/lib/cinder/</filename>. The next
|
||||
part is the <literal>volumes_dir</literal> variable,
|
||||
which by default simply appends a
|
||||
"<literal>volumes</literal>" directory to the
|
||||
<literal>state_path</literal>. The result is a
|
||||
file-tree
|
||||
<filename>/var/lib/cinder/volumes/</filename>.</para>
|
||||
<para>While the installer should handle all this,
|
||||
it can go wrong. If you have trouble creating volumes
|
||||
and this directory does not exist you should see an
|
||||
error message in the <systemitem class="service"
|
||||
>cinder-volume</systemitem> log indicating that
|
||||
the <literal>volumes_dir</literal> does not exist, and
|
||||
it should provide information about which path it was
|
||||
looking for.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The persistent tgt include file.</para>
|
||||
<para>Along with the <option>volumes_dir</option> option,
|
||||
the iSCSI target driver also needs to be configured to
|
||||
look in the correct place for the persist files. This
|
||||
is a simple entry in the
|
||||
<filename>/etc/tgt/conf.d</filename> file that you
|
||||
should have set when you installed OpenStack. If
|
||||
issues occur, verify that you have a
|
||||
<filename>/etc/tgt/conf.d/cinder.conf</filename>
|
||||
file.</para>
|
||||
<para>If the file is not present, create it with this
|
||||
command:</para>
|
||||
<screen><prompt>#</prompt> <userinput>echo 'include /var/lib/cinder/volumes/*' >> /etc/tgt/conf.d/cinder.conf</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>No sign of attach call in the <systemitem
|
||||
class="service">cinder-api</systemitem>
|
||||
log.</para>
|
||||
<para>This is most likely going to be a minor adjustment
|
||||
to your <filename>nova.conf</filename> file. Make sure
|
||||
that your <filename>nova.conf</filename> has this
|
||||
entry:</para>
|
||||
<programlisting language="ini">volume_api_class=nova.volume.cinder.API</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Failed to create iscsi target error in the
|
||||
<filename>cinder-volume.log</filename>
|
||||
file.</para>
|
||||
<programlisting language="bash">2013-03-12 01:35:43 1248 TRACE cinder.openstack.common.rpc.amqp ISCSITargetCreateFailed: Failed to create iscsi target for volume volume-137641b2-af72-4a2f-b243-65fdccd38780.</programlisting>
|
||||
<para>You might see this error in
|
||||
<filename>cinder-volume.log</filename> after
|
||||
trying to create a volume that is 1 GB. To fix this
|
||||
issue:</para>
|
||||
<para>Change content of the
|
||||
<filename>/etc/tgt/targets.conf</filename> from
|
||||
<literal>include /etc/tgt/conf.d/*.conf</literal>
|
||||
to <literal>include
|
||||
/etc/tgt/conf.d/cinder_tgt.conf</literal>, as
|
||||
follows:</para>
|
||||
<programlisting language="bash">include /etc/tgt/conf.d/cinder_tgt.conf
|
||||
include /etc/tgt/conf.d/cinder.conf
|
||||
default-driver iscsi</programlisting>
|
||||
<para>Restart <systemitem class="service">tgt</systemitem>
|
||||
and <systemitem class="service">cinder-*</systemitem>
|
||||
services so they pick up the new configuration.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
@ -1,24 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_duplicate_3PAR_host">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Duplicate 3PAR host</title>
|
||||
<section xml:id="section_ts_duplicate_3PAR_host_problem">
|
||||
<title>Problem</title>
|
||||
<para>This error may be caused by a volume being exported outside of OpenStack using a
|
||||
host name different from the system name that OpenStack expects. This error could be displayed with the IQN if the host was exported using iSCSI.</para>
|
||||
<programlisting>Duplicate3PARHost: 3PAR Host already exists: Host wwn 50014380242B9750 already used by host cld4b5ubuntuW(id = 68. The hostname must be called 'cld4b5ubuntu'.</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_ts_duplicate_3PAR_host_solution">
|
||||
<title>Solution</title>
|
||||
<para>Change the 3PAR host name to match the one that OpenStack expects. The 3PAR host
|
||||
constructed by the driver uses just the local hostname, not the fully qualified domain
|
||||
name (FQDN) of the compute host. For example, if the FQDN was
|
||||
<emphasis>myhost.example.com</emphasis>, just <emphasis>myhost</emphasis> would be
|
||||
used as the 3PAR hostname. IP addresses are not allowed as host names on the 3PAR
|
||||
storage server.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,191 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="ts_eql_vol_size">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Addressing discrepancies in reported volume sizes for EqualLogic storage</title>
|
||||
<section xml:id="ts_eql_vol_size_problem">
|
||||
<title>Problem</title>
|
||||
<para>There is a discrepancy between the actual volume size in EqualLogic (EQL)
storage and the image size in the Image service, and what is reported in the
OpenStack database. This could lead to confusion if a user is creating
|
||||
volumes from an image that was uploaded from an EQL volume (through the
|
||||
Image service). The image size is slightly larger than the target volume
|
||||
size; this is because EQL size reporting accounts for additional storage
|
||||
used by EQL for internal volume metadata.</para>
|
||||
<para>To reproduce the issue, follow the steps in the following procedure.</para>
|
||||
<para>This procedure assumes that the EQL array is provisioned, and that
|
||||
appropriate configuration settings have been included in
|
||||
<filename>/etc/cinder/cinder.conf</filename> to connect to the EQL array.</para>
|
||||
<procedure>
|
||||
<step><para>Create a new volume. Note the ID and size of the volume. In the
|
||||
following example, the ID and size are <literal>74cf9c04-4543-47ae-a937-a9b7c6c921e7</literal>
|
||||
and <literal>1</literal>, respectively:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --display-name volume1 1</userinput>
|
||||
<computeroutput>+-----------------------+-------------------------------------------+
|
||||
| Property | Value |
|
||||
+-----------------------+-------------------------------------------+
|
||||
| attachments | [] |
|
||||
| availability zone | nova |
|
||||
| bootable | false |
|
||||
| created_at | 2014-03-21T18:31:54.248775 |
|
||||
| display_description | None |
|
||||
| display_name | volume1 |
|
||||
| id | 74cf9c04-4543-47ae-a937-a9b7c6c921e7 |
|
||||
| metadata | {} |
|
||||
| size | 1 |
|
||||
| snapshot_id | None |
|
||||
| source volid | None |
|
||||
| status | creating |
|
||||
| volume type | None |
|
||||
+-------------------------------+-----------------------------------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Verify the volume size on the EQL array by using its command-line interface.</para>
|
||||
<para>The actual size (<literal>VolReserve</literal>) is 1.01 GB. The <application>EQL Group
|
||||
Manager</application> should also report a volume size of 1.01 GB.</para>
|
||||
<screen><prompt>eql></prompt> <userinput>volume select volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7</userinput>
|
||||
<prompt>eql (volume_volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7)></prompt> <userinput>show</userinput>
|
||||
<computeroutput>_______________________________ Volume Information ________________________________
|
||||
Name: volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7
|
||||
Size: 1GB
|
||||
VolReserve: 1.01GB
|
||||
VolReserveInUse: 0MB
|
||||
ReplReserveInUse: 0MB
|
||||
iSCSI Alias: volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7
|
||||
iSCSI Name: iqn.2001-05.com.equallogic:0-8a0906-19f91850c-067000000b4532cl-volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7
|
||||
ActualMembers: 1
|
||||
Snap-Warn: 10%
|
||||
Snap-Depletion: delete-oldest
|
||||
Description:
|
||||
Snap-Reserve: 100%
|
||||
Snap-Reserve-Avail: 100% (1.01GB)
|
||||
Permission: read-write
|
||||
DesiredStatus: online
|
||||
Status: online
|
||||
Connections: 0
|
||||
Snapshots: 0
|
||||
Bind:
|
||||
Type: not-replicated
|
||||
ReplicationReserveSpace: 0MB</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create a new image from this volume:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder upload-to-image --disk-format raw \
|
||||
--container-format bare volume1 image_from_volume1</userinput>
|
||||
<computeroutput>+---------------------+---------------------------------------+
|
||||
| Property | Value |
|
||||
+---------------------+---------------------------------------+
|
||||
| container_format | bare |
|
||||
| disk_format | raw |
|
||||
| display_description | None |
|
||||
| id | 74cf9c04-4543-47ae-a937-a9b7c6c921e7 |
|
||||
| image_id | 3020a21d-ba37-4495-8899-07fc201161b9 |
|
||||
| image_name | image_from_volume1 |
|
||||
| size | 1 |
|
||||
| status | uploading |
|
||||
| updated_at | 2014-03-21T18:31:55.000000 |
|
||||
| volume_type | None |
|
||||
+---------------------+---------------------------------------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>When you uploaded the volume in the previous step, the Image service
|
||||
reported the volume's size as <literal>1</literal> (GB). However, when
|
||||
using <command>glance image-list</command> to list the image, the
|
||||
displayed size is 1085276160 bytes, or roughly 1.01 GB:</para>
|
||||
<table frame="all">
|
||||
<title>Image settings reported by <command>glance image-list</command> for image ID</title>
|
||||
<tgroup align="left" cols="5" colsep="1" rowsep="1">
|
||||
<colspec colname="c1" colwidth="30%"/>
|
||||
<colspec colname="c2" colwidth="15%"/>
|
||||
<colspec colname="c3" colwidth="15%"/>
|
||||
<colspec colname="c4" colwidth="20%"/>
|
||||
<colspec colname="c5" colwidth="20%"/>
|
||||
<thead>
|
||||
<row><entry>Name</entry><entry>Disk Format</entry><entry>Container
|
||||
Format</entry><entry>Size</entry><entry>Status</entry></row></thead>
|
||||
<tbody>
|
||||
<row><entry>image_from_volume1</entry><entry>raw</entry>
|
||||
<entry>bare</entry><entry><emphasis>1085276160</emphasis></entry><entry>active</entry></row>
|
||||
</tbody>
|
||||
</tgroup>
|
||||
</table>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create a new volume using the previous image (<literal>image_id
|
||||
3020a21d-ba37-4495-8899-07fc201161b9</literal> in this example) as
|
||||
the source. Set the target volume size to 1 GB; this is the size reported
|
||||
by the <command>cinder</command> tool when you uploaded the volume to
|
||||
the Image service:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --display-name volume2 \
|
||||
--image-id 3020a21d-ba37-4495-8899-07fc201161b9 1</userinput>
|
||||
<computeroutput>ERROR: Invalid input received: Size of specified image 2 is larger
|
||||
than volume size 1. (HTTP 400) (Request-ID: req-4b9369c0-dec5-4e16-a114-c0cd16b5d210)</computeroutput></screen>
|
||||
<para>The attempt to create a new volume based on the size reported by the
|
||||
<command>cinder</command> tool will then fail.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
<section xml:id="ts_eql_vol_size_solutions">
|
||||
<title>Solution</title>
|
||||
<para>To work around this problem, increase the target size of the new volume to the next
|
||||
whole number. In the problem example, you created a 1 GB volume to be used
|
||||
as a volume-backed image, so a new volume using this volume-backed image should
|
||||
use a size of 2 GB:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --display-name volume2 \
|
||||
--image-id 3020a21d-ba37-4495-8899-07fc201161b9 2</userinput>
|
||||
<computeroutput>+---------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+---------------------+--------------------------------------+
|
||||
| attachments | [] |
|
||||
| availability_zone | nova |
|
||||
| bootable | false |
|
||||
| created_at | 2014-03-21T19:25:31.564482 |
|
||||
| display_description | None |
|
||||
| display_name | volume2 |
|
||||
| id | 64e8eb18-d23f-437b-bcac-b352afa6843a |
|
||||
| image_id | 3020a21d-ba37-4495-8899-07fc201161b9 |
|
||||
| metadata | [] |
|
||||
| size | 2 |
|
||||
| snapshot_id | None |
|
||||
| source_volid | None |
|
||||
| status | creating |
|
||||
| volume_type | None |
|
||||
+---------------------+--------------------------------------+</computeroutput></screen>
|
||||
<note><para>The dashboard suggests a suitable size when you create a new volume
|
||||
based on a volume-backed image.</para></note>
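<para>If you prefer to compute the size yourself, the following minimal sketch (illustrative only; the <literal>min_volume_size_gb</literal> helper is hypothetical) rounds the byte size reported by <command>glance image-list</command> up to the next whole gigabyte:</para>
<programlisting language="python"># Illustrative sketch: round an image size in bytes up to the next whole GB
# to choose a safe target volume size for "cinder create".
import math

def min_volume_size_gb(image_size_bytes):
    return max(1, int(math.ceil(image_size_bytes / float(1024 ** 3))))

print(min_volume_size_gb(1085276160))  # 1085276160 bytes (about 1.01 GB) -> 2</programlisting>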
|
||||
|
||||
|
||||
<para>You can then verify this new volume on the EQL array:</para>
|
||||
<screen><prompt>eql></prompt> <userinput>volume select volume-64e8eb18-d23f-437b-bcac-b352afa6843a</userinput>
|
||||
<prompt>eql (volume_volume-64e8eb18-d23f-437b-bcac-b352afa6843a)></prompt> <userinput>show</userinput>
|
||||
<computeroutput>______________________________ Volume Information _______________________________
|
||||
Name: volume-64e8eb18-d23f-437b-bcac-b352afa6843a
|
||||
Size: 2GB
|
||||
VolReserve: 2.01GB
|
||||
VolReserveInUse: 1.01GB
|
||||
ReplReserveInUse: 0MB
|
||||
iSCSI Alias: volume-64e8eb18-d23f-437b-bcac-b352afa6843a
|
||||
iSCSI Name: iqn.2001-05.com.equallogic:0-8a0906-e3091850e-eae000000b7532c1-volume-64e8eb18-d23f-437b-bcac-b352afa6843a
|
||||
ActualMembers: 1
|
||||
Snap-Warn: 10%
|
||||
Snap-Depletion: delete-oldest
|
||||
Description:
|
||||
Snap-Reserve: 100%
|
||||
Snap-Reserve-Avail: 100% (2GB)
|
||||
Permission: read-write
|
||||
DesiredStatus: online
|
||||
Status: online
|
||||
Connections: 1
|
||||
Snapshots: 0
|
||||
Bind:
|
||||
Type: not-replicated
|
||||
ReplicationReserveSpace: 0MB</computeroutput></screen>
|
||||
</section>
|
||||
</section>
|
@ -1,42 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_failed_attach_vol_after_detach">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Failed to attach volume after detaching</title>
|
||||
<section
|
||||
xml:id="section_ts_failed_attach_vol_after_detach_problem">
|
||||
<title>Problem</title>
|
||||
<para>Failed to attach a volume after detaching the same
|
||||
volume.</para>
|
||||
</section>
|
||||
<section
|
||||
xml:id="section_ts_failed_attach_vol_after_detach_solution">
|
||||
<title>Solution</title>
|
||||
<para>You must change the device name on the
|
||||
<command>nova-attach</command> command. The VM might
|
||||
not clean up after a <command>nova-detach</command>
|
||||
command runs. This example shows how the
|
||||
<command>nova-attach</command> command fails when you
|
||||
use the <code>vdb</code>, <code>vdc</code>, or
|
||||
<code>vdd</code> device names:</para>
|
||||
<screen><prompt>#</prompt> <userinput>ls -al /dev/disk/by-path/</userinput>
|
||||
<computeroutput>total 0
|
||||
drwxr-xr-x 2 root root 200 2012-08-29 17:33 .
|
||||
drwxr-xr-x 5 root root 100 2012-08-29 17:33 ..
|
||||
lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0 -> ../../vda
|
||||
lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part1 -> ../../vda1
|
||||
lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part2 -> ../../vda2
|
||||
lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part5 -> ../../vda5
|
||||
lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:06.0-virtio-pci-virtio2 -> ../../vdb
|
||||
lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:08.0-virtio-pci-virtio3 -> ../../vdc
|
||||
lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4 -> ../../vdd
|
||||
lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4-part1 -> ../../vdd1</computeroutput></screen>
|
||||
<para>You might also have this problem after attaching and
|
||||
detaching the same volume from the same VM with the same
|
||||
mount point multiple times. In this case, restart the KVM
|
||||
host.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,26 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_failed_attach_vol_no_sysfsutils">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Failed to attach volume, systool is not installed</title>
|
||||
<section xml:id="section_ts_failed_attach_vol_no_sysfsutils_problem">
|
||||
<title>Problem</title>
|
||||
<para>This warning and error occurs if you do not have the required
|
||||
<filename>sysfsutils</filename> package installed on the compute node.</para>
|
||||
<programlisting>WARNING nova.virt.libvirt.utils [req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] systool is not installed
|
||||
ERROR nova.compute.manager [req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin]
|
||||
[instance: df834b5a-8c3f-477a-be9b-47c97626555c|instance: df834b5a-8c3f-477a-be9b-47c97626555c]
|
||||
Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk.</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_ts_failed_attach_vol_no_sysfsutils_solution">
|
||||
<title>Solution</title>
|
||||
<para>Run the following command on the compute node to install the
|
||||
<filename>sysfsutils</filename> package:</para>
|
||||
<para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install sysfsutils</userinput></screen>
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
@ -1,23 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_failed_connect_vol_FC_SAN">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Failed to connect volume in FC SAN</title>
|
||||
<section xml:id="section_ts_failed_connect_vol_FC_SAN_problem">
|
||||
<title>Problem</title>
|
||||
<para>Compute node failed to connect to a volume in a Fibre Channel (FC) SAN configuration.
|
||||
The WWN may not be zoned correctly in your FC SAN that links the compute host to the
|
||||
storage array.</para>
|
||||
<programlisting>ERROR nova.compute.manager [req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin demo|req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin demo] [instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3]
|
||||
Failed to connect to volume 6f6a6a9c-dfcf-4c8d-b1a8-4445ff883200 while attaching at /dev/vdjTRACE nova.compute.manager [instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3]
|
||||
Traceback (most recent call last):…f07aa4c3d5f3\] ClientException: The server has either erred or is incapable of performing the requested operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00)</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_ts_failed_connect_vol_FC_SAN_solution">
|
||||
<title>Solution</title>
|
||||
<para>The network administrator must configure the FC SAN fabric by correctly zoning the WWN
|
||||
(port names) from your compute node HBAs.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,31 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<?xml-model href="http://docbook.org/xml/5.0/rng/docbook.rng" schematypens="http://relaxng.org/ns/structure/1.0"?>
|
||||
<?xml-model href="http://docbook.org/xml/5.0/rng/docbook.rng" type="application/xml" schematypens="http://purl.oclc.org/dsdl/schematron"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_multipath_warn">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Multipath Call Failed Exit</title>
|
||||
<section xml:id="section_ts_multipath_warn_problem">
|
||||
<title>Problem</title>
|
||||
<para>Multipath call failed exit. This warning occurs in the Compute log if you do not have the
|
||||
optional <filename>multipath-tools</filename> package installed on the compute node.
|
||||
This is an optional package and the volume attachment does work without the multipath
|
||||
tools installed. If the <filename>multipath-tools</filename> package is installed on the
|
||||
compute node, it is used to perform the volume attachment. The IDs in your message are
|
||||
unique to your system.</para>
|
||||
<programlisting>WARNING nova.storage.linuxscsi [req-cac861e3-8b29-4143-8f1b-705d0084e571 admin
|
||||
admin|req-cac861e3-8b29-4143-8f1b-705d0084e571 admin admin] Multipath call failed exit
|
||||
(96)</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_ts_multipath_warn_solution">
|
||||
<title>Solution</title>
|
||||
<para>Run the following command on the compute node to install the
|
||||
<filename>multipath-tools</filename> package:</para>
|
||||
<para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install multipath-tools</userinput></screen>
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
@ -1,24 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_no_emulator_x86_64">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Cannot find suitable emulator for x86_64</title>
|
||||
<section xml:id="section_ts_no_emulator_x86_64_problem">
|
||||
<title>Problem</title>
|
||||
<para>When you attempt to create a VM, the instance goes into the <code>BUILD</code>
|
||||
state and then into the <code>ERROR</code> state.</para>
|
||||
</section>
|
||||
<section xml:id="section_ts_no_emulator_x86_64_solution">
|
||||
<title>Solution</title>
|
||||
<para>On the KVM host, run <code>cat /proc/cpuinfo</code>. Make sure the <code>vmx</code>
|
||||
or <code>svm</code> flags are set.</para>
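<para>The following minimal Python sketch (illustrative only, not an official tool) performs the same check programmatically by scanning <filename>/proc/cpuinfo</filename> for the virtualization flags:</para>
<programlisting language="python"># Illustrative check: report whether /proc/cpuinfo lists hardware
# virtualization support (Intel VT-x 'vmx' or AMD-V 'svm').
def has_hw_virt(cpuinfo_path='/proc/cpuinfo'):
    with open(cpuinfo_path) as cpuinfo:
        for line in cpuinfo:
            if line.startswith('flags'):
                flags = line.split(':', 1)[1].split()
                return 'vmx' in flags or 'svm' in flags
    return False

print(has_hw_virt())</programlisting>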
|
||||
<para>Follow the instructions in the
|
||||
<link xlink:href="http://docs.openstack.org/kilo/config-reference/content/kvm.html#section_kvm_enable">
|
||||
enabling KVM section</link> of the <citetitle>Configuration
|
||||
Reference</citetitle> to enable hardware virtualization
|
||||
support in your BIOS.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,25 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_non_existent_host">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Non-existent host</title>
|
||||
<section xml:id="section_ts_non_existent_host_problem">
|
||||
<title>Problem</title>
|
||||
<para>This error could be caused by a volume being exported outside of OpenStack using a
|
||||
host name different from the system name that OpenStack expects. This error could be
|
||||
displayed with the IQN if the host was exported using iSCSI.</para>
|
||||
<programlisting>2013-04-19 04:02:02.336 2814 ERROR cinder.openstack.common.rpc.common [-] Returning exception Not found (HTTP 404)
|
||||
NON_EXISTENT_HOST - HOST '10' was not found to caller.</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_ts_non_existent_host_solution">
|
||||
<title>Solution</title>
|
||||
<para>Host names constructed by the driver use just the local hostname, not the fully
|
||||
qualified domain name (FQDN) of the Compute host. For example, if the FQDN was
|
||||
<emphasis>myhost.example.com</emphasis>, just <emphasis>myhost</emphasis> would be
|
||||
used as the 3PAR hostname. IP addresses are not allowed as host names on the 3PAR
|
||||
storage server.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,21 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_non_existent_vlun">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Non-existent VLUN</title>
|
||||
<section xml:id="section_ts_non_existent_vlun_problem">
|
||||
<title>Problem</title>
|
||||
<para>This error occurs if the 3PAR host exists with the correct host name that the
|
||||
OpenStack Block Storage drivers expect but the volume was created in a different Domain.</para>
|
||||
<programlisting>HTTPNotFound: Not found (HTTP 404) NON_EXISTENT_VLUN - VLUN 'osv-DqT7CE3mSrWi4gZJmHAP-Q' was not found.</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_ts_non_existent_vlun_solution">
|
||||
<title>Solution</title>
|
||||
<para>The <code>hp3par_domain</code> configuration items either need to be updated to use
|
||||
the domain the 3PAR host currently resides in, or the 3PAR host needs to be moved to the
|
||||
domain that the volume was created in.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,31 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<?xml-model href="http://docbook.org/xml/5.0/rng/docbook.rng" schematypens="http://relaxng.org/ns/structure/1.0"?>
|
||||
<?xml-model href="http://docbook.org/xml/5.0/rng/docbook.rng" type="application/xml" schematypens="http://purl.oclc.org/dsdl/schematron"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_ts_vol_attach_miss_sg_scan">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Failed to Attach Volume, Missing sg_scan</title>
|
||||
<section xml:id="section_ts_vol_attach_miss_sg_scan_problem">
|
||||
<title>Problem</title>
|
||||
<para>Failed to attach volume to an instance,
|
||||
<filename>sg_scan</filename> file not found. This
|
||||
warning and error occur when the
|
||||
<package>sg3-utils</package> package is not installed
|
||||
on the compute node. The IDs in your message are unique to
|
||||
your system:</para>
|
||||
<screen><computeroutput>ERROR nova.compute.manager [req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin|req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin]
|
||||
[instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5]
|
||||
Failed to attach volume 4cc104c4-ac92-4bd6-9b95-c6686746414a at /dev/vdcTRACE nova.compute.manager
|
||||
[instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5]
|
||||
Stdout: '/usr/local/bin/nova-rootwrap: Executable not found: /usr/bin/sg_scan</computeroutput></screen>
|
||||
</section>
|
||||
<section xml:id="section_ts_vol_attach_miss_sg_scan_solution">
|
||||
<title>Solution</title>
|
||||
<para>Run this command on the compute node to install the
|
||||
<package>sg3-utils</package> package:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install sg3-utils</userinput></screen>
|
||||
</section>
|
||||
</section>
|
@ -1,38 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="volume-backup-restore-export-import">
|
||||
<title>Export and import backup metadata</title>
|
||||
<para>A volume backup can only be restored on the same Block Storage service. This is because
|
||||
restoring a volume from a backup requires metadata available on the database used by the
|
||||
Block Storage service.</para>
|
||||
<note>
|
||||
<para>For information about how to back up and restore a volume, see <xref
|
||||
linkend="volume-backup-restore"/>.</para>
|
||||
</note>
|
||||
<para>You can, however, export the metadata of a volume backup. To do so, run this command as an
|
||||
OpenStack <literal>admin</literal> user (presumably, after creating a volume backup):</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder backup-export <replaceable>BACKUP_ID</replaceable></userinput></screen>
|
||||
<para>Where <replaceable>BACKUP_ID</replaceable> is the volume backup's ID. This command should
|
||||
return the backup's corresponding database information as encoded string metadata.</para>
|
||||
<para>Exporting and storing this encoded string metadata allows you to completely restore the
|
||||
backup, even in the event of a catastrophic database failure. This will preclude the need to
|
||||
back up the entire Block Storage database, particularly if you only need to keep complete
|
||||
backups of a small subset of volumes.</para>
|
||||
<para>If you have placed encryption on your volumes, the encryption will still be in place when
|
||||
you restore the volume if a UUID encryption key is specified when creating volumes. Using
|
||||
backup metadata support, UUID keys set up for a volume (or volumes) will remain valid
|
||||
when you restore a backed-up volume. The restored volume will remain
|
||||
encrypted, and will be accessible with your credentials.</para>
|
||||
<para>In addition, having a volume backup and its backup metadata also provides volume
|
||||
portability. Specifically, backing up a volume and exporting its metadata will allow you to
|
||||
restore the volume on a completely different Block Storage database, or even on a different
|
||||
cloud service. To do so, first import the backup metadata to the Block Storage database and
|
||||
then restore the backup.</para>
|
||||
<para>To import backup metadata, run the following command as an OpenStack
|
||||
<literal>admin</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder backup-import <replaceable>METADATA</replaceable></userinput></screen>
|
||||
<para>Where <replaceable>METADATA</replaceable> is the backup metadata exported earlier.</para>
|
||||
<para>Once you have imported the backup metadata into a Block Storage database, restore the
|
||||
volume (<xref linkend="volume-backup-restore"/>).</para>
|
||||
</section>
|
@ -1,103 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="volume-backup-restore">
|
||||
<title>Back up and restore volumes</title>
|
||||
<para>The <command>cinder</command> command-line interface provides the tools for creating a
|
||||
volume backup. You can restore a volume from a backup as long as the backup's associated
|
||||
database information (or backup metadata) is intact in the Block Storage database.</para>
|
||||
<para>Run this command to create a backup of a volume:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder backup-create [--incremental] <replaceable>VOLUME</replaceable></userinput></screen>
|
||||
<para>Where <replaceable>VOLUME</replaceable> is the name or ID of the volume,
|
||||
and <option>incremental</option> is a flag that indicates whether an
|
||||
incremental backup should be performed.</para>
|
||||
<para>Without the <option>incremental</option> flag, a full backup is
|
||||
created by default. With the <option>incremental</option> flag, an
|
||||
incremental backup is created.</para>
|
||||
<note><para>The <option>incremental</option> flag is only available for
|
||||
block storage API v2. You have to specify [--os-volume-api-version 2]
|
||||
in the <command>cinder</command> command-line interface to use this
|
||||
parameter.</para></note>
|
||||
<para>The incremental backup is based on a parent backup which is an
|
||||
existing backup with the latest timestamp. The parent backup can be a full
|
||||
backup or an incremental backup depending on the timestamp.</para>
|
||||
<note><para>The first backup of a volume has to be a full backup. Attempting
|
||||
to do an incremental backup without any existing backups will fail.
|
||||
</para></note>
|
||||
<para>A new configuration option, <option>backup_swift_block_size</option>,
|
||||
is introduced into <filename>cinder.conf</filename> for the default Swift
|
||||
backup driver. This is the size, in bytes, at which changes are tracked for
|
||||
incremental backups. The existing <option>backup_swift_object_size
|
||||
</option> option, the size in bytes of Swift backup objects, has to be a
|
||||
multiple of <option>backup_swift_block_size</option>. The default is 32768
|
||||
for <option>backup_swift_block_size</option>, and the default is 52428800
|
||||
for <option>backup_swift_object_size</option>.
|
||||
</para>
|
||||
<para>This command also returns a backup ID. Use this backup ID when restoring the volume:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder backup-restore <replaceable>BACKUP_ID</replaceable></userinput></screen>
|
||||
<para>When restoring from a full backup, it is a full restore.</para>
|
||||
<para>When restoring from an incremental backup, a list of backups is
|
||||
built based on the IDs of the parent backups. A full restore is
|
||||
performed from the full backup first, and then each incremental backup is restored
|
||||
on top of it in order.</para>
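<para>The following minimal Python sketch illustrates the idea of this restore chain; it is not the Block Storage implementation, and the sample backup IDs are hypothetical:</para>
<programlisting language="python"># Conceptual sketch: derive the restore order for a chain of incremental
# backups. Each backup records its parent's ID; the full backup has none.
def restore_order(parents, last_backup_id):
    chain = []
    current = last_backup_id
    while current is not None:
        chain.append(current)
        current = parents[current]
    return list(reversed(chain))  # full backup first, newest incremental last

parents = {'full-1': None, 'incr-1': 'full-1', 'incr-2': 'incr-1'}
print(restore_order(parents, 'incr-2'))  # ['full-1', 'incr-1', 'incr-2']</programlisting>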
|
||||
<para>Because volume backups are dependent on the Block Storage database, you must also back up
|
||||
your Block Storage database regularly to ensure data recovery.</para>
|
||||
<note>
|
||||
<para>Alternatively, you can export and save the metadata of selected volume backups. Doing so
|
||||
precludes the need to back up the entire Block Storage database. This is useful if you need
|
||||
only a small subset of volumes to survive a catastrophic database failure.</para>
|
||||
<para>If you specify a UUID encryption key when setting up the volume specifications, the
|
||||
backup metadata ensures that the key will remain valid when you back up and restore
|
||||
the volume.</para>
|
||||
<para>For more information about how to export and import volume backup metadata, see <xref
|
||||
linkend="volume-backup-restore-export-import"/>.</para>
|
||||
</note>
|
||||
<para>By default, the swift object store is used for the backup repository.</para>
|
||||
<para>
|
||||
If instead you want to use an NFS export as the backup repository,
|
||||
add the following configuration options to the
|
||||
<literal>[DEFAULT]</literal> section of the
|
||||
<filename>cinder.conf</filename> file and restart the Block
|
||||
Storage services:
|
||||
</para>
|
||||
<programlisting language="ini">backup_driver = cinder.backup.drivers.nfs
|
||||
backup_share = <replaceable>HOST</replaceable>:<replaceable>EXPORT_PATH</replaceable></programlisting>
|
||||
<para>
|
||||
For the <option>backup_share</option> option, replace
|
||||
<replaceable>HOST</replaceable> with the DNS resolvable host name or
|
||||
the IP address of the storage server for the NFS share, and
|
||||
<replaceable>EXPORT_PATH</replaceable> with the path to that
|
||||
share. If your environment requires that non-default mount
|
||||
options be specified for the share, set these as follows:
|
||||
</para>
|
||||
<programlisting language="ini">backup_mount_options = <replaceable>MOUNT_OPTIONS</replaceable></programlisting>
|
||||
<para>
|
||||
<replaceable>MOUNT_OPTIONS</replaceable> is a comma-separated
|
||||
string of NFS mount options as detailed in the NFS man page.
|
||||
</para>
|
||||
<para>There are several other options whose default values may be overridden as appropriate for your environment:
|
||||
</para>
|
||||
<programlisting language="ini">backup_compression_algorithm = zlib
|
||||
backup_sha_block_size_bytes = 32768
|
||||
backup_file_size = 1999994880</programlisting>
|
||||
<para>
|
||||
The option <option>backup_compression_algorithm</option> can be
|
||||
set to <literal>bz2</literal> or <literal>None</literal>. The
|
||||
latter can be a useful setting when the server providing the share
|
||||
for the backup repository itself performs deduplication or
|
||||
compression on the backup data.
|
||||
</para>
|
||||
<para>
|
||||
The option <option>backup_file_size</option> must be a multiple of
|
||||
<option>backup_sha_block_size_bytes</option>. It is effectively
|
||||
the maximum file size to be used, given your environment, to hold
|
||||
backup data. Volumes larger than this will be stored in multiple
|
||||
files in the backup repository. The
|
||||
<option>backup_sha_block_size_bytes</option> option determines the size
|
||||
of blocks from the cinder volume being backed up on which digital
|
||||
signatures are calculated in order to enable incremental
|
||||
backup capability.
|
||||
</para>
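<para>As a rough illustration (the <literal>backup_file_count</literal> helper is hypothetical, and the values are the ones shown above), the following sketch checks the multiple-of constraint and estimates how many files a backup of a given volume occupies:</para>
<programlisting language="python"># Illustrative sketch: validate that backup_file_size is a multiple of
# backup_sha_block_size_bytes and estimate the number of backup files.
import math

backup_sha_block_size_bytes = 32768
backup_file_size = 1999994880
assert backup_file_size % backup_sha_block_size_bytes == 0

def backup_file_count(volume_size_gb):
    volume_bytes = volume_size_gb * 1024 ** 3
    return max(1, int(math.ceil(volume_bytes / float(backup_file_size))))

print(backup_file_count(10))  # a 10 GB volume spans 6 files of roughly 1.9 GB</programlisting>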
|
||||
</section>
|
@ -1,189 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="volume-migration">
|
||||
<title>Migrate volumes</title>
|
||||
<para>OpenStack has the ability to migrate volumes between
|
||||
back-ends that support its volume type. Migrating a volume
|
||||
transparently moves its data from the current back-end for the
|
||||
volume to a new one. This is an administrator function, and
|
||||
can be used for tasks such as storage evacuation (for
|
||||
maintenance or decommissioning), or manual optimizations (for
|
||||
example, performance, reliability, or cost).</para>
|
||||
<para>These workflows are possible for a migration (a simplified decision sketch follows the list):</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>If the storage can migrate the volume on its own, it
|
||||
is given the opportunity to do so. This allows the
|
||||
Block Storage driver to enable optimizations that the
|
||||
storage might be able to perform. If the back-end is
|
||||
not able to perform the migration, the Block Storage service
|
||||
uses one of two generic flows, as follows.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>If the volume is not attached, the Block Storage
|
||||
service creates a volume and copies the data from the
|
||||
original to the new volume.</para>
|
||||
<note>
|
||||
<para>While most back-ends support this function, not all do.
|
||||
See the driver documentation in the <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/"
|
||||
><citetitle>OpenStack Configuration
|
||||
Reference</citetitle></link> for more
|
||||
details.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>If the volume is attached to a VM instance, the
|
||||
Block Storage service creates a volume and calls Compute to
|
||||
copy the data from the original to the new volume.
|
||||
Currently this is supported only by the Compute
|
||||
libvirt driver.</para>
|
||||
</listitem>
|
||||
</orderedlist>
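<para>The following minimal Python sketch summarizes the decision between these flows; it is an illustration only, not the scheduler or driver code:</para>
<programlisting language="python"># Conceptual sketch of the three migration flows described above.
def choose_migration_flow(driver_can_migrate, volume_attached):
    if driver_can_migrate:
        return 'driver-assisted migration on the storage back end'
    if not volume_attached:
        return 'generic flow: create a new volume and copy the data directly'
    return 'generic flow: create a new volume and have Compute (libvirt) copy the data'

print(choose_migration_flow(driver_can_migrate=False, volume_attached=True))</programlisting>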
|
||||
<para>As an example, this scenario shows two LVM back-ends and
|
||||
migrates an attached volume from one to the other. This
|
||||
scenario uses the third migration flow.</para>
|
||||
<para>First, list the available back-ends:</para>
|
||||
<screen><prompt>#</prompt> <userinput>cinder get-pools</userinput>
|
||||
<computeroutput>+----------+----------------------------------------------------+
|
||||
| Property | Value |
|
||||
+----------+----------------------------------------------------+
|
||||
| name | server1@lvmstorage-1#lvmstorage-1 |
|
||||
+----------+----------------------------------------------------+
|
||||
+----------+----------------------------------------------------+
|
||||
| Property | Value |
|
||||
+----------+----------------------------------------------------+
|
||||
| name | server2@lvmstorage-2#lvmstorage-2 |
|
||||
+----------+----------------------------------------------------+
|
||||
</computeroutput></screen>
|
||||
<note>
|
||||
<para>Only Block Storage V2 API supports <command>get-pools</command>.</para>
|
||||
</note>
|
||||
<para>You can also list the available back-ends as follows:</para>
|
||||
<screen><prompt>#</prompt> <userinput>cinder-manage host list</userinput>
|
||||
<computeroutput>server1@lvmstorage-1 zone1
|
||||
server2@lvmstorage-2 zone1</computeroutput></screen>
|
||||
<para>However, you must append the pool name. For example, <literal>server1@lvmstorage-1#zone1</literal>.</para>
|
||||
<para>Next, as the admin user, you can see the current status of
|
||||
the volume (replace the example ID with your own):</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder show 6088f80a-f116-4331-ad48-9afb0dfb196c</userinput>
|
||||
<computeroutput>+--------------------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+--------------------------------+--------------------------------------+
|
||||
| attachments | [...] |
|
||||
| availability_zone | zone1 |
|
||||
| bootable | False |
|
||||
| created_at | 2013-09-01T14:53:22.000000 |
|
||||
| display_description | test |
|
||||
| display_name | test |
|
||||
| id | 6088f80a-f116-4331-ad48-9afb0dfb196c |
|
||||
| metadata | {} |
|
||||
| os-vol-host-attr:host | server1@lvmstorage-1#lvmstorage-1 |
|
||||
| os-vol-mig-status-attr:migstat | None |
|
||||
| os-vol-mig-status-attr:name_id | None |
|
||||
| os-vol-tenant-attr:tenant_id | 6bdd8f41203e4149b5d559769307365e |
|
||||
| size | 2 |
|
||||
| snapshot_id | None |
|
||||
| source_volid | None |
|
||||
| status | in-use |
|
||||
| volume_type | None |
|
||||
+--------------------------------+--------------------------------------+</computeroutput></screen>
|
||||
<para>Note these attributes:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><literal>os-vol-host-attr:host</literal> - the
|
||||
volume's current back-end.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>os-vol-mig-status-attr:migstat</literal> -
|
||||
the status of this volume's migration (<literal>None</literal>
|
||||
means that a migration is not currently in progress).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>os-vol-mig-status-attr:name_id</literal> -
|
||||
the volume ID that this volume's name on the back-end
|
||||
is based on. Before a volume is ever migrated, its
|
||||
name on the back-end storage may be based on the
|
||||
volume's ID (see the <option>volume_name_template</option>
|
||||
configuration parameter). For example, if
|
||||
<option>volume_name_template</option> is kept as the default
|
||||
value (<literal>volume-%s</literal>), your first LVM back-end
|
||||
has a logical volume named
|
||||
<literal>volume-6088f80a-f116-4331-ad48-9afb0dfb196c</literal>.
|
||||
During the course of a migration, if you create a
|
||||
volume and copy over the data, the volume gets the new name but keeps its
|
||||
original ID. This is
|
||||
exposed by the <literal>name_id</literal>
|
||||
attribute (see the sketch after this list).</para>
|
||||
</listitem>
|
||||
</itemizedlist>
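<para>The following minimal Python sketch shows how the back-end volume name relates to <option>volume_name_template</option> and <literal>name_id</literal>. It assumes the default template shown above and is illustrative only; the <literal>backend_volume_name</literal> helper is not cinder code:</para>
<programlisting language="python"># Illustrative sketch: the back-end name is based on name_id after a
# copy-based migration, and on the volume's own ID before any migration.
volume_name_template = 'volume-%s'

def backend_volume_name(volume_id, name_id=None):
    return volume_name_template % (name_id or volume_id)

# Before migration: the name is based on the volume's ID.
print(backend_volume_name('6088f80a-f116-4331-ad48-9afb0dfb196c'))
# After migration: the volume keeps its ID, but its name is based on name_id.
print(backend_volume_name('6088f80a-f116-4331-ad48-9afb0dfb196c',
                          name_id='133d1f56-9ffc-4f57-8798-d5217d851862'))</programlisting>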
|
||||
<note>
|
||||
<para>If you plan to decommission a block storage node, you must
|
||||
stop the <systemitem class="service">cinder</systemitem> volume
|
||||
service on the node after performing the migration.</para>
|
||||
<para os="rhel;centos;fedora;opensuse;sles">On nodes that run
|
||||
CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, or SUSE Linux
|
||||
Enterprise, run:</para>
|
||||
<screen os="rhel;centos;fedora"><prompt>#</prompt> <userinput>service openstack-cinder-volume stop</userinput>
|
||||
<prompt>#</prompt> <userinput>chkconfig openstack-cinder-volume off</userinput></screen>
|
||||
<para os="debian;ubuntu">On nodes that run Ubuntu or Debian, run:
|
||||
</para>
|
||||
<screen os="debian;ubuntu"><prompt>#</prompt> <userinput>service cinder-volume stop</userinput>
|
||||
<prompt>#</prompt> <userinput>update-rc.d cinder-volume disable</userinput></screen>
|
||||
<para>Stopping the <systemitem>cinder</systemitem> volume service
|
||||
will prevent volumes from being allocated to the node.</para>
|
||||
</note>
|
||||
<para>Migrate this volume to the second LVM back-end:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder migrate 6088f80a-f116-4331-ad48-9afb0dfb196c server2@lvmstorage-2</userinput></screen>
|
||||
<para>You can use the <command>cinder show</command> command to
|
||||
see the status of the migration. While migrating, the
|
||||
<literal>migstat</literal> attribute shows states such as
|
||||
<literal>migrating</literal> or
|
||||
<literal>completing</literal>. On error,
|
||||
<literal>migstat</literal> is set to
|
||||
<literal>None</literal> and the <literal>host</literal>
|
||||
attribute shows the original host. On success, in this
|
||||
example, the output looks like:</para>
|
||||
<screen><computeroutput>+--------------------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+--------------------------------+--------------------------------------+
|
||||
| attachments | [...] |
|
||||
| availability_zone | zone1 |
|
||||
| bootable | False |
|
||||
| created_at | 2013-09-01T14:53:22.000000 |
|
||||
| display_description | test |
|
||||
| display_name | test |
|
||||
| id | 6088f80a-f116-4331-ad48-9afb0dfb196c |
|
||||
| metadata | {} |
|
||||
| os-vol-host-attr:host | server2@lvmstorage-2#lvmstorage-2 |
|
||||
| os-vol-mig-status-attr:migstat | None |
|
||||
| os-vol-mig-status-attr:name_id | 133d1f56-9ffc-4f57-8798-d5217d851862 |
|
||||
| os-vol-tenant-attr:tenant_id | 6bdd8f41203e4149b5d559769307365e |
|
||||
| size | 2 |
|
||||
| snapshot_id | None |
|
||||
| source_volid | None |
|
||||
| status | in-use |
|
||||
| volume_type | None |
|
||||
+--------------------------------+--------------------------------------+</computeroutput></screen>
|
||||
<para>Note that <literal>migstat</literal> is None,
|
||||
<literal>host</literal> is the new host, and
|
||||
<literal>name_id</literal> holds the ID of the volume
|
||||
created by the migration. If you look at the second LVM
|
||||
back end, you find the logical volume
|
||||
<literal>volume-133d1f56-9ffc-4f57-8798-d5217d851862</literal>.</para>
|
||||
<note>
|
||||
<para>The migration is not visible to non-admin users (for
|
||||
example, through the volume <literal>status</literal>).
|
||||
However, some operations are not allowed while a
|
||||
migration is taking place, such as attaching/detaching a
|
||||
volume and deleting a volume. If a user performs such an
|
||||
action during a migration, an error is returned.</para>
|
||||
</note>
|
||||
<note>
|
||||
<para>Migrating volumes that have snapshots is currently not
|
||||
allowed.</para>
|
||||
</note>
|
||||
</section>
|
@ -1,70 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="volume_number_weigher">
|
||||
<title>Configure and use volume number weigher</title>
|
||||
<para>OpenStack Block Storage enables you to choose a volume back end
|
||||
according
|
||||
to <option>free_capacity</option> and <option>allocated_capacity</option>.
|
||||
The volume number weigher feature lets the scheduler choose a volume back end based
|
||||
on the number of volumes already hosted on it. This can provide another means to improve
|
||||
the volume back ends' I/O balance and the volumes' I/O performance.</para>
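<para>Conceptually, the weigher prefers the back end that currently hosts the fewest volumes. The following minimal Python sketch illustrates that idea only; it is not the scheduler implementation, and the volume counts are hypothetical:</para>
<programlisting language="python"># Conceptual sketch: with a volume number weigher, the back end hosting the
# fewest volumes is chosen for the next volume.
def pick_backend(volume_counts):
    return min(volume_counts, key=volume_counts.get)

counts = {'lvmdriver-1': 3, 'lvmdriver-2': 2}
print(pick_backend(counts))  # lvmdriver-2</programlisting>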
|
||||
<simplesect>
|
||||
<title>Enable volume number weigher</title>
|
||||
<para>To enable a volume number weigher, set the <option>scheduler_default_weighers</option>
|
||||
option to <literal>VolumeNumberWeigher</literal> in the <filename>cinder.conf</filename> file to
|
||||
define <literal>VolumeNumberWeigher</literal> as the selected weigher.
|
||||
</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Configure multiple-storage back ends</title>
|
||||
<para>To configure <literal>VolumeNumberWeigher</literal>, use <literal>LVMISCSIDriver</literal> as the volume driver.</para>
|
||||
<para>This configuration defines two LVM volume groups: <literal>stack-volumes</literal> with 10 GB capacity
|
||||
and <literal>stack-volumes-1</literal> with 60 GB capacity.
|
||||
This example configuration defines two back ends:</para>
|
||||
<programlisting language="ini">scheduler_default_weighers=VolumeNumberWeigher
|
||||
enabled_backends=lvmdriver-1,lvmdriver-2
|
||||
[lvmdriver-1]
|
||||
volume_group=stack-volumes
|
||||
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name=LVM_iSCSI
|
||||
|
||||
[lvmdriver-2]
|
||||
volume_group=stack-volumes-1
|
||||
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
|
||||
volume_backend_name=LVM_iSCSI</programlisting>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Volume type</title>
|
||||
<para>Define a volume type in Block Storage:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder type-create lvm</userinput></screen>
|
||||
<para>Create an extra specification that links
|
||||
the volume type to a back-end name:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder type-key lvm set volume_backend_name=LVM_iSCSI</userinput></screen>
|
||||
<para>This example creates a <literal>lvm</literal> volume
|
||||
type with <literal>volume_backend_name=LVM_iSCSI</literal>
|
||||
as extra specifications.</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Usage</title>
|
||||
<para>To create six 1-GB volumes, run the <command>cinder create --volume-type lvm 1</command> command
|
||||
six times:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cinder create --volume-type lvm 1</userinput></screen>
|
||||
<para>This command creates three volumes in <literal>stack-volumes</literal> and three volumes in <literal>stack-volumes-1</literal>.</para>
|
||||
<para>List the available volumes:</para>
|
||||
<screen><prompt>#</prompt> <userinput>lvs</userinput>
|
||||
<computeroutput> LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
|
||||
volume-3814f055-5294-4796-b5e6-1b7816806e5d stack-volumes -wi-a---- 1.00g
|
||||
volume-72cf5e79-99d2-4d23-b84e-1c35d3a293be stack-volumes -wi-a---- 1.00g
|
||||
volume-96832554-0273-4e9d-902b-ad421dfb39d1 stack-volumes -wi-a---- 1.00g
|
||||
volume-169386ef-3d3e-4a90-8439-58ceb46889d9 stack-volumes-1 -wi-a---- 1.00g
|
||||
volume-460b0bbb-d8a0-4bc3-9882-a129a5fe8652 stack-volumes-1 -wi-a---- 1.00g
|
||||
volume-9a08413b-0dbc-47c9-afb8-41032ab05a41 stack-volumes-1 -wi-a---- 1.00g</computeroutput></screen>
|
||||
</simplesect>
|
||||
</section>
|
@ -1,174 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="managing-volumes">
|
||||
<title>Block Storage</title>
|
||||
<para>The OpenStack Block Storage service works through the
|
||||
interaction of a series of daemon processes named <systemitem
|
||||
class="daemon">cinder-*</systemitem> that reside
|
||||
persistently on the host machine or machines. The binaries can
|
||||
all be run from a single node, or spread across multiple
|
||||
nodes. They can also be run on the same node as other
|
||||
OpenStack services.</para>
|
||||
<section xml:id="section_block-storage-intro">
|
||||
<title>Introduction to Block Storage</title>
|
||||
<para>To administer the OpenStack Block Storage service, it is
|
||||
helpful to understand a number of concepts. You must make
|
||||
certain choices when you configure the Block Storage
|
||||
service in OpenStack. The bulk of the options come down to
|
||||
two choices, single node or multi-node install. You can
|
||||
read a longer discussion about storage decisions in <link
|
||||
xlink:href="http://docs.openstack.org/openstack-ops/content/storage_decision.html"
|
||||
>Storage Decisions</link> in the <citetitle>OpenStack
|
||||
Operations Guide</citetitle>.</para>
|
||||
<para>OpenStack Block Storage enables you to add extra
|
||||
block-level storage to your OpenStack Compute instances.
|
||||
This service is similar to the Amazon EC2 Elastic Block
|
||||
Storage (EBS) offering.</para>
|
||||
</section>
|
||||
<?hard-pagebreak?>
|
||||
<xi:include
|
||||
href="blockstorage/section_increase-api-throughput.xml"/>
|
||||
<section xml:id="section_manage-volumes">
|
||||
<title>Manage volumes</title>
|
||||
<para>The default OpenStack Block Storage service
|
||||
implementation is an iSCSI solution that uses Logical
|
||||
Volume Manager (LVM) for Linux.</para>
|
||||
<note>
|
||||
<para>The OpenStack Block Storage service is not a shared
|
||||
storage solution like a Storage Area Network (SAN) or
|
||||
NFS volumes, where you can attach a volume to multiple
|
||||
servers. With the OpenStack Block Storage service, you
|
||||
can attach a volume to only one instance at a
|
||||
time.</para>
|
||||
<para>The OpenStack Block Storage service also provides
|
||||
drivers that enable you to use several vendors'
|
||||
back-end storage devices, in addition to or instead of
|
||||
the base LVM implementation.</para>
|
||||
</note>
|
||||
<para>This high-level procedure shows you how to create and
|
||||
attach a volume to a server instance.</para>
|
||||
<procedure>
|
||||
<title>To create and attach a volume to an
|
||||
instance</title>
|
||||
<step>
|
||||
<para>Configure the OpenStack Compute and the
|
||||
OpenStack Block Storage services through the
|
||||
<filename>cinder.conf</filename> file.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Use the <command>cinder create</command> command
|
||||
to create a volume. This command creates an LV
|
||||
into the volume group (VG)
|
||||
<literal>cinder-volumes</literal>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Use the <command>nova volume-attach</command>
|
||||
command to attach the volume to an instance. This
|
||||
command creates a unique iSCSI IQN that is exposed
|
||||
to the compute node.</para>
|
||||
<substeps>
|
||||
<step>
|
||||
<para>The compute node, which runs the
|
||||
instance, now has an active iSCSI session
|
||||
and new local storage (usually a
|
||||
<filename>/dev/sdX</filename>
|
||||
disk).</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>libvirt uses that local storage as
|
||||
storage for the instance. The instance
|
||||
gets a new disk (usually a
|
||||
<filename>/dev/vdX</filename>
|
||||
disk).</para>
|
||||
</step>
|
||||
</substeps>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>For this particular walk through, one cloud controller
|
||||
runs <systemitem class="service">nova-api</systemitem>,
|
||||
<systemitem class="service"
|
||||
>nova-scheduler</systemitem>, <systemitem
|
||||
class="service">nova-objectstore</systemitem>,
|
||||
<literal>nova-network</literal> and
|
||||
<literal>cinder-*</literal> services. Two additional
|
||||
compute nodes run <systemitem class="service"
|
||||
>nova-compute</systemitem>. The walk through uses a
|
||||
custom partitioning scheme that carves out 60 GB of
|
||||
space and labels it as LVM. The network uses the
|
||||
<literal>FlatManager</literal> and
|
||||
<literal>NetworkManager</literal> settings for
|
||||
OpenStack Compute.</para>
|
||||
<para>The network mode does not interfere with OpenStack Block
|
||||
Storage operations, but you must set up networking for
|
||||
Block Storage to work. For details, see <xref
|
||||
linkend="ch_networking"/>.</para>
|
||||
<para>To set up Compute to use volumes, ensure that Block
|
||||
Storage is installed along with <package>lvm2</package>.
|
||||
This guide describes how to troubleshoot your installation
|
||||
and back up your Compute volumes.</para>
|
||||
<section xml:id="boot-from-volume">
|
||||
<title>Boot from volume</title>
|
||||
<para>In some cases, you can store and run instances from
|
||||
inside volumes. For information, see the <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/cli_nova_launch_instance_from_volume.html"
|
||||
>Launch an instance from a volume</link> section
|
||||
in the <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/"
|
||||
><citetitle>OpenStack End User
|
||||
Guide</citetitle></link>.</para>
|
||||
</section>
|
||||
<?hard-pagebreak?>
|
||||
<xi:include href="blockstorage/section_nfs_backend.xml"/>
|
||||
<xi:include href="blockstorage/section_glusterfs_backend.xml"/>
|
||||
<xi:include href="blockstorage/section_multi_backend.xml"/>
|
||||
<xi:include
|
||||
href="blockstorage/section_backup-block-storage-disks.xml"/>
|
||||
<xi:include href="blockstorage/section_volume-migration.xml"/>
|
||||
<xi:include href="blockstorage/section_glusterfs_removal.xml"/>
|
||||
<xi:include href="blockstorage/section_volume-backups.xml"/>
|
||||
<xi:include
|
||||
href="blockstorage/section_volume-backups-export-import.xml"/>
|
||||
<section xml:id="using-lio-iscsi">
|
||||
<title>Use LIO iSCSI support</title>
|
||||
<para>The default <option>iscsi_helper</option> tool is
|
||||
<literal>tgtadm</literal>. To use LIO iSCSI,
|
||||
install the <literal>python-rtslib</literal> package,
|
||||
and set <literal>iscsi_helper=lioadm</literal> in the
|
||||
<filename>cinder.conf</filename> file.</para>
|
||||
<para>Once configured, you can use the
|
||||
<command>cinder-rtstool</command> command to
|
||||
manage the volumes. This command enables you to
|
||||
create, delete, and verify volumes and determine
|
||||
targets and add iSCSI initiators to the system.</para>
|
||||
</section>
|
||||
<xi:include href="blockstorage/section_volume_number_weigher.xml"/>
|
||||
<xi:include href="blockstorage/section_consistency_groups.xml"/>
|
||||
<xi:include href="blockstorage/section_driver_filter_weighing.xml"/>
|
||||
<xi:include href="blockstorage/section_ratelimit-volume-copy-bandwidth.xml"/>
|
||||
<xi:include href="blockstorage/section_over_subscription.xml"/>
|
||||
</section>
|
||||
<section xml:id="troubleshooting-cinder-install">
|
||||
<title>Troubleshoot your installation</title>
|
||||
<para>This section provides useful tips to help you
|
||||
troubleshoot your Block Storage installation.</para>
|
||||
<xi:include href="blockstorage/section_ts_cinder_config.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_multipath_warn.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_eql_volume_size.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_vol_attach_miss_sg_scan.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_HTTP_bad_req_in_cinder_vol_log.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_duplicate_3par_host.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_failed_attach_vol_after_detach.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_failed_attach_vol_no_sysfsutils.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_failed_connect_vol_FC_SAN.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_no_emulator_x86_64.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_non_existent_host.xml"/>
|
||||
<xi:include href="blockstorage/section_ts_non_existent_vlun.xml"/>
|
||||
</section>
|
||||
</chapter>
|
@ -1,513 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="ch_introduction-to-openstack-compute">
|
||||
<title>Compute</title>
|
||||
<para>The OpenStack Compute service allows you to control an
|
||||
Infrastructure-as-a-Service (IaaS) cloud computing platform.
|
||||
It gives you control over instances and networks, and allows
|
||||
you to manage access to the cloud through users and
|
||||
projects.</para>
|
||||
<para>Compute does not include virtualization software.
|
||||
Instead, it defines drivers that interact with underlying
|
||||
virtualization mechanisms that run on your host operating
|
||||
system, and exposes functionality over a web-based API.</para>
|
||||
<section xml:id="section_system-architecture">
|
||||
<title>System architecture</title>
|
||||
<para>OpenStack Compute contains several main components.</para>
|
||||
<para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>The <glossterm>cloud controller</glossterm> represents the global state
|
||||
and interacts with the other components. The <literal>API server</literal>
|
||||
acts as the web services front end for the cloud controller. The
|
||||
<literal>compute controller</literal> provides compute server resources
|
||||
and usually also contains the Compute service.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The <literal>object store</literal> is an optional component that provides
|
||||
storage services; you can also instead use OpenStack Object Storage.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>An <literal>auth manager</literal> provides authentication and
|
||||
authorization services when used with the Compute system; you can also
|
||||
instead use OpenStack Identity as a separate authentication service.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A
|
||||
<literal>volume controller</literal> provides fast and
|
||||
permanent block-level storage for the compute servers.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The <literal>network controller</literal> provides virtual networks to
|
||||
enable compute servers to interact with each other and with the public
|
||||
network. You can also instead use OpenStack Networking.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The
|
||||
<literal>scheduler</literal> is used to select the
|
||||
most suitable compute controller to host an
|
||||
instance.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<para>Compute uses a messaging-based, <literal>shared nothing</literal> architecture. All
|
||||
major components exist on multiple servers, including the compute, volume, and network
|
||||
controllers, and the object store or image service. The state of the entire system is
|
||||
stored in a database. The cloud controller communicates with the internal object store
|
||||
using HTTP, but it communicates with the scheduler, network controller, and volume
|
||||
controller using AMQP (advanced message queuing protocol). To avoid blocking a
|
||||
component while waiting for a response, Compute uses asynchronous calls, with a callback
|
||||
that is triggered when a response is received.</para>
|
||||
<section xml:id="section_hypervisors">
|
||||
<title>Hypervisors</title>
|
||||
<para>Compute controls hypervisors through an API
|
||||
server. Selecting the best hypervisor to use can be difficult, and you must take budget,
|
||||
resource constraints, supported features, and required technical specifications into
|
||||
account. However, the majority of OpenStack development is done on systems using KVM and
|
||||
Xen-based hypervisors. For a detailed list of features and support across different
|
||||
hypervisors, see <link xlink:href="http://wiki.openstack.org/HypervisorSupportMatrix"
|
||||
>http://wiki.openstack.org/HypervisorSupportMatrix</link>.</para>
|
||||
<para>You can also orchestrate clouds using multiple
|
||||
hypervisors in different availability zones. Compute
|
||||
supports the following hypervisors:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><link xlink:href="https://wiki.openstack.org/wiki/Ironic">Baremetal</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link xlink:href="https://www.docker.io">Docker</link></para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link
|
||||
xlink:href="http://www.microsoft.com/en-us/server-cloud/hyper-v-server/default.aspx"
|
||||
>Hyper-V</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link xlink:href="http://www.linux-kvm.org/page/Main_Page">Kernel-based
|
||||
Virtual Machine (KVM)</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link xlink:href="https://linuxcontainers.org/">Linux Containers (LXC)</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link xlink:href="http://wiki.qemu.org/Manual">Quick Emulator (QEMU)</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link xlink:href="http://user-mode-linux.sourceforge.net/">User Mode Linux
|
||||
(UML)</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link
|
||||
xlink:href="http://www.vmware.com/products/vsphere-hypervisor/support.html"
|
||||
>VMware vSphere</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><link xlink:href="http://www.xen.org/support/documentation.html">Xen</link>
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>For more information about hypervisors, see the <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/section_compute-hypervisors.html"
|
||||
>Hypervisors</link> section in the
|
||||
<citetitle>OpenStack Configuration
|
||||
Reference</citetitle>.</para>
|
||||
</section>
|
||||
<section xml:id="section_users-and-projects">
|
||||
<title>Tenants, users, and roles</title>
|
||||
<para>The Compute system is designed to be used by different
|
||||
consumers in the form of tenants on a shared system, with
|
||||
role-based access assignments. Roles control the actions
|
||||
that a user is allowed to perform.</para>
|
||||
<para>Tenants are isolated resource containers that form the
|
||||
principal organizational structure within the Compute
|
||||
service. They consist of an individual VLAN, and volumes,
|
||||
instances, images, keys, and users. A user can specify the
|
||||
tenant by appending <literal>:project_id</literal> to
|
||||
their access key. If no tenant is specified in the API
|
||||
request, Compute attempts to use a tenant with the same ID
|
||||
as the user.</para>
|
||||
<para>For tenants, you can use quota controls to limit the:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Number of volumes that can be created.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Number of processor cores and the amount of RAM that can be allocated.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Floating IP addresses assigned to any instance when it launches. This allows
|
||||
instances to have the same publicly accessible IP addresses.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Fixed IP addresses assigned to the same instance when it launches. This allows
|
||||
instances to have the same publicly or privately accessible IP addresses.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
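            <para>As an illustration only, and assuming the
                <command>nova</command> command-line client, an administrator
                might review and raise a tenant's quotas as follows (the
                tenant ID and values are hypothetical):</para>
            <screen><prompt>$</prompt> <userinput>nova quota-show --tenant <replaceable>TENANT_ID</replaceable></userinput>
<prompt>$</prompt> <userinput>nova quota-update --cores 40 --ram 65536 --floating-ips 20 <replaceable>TENANT_ID</replaceable></userinput></screen>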
|
||||
<para>Roles control the actions a user is allowed to perform.
|
||||
By default, most actions do not require a particular role,
|
||||
but you can configure them by editing the
|
||||
<filename>policy.json</filename> file for user roles.
|
||||
For example, a rule can be defined so that a user must
|
||||
have the <literal>admin</literal> role in order to be
|
||||
able to allocate a public IP address.</para>
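            <para>For example, a <filename>policy.json</filename> entry along
                the following lines restricts floating IP allocation to users
                with the <literal>admin</literal> role. This is an
                illustrative sketch only; the exact rule name depends on your
                release:</para>
            <programlisting language="json">"network:allocate_floating_ip": "role:admin"</programlisting>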
|
||||
<para>A tenant limits users' access to particular images. Each
|
||||
user is assigned a user name and password. Keypairs
|
||||
granting access to an instance are enabled for each user,
|
||||
but quotas are set, so that each tenant can control
|
||||
resource consumption across available hardware
|
||||
resources.</para>
|
||||
<note>
|
||||
<para>Earlier versions of OpenStack used the term
|
||||
<systemitem class="service">project</systemitem>
|
||||
instead of <systemitem class="service"
|
||||
>tenant</systemitem>. Because of this legacy
|
||||
terminology, some command-line tools use
|
||||
<parameter>--project_id</parameter> where you
|
||||
would normally expect to enter a tenant ID.</para>
|
||||
</note>
|
||||
</section>
|
||||
<section xml:id="section_storage-and-openstack-compute">
|
||||
<title>Block storage</title>
|
||||
        <para>OpenStack provides two classes of block storage:
            ephemeral storage and persistent volumes.</para>
|
||||
|
||||
<simplesect>
|
||||
<title>Ephemeral storage</title>
|
||||
            <para>Ephemeral storage includes a root ephemeral volume
                and an additional ephemeral volume.</para>
|
||||
|
||||
            <para>The root disk is associated with an instance and
                exists only for the life of that instance. Generally, it is
                used to store an instance's root file system. It persists
                across guest operating system reboots and is removed when
                the instance is deleted. The size of the root ephemeral
                volume is defined by the flavor of the instance.</para>
|
||||
|
||||
            <para>In addition to the ephemeral root volume, all default
                flavors except <literal>m1.tiny</literal>, the smallest,
                provide an additional ephemeral block device whose size
                ranges from 20 to 160 GB (a configurable value to suit the
                environment). This device is presented as a raw block device
                with no partition table or file system. A cloud-aware
                operating system can discover, format, and mount such a
                storage device. OpenStack Compute defines the default file
                system for different operating systems as Ext4 for Linux
                distributions, VFAT for non-Linux and non-Windows operating
                systems, and NTFS for Windows. However, you can specify any
                other file system type by using the
                <option>virt_mkfs</option> or
                <option>default_ephemeral_format</option> configuration
                options.</para>
|
||||
<note>
|
||||
                <para>For example, the <systemitem class="service">cloud-init</systemitem> package
                    included in Ubuntu's stock cloud images formats this
                    space as an Ext4 file system and mounts it on
                    <filename>/mnt</filename> by default. This is a
                    cloud-init feature, not an OpenStack mechanism; OpenStack
                    only provisions the raw storage.</para>
|
||||
</note>
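            <para>As a hedged sketch, the relevant
                <filename>nova.conf</filename> options on a compute node
                might look like the following; the values are illustrative,
                not recommendations:</para>
            <programlisting language="ini">[DEFAULT]
# File system used when formatting the additional ephemeral device
default_ephemeral_format = ext4
# Override the mkfs command for a given guest OS type (os_type=command)
virt_mkfs = linux=mkfs.ext4 -L %(fs_label)s -F %(target)s</programlisting>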
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>Persistent volume</title>
|
||||
<para>A persistent volume is represented by a persistent virtualized block device
|
||||
independent of any particular instance, and provided by OpenStack Block Storage.</para>
|
||||
|
||||
            <para>Only a single configured instance can access a persistent
                volume at a time; multiple instances cannot access the same
                persistent volume concurrently. Sharing data across instances
                requires a traditional network file system, such as NFS or
                CIFS, or a cluster file system such as GlusterFS. These
                systems can be built within an OpenStack cluster, or
                provisioned outside of it, but OpenStack software does not
                provide these features.</para>
|
||||
|
||||
<para>You can configure a persistent volume as bootable and use
|
||||
                it to provide a persistent virtual instance similar to a
                traditional non-cloud-based virtualization system.
|
||||
It is still possible for the resulting instance to keep
|
||||
ephemeral storage, depending on the flavor selected. In this
|
||||
case, the root file system can be on the persistent volume,
|
||||
and its state is maintained, even if the instance is shut down.
|
||||
For more information about this type of configuration, see
|
||||
the <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link>.
|
||||
</para>
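            <para>As a rough sketch of one possible workflow, you might
                create a bootable volume from an image and boot an instance
                from it. Client options vary between releases, so treat the
                exact flags as assumptions:</para>
            <screen><prompt>$</prompt> <userinput>cinder create --image-id <replaceable>IMAGE_ID</replaceable> --display-name boot-volume 10</userinput>
<prompt>$</prompt> <userinput>nova boot --flavor m1.small --boot-volume <replaceable>VOLUME_ID</replaceable> myInstanceFromVolume</userinput></screen>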
|
||||
|
||||
</simplesect>
|
||||
</section>
|
||||
|
||||
<section xml:id="instance-mgmt-ec2compat">
|
||||
<title>EC2 compatibility API</title>
|
||||
<para>In addition to the native compute API, OpenStack provides
|
||||
an EC2-compatible API. This API allows EC2 legacy workflows
|
||||
built for EC2 to work with OpenStack. For more information and
|
||||
configuration options about this compatibility API, see the <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link>.
|
||||
</para>
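        <para>As a brief illustration, the EC2 API is typically enabled
            through the <filename>nova.conf</filename> option shown below;
            consult the configuration reference for your release before
            relying on it:</para>
        <programlisting language="ini">[DEFAULT]
enabled_apis = ec2,osapi_compute,metadata</programlisting>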
|
||||
<para>Numerous third-party tools and language-specific SDKs
|
||||
can be used to interact with OpenStack clouds, using both
|
||||
native and compatibility APIs. Some of the more popular
|
||||
third-party tools are:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>Euca2ools</term>
|
||||
<listitem>
|
||||
<para>A popular open source command-line tool for
|
||||
interacting with the EC2 API. This is
|
||||
convenient for multi-cloud environments where
|
||||
EC2 is the common API, or for transitioning
|
||||
from EC2-based clouds to OpenStack. For more
|
||||
information, see the <link
|
||||
xlink:href="https://www.eucalyptus.com/docs/eucalyptus/4.1.2/index.html#shared/euca2ools_section.html"
|
||||
>euca2ools site</link>.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Hybridfox</term>
|
||||
<listitem>
|
||||
<para>A Firefox browser add-on that provides a
|
||||
graphical interface to many popular public and
|
||||
private cloud technologies, including
|
||||
OpenStack. For more information, see the <link
|
||||
xlink:href="http://code.google.com/p/hybridfox/"
|
||||
> hybridfox site</link>.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>boto</term>
|
||||
<listitem>
|
||||
<para>A Python library for interacting with Amazon
|
||||
Web Services. It can be used to access
|
||||
OpenStack through the EC2 compatibility API.
|
||||
For more information, see the <link
|
||||
xlink:href="https://github.com/boto/boto">
|
||||
boto project page on GitHub</link>.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>fog</term>
|
||||
<listitem>
|
||||
<para>A Ruby cloud services library. It provides
|
||||
methods for interacting with a large number of
|
||||
cloud and virtualization platforms, including
|
||||
OpenStack. For more information, see the <link
|
||||
xlink:href="https://rubygems.org/gems/fog"
|
||||
> fog site</link>.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>php-opencloud</term>
|
||||
<listitem>
|
||||
<para>A PHP SDK designed to work with most
|
||||
                        OpenStack-based cloud deployments, as well as
|
||||
Rackspace public cloud. For more information,
|
||||
see the <link
|
||||
xlink:href="http://www.php-opencloud.com">
|
||||
php-opencloud site</link>.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
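        <para>For example, a minimal euca2ools session against the EC2
            compatibility API might look like the following. The endpoint
            URL and credentials are placeholders; obtain EC2 credentials for
            your cloud first (for example, with the
            <command>keystone ec2-credentials-create</command> command):</para>
        <screen><prompt>$</prompt> <userinput>export EC2_URL=http://<replaceable>controller</replaceable>:8773/services/Cloud</userinput>
<prompt>$</prompt> <userinput>export EC2_ACCESS_KEY=<replaceable>ACCESS_KEY</replaceable></userinput>
<prompt>$</prompt> <userinput>export EC2_SECRET_KEY=<replaceable>SECRET_KEY</replaceable></userinput>
<prompt>$</prompt> <userinput>euca-describe-images</userinput>
<prompt>$</prompt> <userinput>euca-describe-instances</userinput></screen>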
|
||||
</section>
|
||||
<section xml:id="section_instance-building-blocks">
|
||||
<title>Building blocks</title>
|
||||
<para>In OpenStack the base operating system is usually copied
|
||||
from an image stored in the OpenStack Image service. This
|
||||
is the most common case and results in an ephemeral
|
||||
instance that starts from a known template state and loses
|
||||
all accumulated states on virtual machine deletion. It is
|
||||
also possible to put an operating system on a persistent
|
||||
volume in the OpenStack Block Storage volume system. This
|
||||
gives a more traditional persistent system that
|
||||
accumulates states which are preserved on the OpenStack
|
||||
Block Storage volume across the deletion and re-creation
|
||||
of the virtual machine. To get a list of available images
|
||||
on your system, run:
|
||||
<screen><prompt>$</prompt> <userinput>nova image-list</userinput>
|
||||
<?db-font-size 50%?><computeroutput>+--------------------------------------+-------------------------------+--------+--------------------------------------+
|
||||
| ID | Name | Status | Server |
|
||||
+--------------------------------------+-------------------------------+--------+--------------------------------------+
|
||||
| aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 | ACTIVE | |
|
||||
| 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 | ACTIVE | |
|
||||
| df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins | ACTIVE | |
|
||||
+--------------------------------------+-------------------------------+--------+--------------------------------------+</computeroutput></screen>
|
||||
</para>
|
||||
<para>The displayed image attributes are:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term><literal>ID</literal></term>
|
||||
<listitem>
|
||||
<para>Automatically generated UUID of the
|
||||
image</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>Name</literal></term>
|
||||
<listitem>
|
||||
<para>Free form, human-readable name for
|
||||
image</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>Status</literal></term>
|
||||
<listitem>
|
||||
<para>The status of the image. Images marked
|
||||
<literal>ACTIVE</literal> are available
|
||||
for use.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>Server</literal></term>
|
||||
<listitem>
|
||||
<para>For images that are created as snapshots of
|
||||
running instances, this is the UUID of the
|
||||
instance the snapshot derives from. For
|
||||
uploaded images, this field is blank.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
<para>Virtual hardware templates are called
|
||||
<literal>flavors</literal>. The default installation
|
||||
            provides five flavors. By default, these are configurable
            by admin users; however, that behavior can be changed by
|
||||
redefining the access controls for
|
||||
<literal>compute_extension:flavormanage</literal>
|
||||
in <filename>/etc/nova/policy.json</filename> on the
|
||||
<filename>compute-api</filename> server.</para>
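        <para>For example, to allow members of a dedicated role (shown here
            with a hypothetical <literal>flavor-admin</literal> role name) to
            manage flavors, you might change the rule as follows:</para>
        <programlisting language="json">"compute_extension:flavormanage": "role:flavor-admin"</programlisting>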
|
||||
<para>For a list of flavors that are available on your
|
||||
system:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova flavor-list</userinput>
|
||||
<computeroutput>+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
|
||||
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
|
||||
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
|
||||
| 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
|
||||
| 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
|
||||
| 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
|
||||
| 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
|
||||
| 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
|
||||
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+</computeroutput></screen>
|
||||
</section>
|
||||
<section xml:id="section_compute-service-arch">
|
||||
<title>Compute service architecture</title>
|
||||
<para>These basic categories describe the service
|
||||
architecture and information about the cloud controller.</para>
|
||||
<simplesect>
|
||||
<title>API server</title>
|
||||
<para>At the heart of the cloud framework is an API server,
|
||||
which makes command and control of the hypervisor, storage,
|
||||
and networking programmatically available to users.</para>
|
||||
<para>The API endpoints are basic HTTP web services
|
||||
which handle authentication, authorization, and
|
||||
basic command and control functions using various
|
||||
API interfaces under the Amazon, Rackspace, and
|
||||
related models. This enables API compatibility
|
||||
with multiple existing tool sets created for
|
||||
interaction with offerings from other vendors.
|
||||
This broad compatibility prevents vendor
|
||||
lock-in.</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Message queue</title>
|
||||
<para>A messaging queue brokers the interaction
|
||||
between compute nodes (processing), the networking
|
||||
controllers (software which controls network
|
||||
infrastructure), API endpoints, the scheduler
|
||||
(determines which physical hardware to allocate to
|
||||
a virtual resource), and similar components.
|
||||
Communication to and from the cloud controller is handled
|
||||
by HTTP requests through multiple API
|
||||
endpoints.</para>
|
||||
<para>A typical message passing event begins with the API
|
||||
server receiving a request from a user. The API server
|
||||
authenticates the user and ensures that they are permitted
|
||||
to issue the subject command. The availability of objects
|
||||
implicated in the request is evaluated and, if available,
|
||||
the request is routed to the queuing engine for the
|
||||
relevant workers. Workers continually listen to the queue
|
||||
                based on their role and, occasionally, their host name.
|
||||
When an applicable work request arrives on the queue, the
|
||||
worker takes assignment of the task and begins executing it.
|
||||
Upon completion, a response is dispatched to the queue
|
||||
which is received by the API server and relayed to the
|
||||
originating user. Database entries are queried, added, or
|
||||
removed as necessary during the process.</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Compute worker</title>
|
||||
<para>Compute workers manage computing instances on
|
||||
host machines. The API dispatches commands to
|
||||
compute workers to complete these tasks:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Run instances</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Terminate instances</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Reboot instances</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Attach volumes</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Detach volumes</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Get console output</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Network Controller</title>
|
||||
<para>The Network Controller manages the networking
|
||||
resources on host machines. The API server
|
||||
dispatches commands through the message queue,
|
||||
which are subsequently processed by Network
|
||||
Controllers. Specific operations include:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Allocate fixed IP addresses</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
                    <para>Configure VLANs for projects</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
                    <para>Configure networks for compute
                        nodes</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</simplesect>
|
||||
</section>
|
||||
</section>
|
||||
<xi:include href="compute/section_compute-images-instances.xml"/>
|
||||
<xi:include href="compute/section_compute-networking-nova.xml"/>
|
||||
<xi:include href="compute/section_compute-system-admin.xml"/>
|
||||
<xi:include href="../common/section_support-compute.xml"/>
|
||||
</chapter>
|
@ -1,32 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="ch_install-dashboard">
|
||||
<title>Dashboard</title>
|
||||
<para>The OpenStack dashboard is a web-based interface that allows you to manage OpenStack
|
||||
resources and services. The dashboard allows you to interact with the OpenStack Compute
|
||||
cloud controller using the OpenStack APIs. For more information about installing and
|
||||
configuring the dashboard, see the <citetitle>OpenStack Installation Guide</citetitle> for
|
||||
your operating system.</para>
|
||||
<para>Dashboard resources:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>To customize the dashboard, see <xref linkend="dashboard-custom-brand"/>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>To set up session storage for the dashboard, see <xref
|
||||
linkend="dashboard-sessions"/>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>To deploy the dashboard, see the <link
|
||||
xlink:href="http://docs.openstack.org/developer/horizon/topics/deployment.html">
|
||||
Horizon documentation</link>.</para>
|
||||
</listitem>
|
||||
<listitem xml:id="launch_instances">
|
||||
<para>To launch instances with the dashboard, see the <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/index.html">
|
||||
<citetitle>OpenStack End User Guide</citetitle></link>.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<xi:include href="../common/section_dashboard_customizing.xml"/>
|
||||
<xi:include href="../common/section_dashboard_sessions.xml"/>
|
||||
</chapter>
|
@ -1,379 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="ch_admin-openstack-database">
|
||||
<title>Database</title>
|
||||
<para>The Database service module provides database management features.</para>
|
||||
<section xml:id="section_database-introduction">
|
||||
<title>Introduction</title>
|
||||
<para>The Database service provides scalable and reliable cloud provisioning functionality for both relational and non-relational database engines. Users can quickly and easily use database features without the burden of handling complex administrative tasks. Cloud users and database administrators can provision and manage multiple database instances as needed.</para>
|
||||
<para>The Database service provides resource isolation at high performance levels, and automates complex administrative tasks such as deployment, configuration, patching, backups, restores, and monitoring.</para>
|
||||
</section>
|
||||
<section xml:id="section_create-datastore">
|
||||
<title>Create a datastore</title>
|
||||
<para>An administrative user can create datastores for a variety of databases.</para>
|
||||
<para>This section assumes you do not yet have a MySQL datastore, and shows you how to create a MySQL datastore and populate it with a MySQL 5.5 datastore version.</para>
|
||||
<procedure>
|
||||
<title>To create a datastore</title>
|
||||
<step>
|
||||
<title>Create a trove image</title>
|
||||
<para>Create an image for the type of database you want to use, for example, MySQL, MongoDB, Cassandra, and so on.</para>
|
||||
<para>This image must have the trove guest agent installed, and it must have the <filename>trove-guestagent.conf</filename> file configured to connect to your OpenStack environment. To configure <filename>trove-guestagent.conf</filename>, add the following lines to <filename>trove-guestagent.conf</filename> on the guest instance you are using to build your image:</para>
|
||||
<programlisting language="ini">rabbit_host = <replaceable>controller</replaceable>
|
||||
rabbit_password = <replaceable>RABBIT_PASS</replaceable>
|
||||
nova_proxy_admin_user = admin
|
||||
nova_proxy_admin_pass = <replaceable>ADMIN_PASS</replaceable>
|
||||
nova_proxy_admin_tenant_name = service
|
||||
trove_auth_url = http://<replaceable>controller</replaceable>:35357/v2.0</programlisting>
|
||||
<para>This example assumes you have created a MySQL 5.5 image called <literal>mysql-5.5.qcow2</literal>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<title>Register image with Image service</title>
|
||||
<para>You need to register your guest image with the Image service.</para>
|
||||
<para>In this example, you use the glance <command>image-create</command> command to register a <literal>mysql-5.5.qcow2</literal> image.</para>
|
||||
<screen><prompt>$</prompt> <userinput>glance image-create --name mysql-5.5 --disk-format qcow2 --container-format bare --is-public True < mysql-5.5.qcow2</userinput>
|
||||
<computeroutput>+------------------+--------------------------------------+
|
||||
| Property | Value |
|
||||
+------------------+--------------------------------------+
|
||||
| checksum | d41d8cd98f00b204e9800998ecf8427e |
|
||||
| container_format | bare |
|
||||
| created_at | 2014-05-23T21:01:18 |
|
||||
| deleted | False |
|
||||
| deleted_at | None |
|
||||
| disk_format | qcow2 |
|
||||
| id | bb75f870-0c33-4907-8467-1367f8cb15b6 |
|
||||
| is_public | True |
|
||||
| min_disk | 0 |
|
||||
| min_ram | 0 |
|
||||
| name | mysql-5.5 |
|
||||
| owner | 1448da1223124bb291f5ae8e9af4270d |
|
||||
| protected | False |
|
||||
| size | 0 |
|
||||
| status | active |
|
||||
| updated_at | 2014-05-23T21:01:22 |
|
||||
| virtual_size | None |
|
||||
+------------------+--------------------------------------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<title>Create the datastore</title>
|
||||
<para>Create the datastore that will house the new image. To do this, use the trove-manage <command>datastore_update</command> command.</para>
|
||||
<para>This example uses the following arguments:</para>
|
||||
<informaltable rules="all" width="80%">
|
||||
<col width="20%"/>
|
||||
<col width="40%"/>
|
||||
<col width="40%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Argument</th>
|
||||
<th>Description</th>
|
||||
<th>In this example:</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
<para>config file</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>The configuration file to use.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>--config-file=/etc/trove/trove.conf</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>name</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>Name you want to use for this datastore.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>mysql</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>default version</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>You can attach multiple versions/images to a datastore. For example, you might have a MySQL 5.5 version and a MySQL 5.6 version. You can designate one version as the default, which the system uses if a user does not explicitly request a specific version.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>""</option></para>
|
||||
<para>At this point, you do not yet have a default version, so pass in an empty string.</para>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</informaltable>
|
||||
<para>Example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>trove-manage --config-file=/etc/trove/trove.conf datastore_update mysql ""</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<title>Add a version to the new datastore</title>
|
||||
<para>Now that you have a <literal>mysql</literal> datastore, you can add a version to it, using the trove-manage <command>datastore_version_update</command> command. The version indicates which guest image to use.</para>
|
||||
<para>This example uses the following arguments:</para>
|
||||
<informaltable rules="all" width="80%">
|
||||
<col width="20%"/>
|
||||
<col width="40%"/>
|
||||
<col width="40%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Argument</th>
|
||||
<th>Description</th>
|
||||
<th>In this example:</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
<para>config file</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>The configuration file to use.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>--config-file=/etc/trove/trove.conf</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>datastore</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>The name of the datastore you just created via trove-manage <command>datastore_update</command>.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>mysql</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>version name</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>The name of the version you are adding to the datastore.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>mysql-5.5</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>datastore manager</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>Which datastore manager to use for this version. Typically, the datastore manager is identified by one of the following strings, depending on the database:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>mysql</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>redis</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>mongodb</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>cassandra</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>couchbase</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>percona</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>mysql</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>glance ID</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>The ID of the guest image you just registered with the Image service. You can get this ID by using the glance <command>image-show</command> <replaceable>IMAGE_NAME</replaceable> command.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>bb75f870-0c33-4907-8467-1367f8cb15b6</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>packages</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>If you want to put additional packages on each guest that you create with this datastore version, you can list the package names here.</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>""</option></para>
|
||||
<para>In this example, the guest image already contains all the required packages, so leave this argument empty.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>active</para>
|
||||
</td>
|
||||
<td>
|
||||
<para>Set this to either 1 or 0:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><literal>1</literal> = active</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>0</literal> = disabled</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</td>
|
||||
<td>
|
||||
<para><option>1</option></para>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</informaltable>
|
||||
<para>Example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>trove-manage --config-file=/etc/trove/trove.conf datastore_version_update \
|
||||
mysql mysql-5.5 mysql <replaceable>GLANCE_ID</replaceable> "" 1</userinput></screen>
|
||||
<formalpara>
|
||||
<title>Optional.</title>
|
||||
<para>Set your new version as the default version. To do this, use the trove-manage <command>datastore_update</command> command again, this time specifying the version you just created.</para>
|
||||
</formalpara>
|
||||
<screen><prompt>$</prompt> <userinput>trove-manage --config-file=/etc/trove/trove.conf datastore_update mysql "mysql-5.5"</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<title>Load validation rules for configuration groups</title>
|
||||
<note>
|
||||
<title>Applies only to MySQL and Percona datastores</title>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>If you just created a MySQL or Percona datastore, then you need to load the appropriate validation rules, as described in this step.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>If you just created a different datastore, skip this step.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
<formalpara>
|
||||
<title>Background.</title>
|
||||
<para>You can manage database configuration tasks by using configuration groups. Configuration groups let you set configuration parameters, in bulk, on one or more databases.</para>
|
||||
</formalpara>
|
||||
<para>When you set up a configuration group using the trove <command>configuration-create</command> command, this command compares the configuration values you are setting against a list of valid configuration values that are stored in the <filename>validation-rules.json</filename> file.</para>
|
||||
<informaltable rules="all" width="80%">
|
||||
<col width="20%"/>
|
||||
<col width="40%"/>
|
||||
<col width="40%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Operating System</th>
|
||||
<th>Location of <filename>validation-rules.json</filename></th>
|
||||
<th>Notes</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
<para>Ubuntu 14.04</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><filename>/usr/lib/python2.7/dist-packages/trove/templates/<replaceable>DATASTORE_NAME</replaceable></filename></para>
|
||||
</td>
|
||||
<td>
|
||||
<para><replaceable>DATASTORE_NAME</replaceable> is the name of either the MySQL datastore or the Percona datastore. This is typically either <literal>mysql</literal> or <literal>percona</literal>.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para>RHEL 7, CentOS 7, Fedora 20, and Fedora 21</para>
|
||||
</td>
|
||||
<td>
|
||||
<para><filename>/usr/lib/python2.7/site-packages/trove/templates/<replaceable>DATASTORE_NAME</replaceable></filename></para>
|
||||
</td>
|
||||
<td>
|
||||
<para><replaceable>DATASTORE_NAME</replaceable> is the name of either the MySQL datastore or the Percona datastore. This is typically either <literal>mysql</literal> or <literal>percona</literal>.</para>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</informaltable>
|
||||
<para>Therefore, as part of creating a datastore, you need to load the <filename>validation-rules.json</filename> file, using the trove-manage <command>db_load_datastore_config_parameters</command> command. This command takes the following arguments:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Datastore name</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Datastore version</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Full path to the <filename>validation-rules.json</filename> file</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>This example loads the <filename>validation-rules.json</filename> file for a MySQL database on Ubuntu 14.04:</para>
|
||||
<screen><prompt>$</prompt> <userinput>trove-manage db_load_datastore_config_parameters mysql "mysql-5.5" \
|
||||
/usr/lib/python2.7/dist-packages/trove/templates/mysql/validation-rules.json</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<title>Validate datastore</title>
|
||||
<para>To validate your new datastore and version, start by listing the datastores on your system:</para>
|
||||
<screen><prompt>$</prompt> <userinput>trove datastore-list</userinput>
|
||||
<computeroutput>+--------------------------------------+--------------+
|
||||
| id | name |
|
||||
+--------------------------------------+--------------+
|
||||
| 10000000-0000-0000-0000-000000000001 | Legacy MySQL |
|
||||
| e5dc1da3-f080-4589-a4c2-eff7928f969a | mysql |
|
||||
+--------------------------------------+--------------+</computeroutput></screen>
|
||||
<para>Take the ID of the <literal>mysql</literal> datastore and pass it in with the <command>datastore-version-list</command> command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>trove datastore-version-list <replaceable>DATASTORE_ID</replaceable></userinput>
|
||||
<computeroutput>+--------------------------------------+-----------+
|
||||
| id | name |
|
||||
+--------------------------------------+-----------+
|
||||
| 36a6306b-efd8-4d83-9b75-8b30dd756381 | mysql-5.5 |
|
||||
+--------------------------------------+-----------+</computeroutput></screen></step>
|
||||
</procedure>
|
||||
</section>
|
||||
<section xml:id="section_configure-cluster">
|
||||
<title>Configure a cluster</title>
|
||||
<para>An administrative user can configure various characteristics of a MongoDB cluster.</para>
|
||||
<simplesect>
|
||||
<title>Query routers and config servers</title>
|
||||
<formalpara>
|
||||
<title>Background.</title>
|
||||
<para>Each cluster includes at least one query router and one config server. Query routers and config servers count against your quota. When you delete a cluster, the system deletes the associated query router(s) and config server(s).</para>
|
||||
</formalpara>
|
||||
<formalpara>
|
||||
<title>Configuration.</title>
|
||||
<para>By default, the system creates one query router and one config server per cluster. You can change this by editing the <filename>/etc/trove/trove.conf</filename> file. These settings are in the <literal>[mongodb]</literal> section of the file:</para>
|
||||
</formalpara>
|
||||
<informaltable rules="all" width="60%">
|
||||
<col width="50%"/>
|
||||
<col width="50%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Setting</th>
|
||||
<th>Valid values are:</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
<para><option>num_config_servers_per_cluster</option></para>
|
||||
</td>
|
||||
<td>
|
||||
<para>1 or 3</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<para><option>num_query_routers_per_cluster</option></para>
|
||||
</td>
|
||||
<td>
|
||||
<para>1 or 3</para>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</informaltable>
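            <para>For example, the following <filename>trove.conf</filename>
                excerpt, with values shown for illustration, configures three
                config servers and one query router per cluster:</para>
            <programlisting language="ini">[mongodb]
num_config_servers_per_cluster = 3
num_query_routers_per_cluster = 1</programlisting>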
|
||||
</simplesect>
|
||||
</section>
|
||||
</chapter>
|
@ -1,258 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="ch-identity-mgmt-config">
|
||||
<title>Identity management</title>
|
||||
<para>OpenStack Identity, code-named keystone, is the
|
||||
default identity management system for OpenStack. After you
|
||||
install Identity, you configure it through the
|
||||
<filename>etc/keystone.conf</filename> configuration file and,
|
||||
possibly, a separate logging configuration file. You initialize
|
||||
data into Identity by using the
|
||||
<command>keystone</command> command-line client.</para>
|
||||
<section xml:id="keystone-admin-concepts">
|
||||
<title>Identity concepts</title>
|
||||
<xi:include
|
||||
href="../common/section_keystone-concepts-user-management.xml"/>
|
||||
<xi:include
|
||||
href="../common/section_keystone-concepts-service-management.xml"/>
|
||||
<xi:include
|
||||
href="../common/section_keystone-concepts-group-management.xml"/>
|
||||
</section>
|
||||
<xi:include href="../common/section_keystone_certificates-for-pki.xml"/>
|
||||
<xi:include href="../common/section_keystone-ssl-config.xml"/>
|
||||
<xi:include href="../common/section_keystone-external-auth.xml"/>
|
||||
<xi:include href="../common/section_keystone_config_ldap.xml"/>
|
||||
<xi:include href="identity/section_keystone-token-binding.xml"/>
|
||||
<xi:include href="identity/section_keystone-trusts.xml"/>
|
||||
<xi:include href="identity/section_caching-layer.xml"/>
|
||||
<section xml:id="user-crud">
|
||||
<title>User CRUD</title>
|
||||
<para>Identity provides a user CRUD (Create, Read, Update, and Delete)
|
||||
filter that can be added to the <literal>public_api</literal> pipeline.
|
||||
        The user CRUD filter enables users to use an HTTP PATCH to change their
|
||||
own password. To enable this extension you should define a
|
||||
<literal>user_crud_extension</literal> filter, insert it after
|
||||
the <literal>*_body</literal> middleware and before the
|
||||
<literal>public_service</literal> application in the
|
||||
<literal>public_api</literal> WSGI pipeline in
|
||||
<filename>keystone-paste.ini</filename>. For example:</para>
|
||||
<programlisting language="ini"><?db-font-size 75%?>[filter:user_crud_extension]
|
||||
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
|
||||
|
||||
[pipeline:public_api]
|
||||
pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension user_crud_extension public_service</programlisting>
|
||||
        <para>Each user can then change their own password with an HTTP PATCH:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl -X PATCH http://localhost:5000/v2.0/OS-KSCRUD/users/<replaceable>USERID</replaceable> -H "Content-type: application/json" \
|
||||
-H "X_Auth_Token: <replaceable>AUTHTOKENID</replaceable>" -d '{"user": {"password": "ABCD", "original_password": "DCBA"}}'</userinput></screen>
|
||||
        <para>When a user changes their password in this way, all of that
            user's current tokens are also invalidated.</para>
|
||||
<note><para>Only use a KVS back end for tokens when testing.</para></note>
|
||||
</section>
|
||||
<section xml:id="keystone-logging">
|
||||
<title>Logging</title>
|
||||
<para>You configure logging externally to the rest of Identity. The name of
|
||||
the file specifying the logging configuration is set using the
|
||||
<literal>log_config</literal> option in the <literal>[DEFAULT]</literal>
|
||||
section of the <filename>keystone.conf</filename> file. To route logging
|
||||
through syslog, set <literal>use_syslog=true</literal> in the
|
||||
<literal>[DEFAULT]</literal> section.</para>
|
||||
<para>A sample logging configuration file is available with the project in
|
||||
<filename>etc/logging.conf.sample</filename>. Like
|
||||
other OpenStack projects, Identity uses the Python
|
||||
logging module, which provides extensive configuration options
|
||||
that let you define the output levels and formats.</para>
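        <para>For example, a minimal <filename>keystone.conf</filename>
            excerpt (the file path is illustrative) that routes logging
            through a separate configuration file, or alternatively through
            syslog, looks like this:</para>
        <programlisting language="ini">[DEFAULT]
# Use a dedicated logging configuration file ...
log_config = /etc/keystone/logging.conf
# ... or send log output to syslog instead
# use_syslog = true</programlisting>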
|
||||
</section>
|
||||
<section xml:id="running-keystone">
|
||||
<title>Start the Identity services</title>
|
||||
<para>To start the services for Identity, run the
|
||||
following command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>keystone-all</userinput></screen>
|
||||
<para>This command starts two wsgi.Server instances configured by
|
||||
the <filename>keystone.conf</filename> file as described
|
||||
previously. One of these wsgi servers is
|
||||
<literal>admin</literal> (the administration API) and the
|
||||
other is <literal>main</literal> (the primary/public API
|
||||
interface). Both run in a single process.</para>
|
||||
</section>
|
||||
<section xml:id="example-usage">
|
||||
<title>Example usage</title>
|
||||
<para>The <literal>keystone</literal> client is set up to expect
|
||||
commands in the general form of <literal>keystone</literal>
|
||||
<literal>command</literal>
|
||||
<literal>argument</literal>, followed by flag-like keyword
|
||||
arguments to provide additional (often optional) information.
|
||||
        For example, the commands <literal>user-list</literal> and
|
||||
<literal>tenant-create</literal> can be invoked as
|
||||
follows:</para>
|
||||
<programlisting language="bash"><?db-font-size 65%?># Using token auth env variables
|
||||
export OS_SERVICE_ENDPOINT=http://127.0.0.1:5000/v2.0/
|
||||
export OS_SERVICE_TOKEN=secrete_token
|
||||
keystone user-list
|
||||
keystone tenant-create --name demo
|
||||
|
||||
# Using token auth flags
|
||||
keystone --os-token secrete --os-endpoint http://127.0.0.1:5000/v2.0/ user-list
|
||||
keystone --os-token secrete --os-endpoint http://127.0.0.1:5000/v2.0/ tenant-create --name=demo
|
||||
|
||||
# Using user + password + project_name env variables
|
||||
export OS_USERNAME=admin
|
||||
export OS_PASSWORD=secrete
|
||||
export OS_PROJECT_NAME=admin
|
||||
openstack user list
|
||||
openstack project create demo
|
||||
|
||||
# Using user + password + project-name flags
|
||||
openstack --os-username admin --os-password secrete --os-project-name admin user list
|
||||
openstack --os-username admin --os-password secrete --os-project-name admin project create demo</programlisting>
|
||||
</section>
|
||||
<section xml:id="auth-token-middleware-with-username-and-password">
|
||||
<title>Authentication middleware with user name and
|
||||
password</title>
|
||||
<para>You can also configure Identity authentication
|
||||
middleware using the <option>admin_user</option> and
|
||||
<option>admin_password</option> options.</para>
|
||||
<note><para>The <option>admin_token</option> option is deprecated,
|
||||
and no longer used for configuring auth_token middleware.</para></note>
|
||||
<para>For services that have a separate paste-deploy .ini file,
|
||||
you can configure the authentication middleware in the
|
||||
<literal>[keystone_authtoken]</literal> section of the main
|
||||
configuration file, such as <filename>nova.conf</filename>.</para>
|
||||
        <para>Set the following values in
            <filename>nova.conf</filename>:</para>
|
||||
<programlisting language="ini"><?db-font-size 75%?>[DEFAULT]
|
||||
...
|
||||
auth_strategy=keystone
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_uri = http://<replaceable>controller</replaceable>:5000/v2.0
|
||||
identity_uri = http://<replaceable>controller</replaceable>:35357
|
||||
admin_user = admin
|
||||
admin_password = SuperSekretPassword
|
||||
admin_tenant_name = service</programlisting>
|
||||
<note>
|
||||
<para>The middleware parameters in the paste config take
|
||||
priority. You must remove them to use the values in the
|
||||
[keystone_authtoken] section.</para>
|
||||
</note>
|
||||
<note>
|
||||
<para>Comment out any <literal>auth_host</literal>,
|
||||
<literal>auth_port</literal>, and
|
||||
<literal>auth_protocol</literal> options because the
|
||||
<literal>identity_uri</literal> option replaces them.</para>
|
||||
</note>
|
||||
<para>This sample paste config filter makes use of the
|
||||
<option>admin_user</option> and
|
||||
<option>admin_password</option> options:</para>
|
||||
<programlisting language="ini"><?db-font-size 75%?>[filter:authtoken]
|
||||
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
|
||||
auth_uri = http://<replaceable>controller</replaceable>:5000/v2.0
|
||||
identity_uri = http://<replaceable>controller</replaceable>:35357
|
||||
auth_token = 012345SECRET99TOKEN012345
|
||||
admin_user = admin
|
||||
admin_password = keystone123</programlisting>
|
||||
<note>
|
||||
<para>Using this option requires an admin tenant/role
|
||||
relationship. The admin user is granted access to the admin
|
||||
role on the admin tenant.</para>
|
||||
</note>
|
||||
<note>
|
||||
<para>Comment out any <literal>auth_host</literal>,
|
||||
<literal>auth_port</literal>, and
|
||||
<literal>auth_protocol</literal> options because the
|
||||
<literal>identity_uri</literal> option replaces them.</para>
|
||||
</note>
|
||||
</section>
|
||||
<section xml:id="identity-service-api-protection-with-role-based-access-control">
|
||||
<title>Identity API protection with role-based access control (RBAC)
|
||||
</title>
|
||||
<para>Like most OpenStack projects, Identity supports the protection of
|
||||
its APIs by defining policy rules based on an RBAC approach. Identity
|
||||
stores a reference to a policy JSON file in the main Identity
|
||||
configuration file, <filename>keystone.conf</filename>. Typically this
|
||||
file is named <filename>policy.json</filename>, and it contains the rules
|
||||
for which roles have access to certain actions in defined services.</para>
|
||||
<para>Each Identity API v3 call has a line in the policy file that dictates which
|
||||
level of governance of access applies.</para>
|
||||
<programlisting language="ini"><replaceable>API_NAME</replaceable>: <replaceable>RULE_STATEMENT</replaceable> or <replaceable>MATCH_STATEMENT</replaceable></programlisting>
|
||||
<para>Where:</para>
|
||||
<para><code><replaceable>RULE_STATEMENT</replaceable></code> can contain <code><replaceable>RULE_STATEMENT</replaceable></code> or <code><replaceable>MATCH_STATEMENT</replaceable></code>.</para>
|
||||
<para><code><replaceable>MATCH_STATEMENT</replaceable></code> is a set of identifiers that must match between the token
|
||||
provided by the caller of the API and the parameters or target entities of
|
||||
the API call in question. For example:</para>
|
||||
<programlisting language="ini">"identity:create_user": [["role:admin", "domain_id:%(user.domain_id)s"]]</programlisting>
|
||||
<para>Indicates that to create a user, you must have the admin role in your token and
|
||||
the <literal>domain_id</literal> in your token (which implies this must be a domain-scoped token)
|
||||
must match the <literal>domain_id</literal> in the user object that you are trying to
|
||||
create. In other words, you must have the admin role on the domain in which
|
||||
you are creating the user, and the token that you use must be scoped to that
|
||||
domain.</para>
|
||||
<para>Each component of a match statement uses this format:</para>
|
||||
<programlisting language="ini"><replaceable>ATTRIB_FROM_TOKEN</replaceable>:<replaceable>CONSTANT</replaceable> or <replaceable>ATTRIB_RELATED_TO_API_CALL</replaceable></programlisting>
|
||||
<para>The Identity service expects these attributes:</para>
|
||||
<para>Attributes from token: <literal>user_id</literal>, the <literal>domain_id</literal> or <literal>project_id</literal> depending on
|
||||
the scope, and the list of roles you have within that scope.</para>
|
||||
<para>Attributes related to API call: Any parameters passed into the API call
|
||||
are available, along with any filters specified in the query string. You
|
||||
reference attributes of objects passed with an object.attribute syntax
|
||||
(such as, <literal>user.domain_id</literal>). The target objects of an API are
|
||||
also available using a target.object.attribute syntax. For
|
||||
instance:</para>
|
||||
<programlisting language="ini">"identity:delete_user": [["role:admin", "domain_id:%(target.user.domain_id)s"]]</programlisting>
|
||||
<para>would ensure that Identity only deletes the user object in the same
|
||||
domain as the provided token.</para>
|
||||
        <para>Every target object has an <literal>id</literal> and a <literal>name</literal> available as
            <literal>target.<replaceable>OBJECT</replaceable>.id</literal> and <literal>target.<replaceable>OBJECT</replaceable>.name</literal>. Identity
|
||||
retrieves other attributes from the database, and the attributes vary
|
||||
between object types. The Identity service filters out some database
|
||||
fields, such as user passwords.</para>
|
||||
<para>List of object attributes:</para>
|
||||
<programlisting language="ini">role:
|
||||
target.role.id
|
||||
target.role.name
|
||||
|
||||
user:
|
||||
target.user.default_project_id
|
||||
target.user.description
|
||||
target.user.domain_id
|
||||
target.user.enabled
|
||||
target.user.id
|
||||
target.user.name
|
||||
|
||||
group:
|
||||
target.group.description
|
||||
target.group.domain_id
|
||||
target.group.id
|
||||
target.group.name
|
||||
|
||||
domain:
|
||||
target.domain.enabled
|
||||
target.domain.id
|
||||
target.domain.name
|
||||
|
||||
project:
|
||||
target.project.description
|
||||
target.project.domain_id
|
||||
target.project.enabled
|
||||
target.project.id
|
||||
target.project.name</programlisting>
|
||||
<para>The default <filename>policy.json</filename> file supplied provides a
|
||||
somewhat basic example of API protection, and does not assume any
|
||||
particular use of domains. Refer to
|
||||
<filename>policy.v3cloudsample.json</filename> as an example of
|
||||
multi-domain configuration installations where a cloud provider wants to
|
||||
delegate administration of the contents of a domain to a particular admin
|
||||
domain. This example policy file also shows the use of an admin_domain to
|
||||
allow a cloud provider to enable cloud administrators to have wider access
|
||||
across the APIs.</para>
|
||||
<para>A clean installation could start with the standard policy file, to allow
|
||||
creation of the admin_domain with the first users within it. You could
|
||||
then obtain the domain_id of the admin domain, paste the ID into a
|
||||
modified version of <filename>policy.v3cloudsample.json</filename>, and
|
||||
then enable it as the main policy file.</para>
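        <para>As an illustrative sketch only (the section that owns this
            option has moved between releases), the active policy file is
            selected in <filename>keystone.conf</filename>:</para>
        <programlisting language="ini">[oslo_policy]
policy_file = policy.v3cloudsample.json</programlisting>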
|
||||
</section>
|
||||
<?hard-pagebreak?>
|
||||
<xi:include href="../common/section_identity-troubleshooting.xml"/>
|
||||
</chapter>
|
@ -1,22 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="ch_networking">
|
||||
<title>Networking</title>
|
||||
<para>Learn OpenStack Networking concepts, architecture, and basic and advanced
|
||||
<command>neutron</command> and <command>nova</command> command-line interface (CLI)
|
||||
commands.</para>
|
||||
<xi:include href="networking/section_networking_introduction.xml"/>
|
||||
<xi:include href="networking/section_networking_config-plugins.xml"/>
|
||||
<xi:include href="networking/section_networking_config-agents.xml"/>
|
||||
<xi:include href="networking/section_networking_arch.xml"/>
|
||||
<xi:include href="networking/section_networking-config-identity.xml"/>
|
||||
<xi:include href="networking/section_networking-adv-config.xml"/>
|
||||
<xi:include href="networking/section_networking-multi-dhcp-agents.xml"/>
|
||||
<xi:include href="networking/section_networking-use.xml"/>
|
||||
<xi:include href="networking/section_networking_adv_features.xml"/>
|
||||
<xi:include href="networking/section_networking_adv_operational_features.xml"/>
|
||||
<xi:include href="networking/section_networking_auth.xml"/>
|
||||
</chapter>
|
@ -1,20 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="ch_admin-openstack-object-storage">
|
||||
<title>Object Storage</title>
|
||||
<xi:include href="../common/section_objectstorage-intro.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-features.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-characteristics.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-components.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-ringbuilder.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-arch.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-replication.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-account-reaper.xml"/>
|
||||
<xi:include href="../common/section_objectstorage_tenant-specific-image-storage.xml"/>
|
||||
<xi:include href="section_object-storage-monitoring.xml"/>
|
||||
<xi:include href="section_object-storage-admin.xml"/>
|
||||
<xi:include href="../common/section_objectstorage-troubleshoot.xml"/>
|
||||
</chapter>
|
@ -1,45 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="ch_admin-openstack-orchestration">
|
||||
<title>Orchestration</title>
|
||||
    <para>The Orchestration module is an engine that launches multiple
        composite cloud applications from templates, which are text files
        that can be treated like code. A native Heat Orchestration Template
        (HOT) format is evolving, but Orchestration also aims to provide
        compatibility with the AWS CloudFormation template format, so that
        many existing CloudFormation templates can be launched on
        OpenStack.</para>
|
||||
<section xml:id="section_orchestration-introduction">
|
||||
<title>Introduction</title>
|
||||
        <para>Orchestration is a tool for orchestrating clouds: it
            automatically configures and deploys resources in stacks. Such
            deployments can be simple, such as deploying WordPress on Ubuntu
            with a SQL back end, or quite complex, such as launching a group
            of servers that autoscale, starting and stopping instances based
            on real-time CPU load information from the Telemetry
            module.</para>
|
||||
<para>Orchestration stacks are defined with templates, which are
|
||||
non-procedural documents describing tasks in terms of resources,
|
||||
            parameters, inputs, constraints, and dependencies. When the
            Orchestration module was originally introduced, it worked with
|
||||
AWS CloudFormation templates, which are in JSON format.</para>
|
||||
        <para>Now, Orchestration also executes HOT (Heat Orchestration
            Template) templates, written in YAML: a terse notation that
            loosely follows Python/Ruby-type structural conventions (colons,
            returns, indentation), so it is easier to write, parse, grep,
            generate with tools, and maintain with source-code management
            systems.</para>
|
||||
<para>You can access Orchestration through the CLI or through RESTful
queries. The Orchestration module provides both an OpenStack-native
REST API and a CloudFormation-compatible Query API. Orchestration is
also integrated with the OpenStack dashboard, so that stacks can be
launched from templates through the web interface.</para>
|
||||
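<para>For example, a stack can typically be launched from a HOT template
with the <command>heat</command> client by using a command similar to the
following (the template file name and parameter values are placeholders):</para>
<screen><prompt>$</prompt> <userinput>heat stack-create mystack --template-file=my-template.yaml --parameters="key_name=my_keypair"</userinput></screen>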
<para>For more details on how to use the Orchestration module on the command line, see the
<link xlink:href="http://docs.openstack.org/cli-reference/content/heatclient_commands.html">
OpenStack Command-Line Interface Reference</link>.
</para>
|
||||
</section>
|
||||
<xi:include href="orchestration/section_orchestration-auth-model.xml"/>
|
||||
<xi:include href="orchestration/section_orchestration-stack-domain-users.xml"/>
|
||||
</chapter>
|
@ -1,55 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="ch_admin-openstack-telemetry">
|
||||
<title>Telemetry</title>
|
||||
<para>The Telemetry module is the metering service in OpenStack.</para>
|
||||
<section xml:id="section_telemetry-introduction">
|
||||
<title>Introduction</title>
|
||||
<para>Even in the cloud industry, providers must use a multi-step
|
||||
process for billing. The required steps to bill for usage in a
|
||||
cloud environment are metering, rating, and billing. Because the
|
||||
provider's requirements may be far too specific for a shared
|
||||
solution, rating and billing solutions cannot be designed in a
|
||||
common module that satisfies all. Providing users with measurements
|
||||
on cloud services is required to meet the "measured service"
|
||||
definition of cloud computing.</para>
|
||||
<para>The Telemetry module was originally designed to support billing
|
||||
systems for OpenStack cloud resources. This project only covers the
|
||||
metering portion of the required processing for billing. This module
|
||||
collects information about the system and stores it in the form of
|
||||
samples in order to provide data about anything that can be billed.
|
||||
</para>
|
||||
<para>In addition to system measurements, the Telemetry module also
|
||||
captures event notifications triggered when various actions are
|
||||
executed in the OpenStack system. This data is captured as Events
|
||||
and stored alongside metering data.</para>
|
||||
<para>The list of meters is continuously growing, which makes it
possible to use the data collected by Telemetry for purposes other
than billing. For example, the autoscaling feature in the
Orchestration module can be triggered by alarms that this module sets;
Orchestration is then notified by Telemetry when an alarm fires.</para>
|
||||
<para>The sections in this document contain information about the
|
||||
architecture and usage of Telemetry. The first section contains a
|
||||
brief summary about the system architecture used in a typical
|
||||
OpenStack deployment. The second section describes the data collection
|
||||
mechanisms. You can also read about alarming to understand how alarm
|
||||
definitions can be posted to Telemetry and what actions can happen if
|
||||
an alarm is raised. The last section contains a troubleshooting
|
||||
guide, which mentions error situations and possible solutions for the
|
||||
problems.</para>
|
||||
<para>You can retrieve the collected samples in three different ways: with
the REST API, with the command-line interface, or with the Metering tab
of the OpenStack dashboard.</para>
|
||||
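<para>For example, with the command-line client you can list the available
meters and then the samples collected for one of them (meter names depend
on your deployment; <literal>cpu_util</literal> is used here only as an
illustration):</para>
<screen><prompt>$</prompt> <userinput>ceilometer meter-list</userinput>
<prompt>$</prompt> <userinput>ceilometer sample-list --meter cpu_util</userinput></screen>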
</section>
|
||||
<xi:include href="telemetry/section_telemetry-system-architecture.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-data-collection.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-data-retrieval.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-alarms.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-measurements.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-events.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-troubleshooting-guide.xml"/>
|
||||
<xi:include href="telemetry/section_telemetry-best-practices.xml"/>
|
||||
</chapter>
|
@ -1,71 +0,0 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="section_configuring-compute-to-use-ipv6-addresses">
|
||||
<title>Configure Compute to use IPv6 addresses</title>
|
||||
<para>If you are using OpenStack Compute with
|
||||
<systemitem>nova-network</systemitem>, you can put Compute into dual-stack
|
||||
mode, so that it uses both IPv4 and IPv6 addresses for communication. In
|
||||
dual-stack mode, instances can acquire their IPv6 global unicast address
|
||||
by using a stateless address auto-configuration mechanism [RFC 4862/2462].
|
||||
IPv4/IPv6 dual-stack mode works with both <literal>VlanManager</literal>
|
||||
and <literal>FlatDHCPManager</literal> networking modes.
|
||||
</para>
|
||||
<para>In <literal>VlanManager</literal> networking mode, each project uses a
|
||||
different 64-bit global routing prefix. In
|
||||
<literal>FlatDHCPManager</literal> mode, all instances use one 64-bit
|
||||
global routing prefix.
|
||||
</para>
|
||||
<para>This configuration was tested with virtual machine images that have an
|
||||
IPv6 stateless address auto-configuration capability. This capability is
|
||||
required for any VM to run with an IPv6 address. You must use an EUI-64
|
||||
address for stateless address auto-configuration. Each node that executes
|
||||
a <literal>nova-*</literal> service must have
|
||||
<literal>python-netaddr</literal> and <literal>radvd</literal> installed.
|
||||
</para>
|
||||
<procedure>
|
||||
<title>Switch into IPv4/IPv6 dual-stack mode</title>
|
||||
<step>
|
||||
<para>For every node running a <literal>nova-*</literal> service,
|
||||
install <systemitem>python-netaddr</systemitem>:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install python-netaddr</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>For every node running <literal>nova-network</literal>, install
|
||||
<literal>radvd</literal> and configure IPv6 networking:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install radvd</userinput>
|
||||
<prompt>#</prompt> <userinput>echo 1 > /proc/sys/net/ipv6/conf/all/forwarding</userinput>
|
||||
<prompt>#</prompt> <userinput>echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>On all nodes, edit the <filename>nova.conf</filename> file and
|
||||
specify <literal>use_ipv6 = True</literal>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart all <literal>nova-*</literal> services.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
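<para>As a minimal sketch of the result of this procedure, each node's
<filename>nova.conf</filename> file contains at least:</para>
<programlisting language="ini">[DEFAULT]
use_ipv6 = True</programlisting>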
<note>
|
||||
<para>You can add a fixed range for IPv6 addresses to the <command>nova network-create</command>
|
||||
command. Specify <option>public</option> or <option>private</option> after the
|
||||
<parameter>network-create</parameter> parameter.
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 <replaceable>FIXED_RANGE_V4</replaceable> --vlan <replaceable>VLAN_ID</replaceable> --vpn <replaceable>VPN_START</replaceable> --fixed-range-v6 <replaceable>FIXED_RANGE_V6</replaceable></userinput></screen>
|
||||
<para>You can set the IPv6 global routing prefix by using the
|
||||
<parameter>--fixed_range_v6</parameter> parameter. The default value for
|
||||
the parameter is <literal>fd00::/48</literal>.
|
||||
</para>
|
||||
<para>When you use <literal>FlatDHCPManager</literal>, the command
|
||||
uses the original <parameter>--fixed_range_v6</parameter> value. For
|
||||
example:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 10.0.2.0/24 --fixed-range-v6 fd00:1::/48</userinput></screen>
|
||||
<para>When you use <literal>VlanManager</literal>, the command increments
|
||||
the subnet ID to create subnet prefixes. Guest VMs use this prefix to
|
||||
generate their IPv6 global unicast address. For example:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create public --fixed-range-v4 10.0.1.0/24 --vlan 100 --vpn 1000 --fixed-range-v6 fd00:1::/48</userinput></screen>
|
||||
</note>
|
||||
<xi:include href="../../common/tables/nova-ipv6.xml"/>
|
||||
</section>
|
@ -1,441 +0,0 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="section_configuring-compute-migrations">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Configure migrations</title>
|
||||
<note>
|
||||
<para>Only cloud administrators can perform live migrations. If your cloud
|
||||
is configured to use cells, you can perform live migration within but
|
||||
not between cells.
|
||||
</para>
|
||||
</note>
|
||||
<para>Migration enables an administrator to move a virtual-machine instance
|
||||
from one compute host to another. This feature is useful when a compute
|
||||
host requires maintenance. Migration can also be useful to redistribute
|
||||
the load when many VM instances are running on a specific physical machine.
|
||||
</para>
|
||||
<para>The migration types are:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Non-live migration</emphasis> (sometimes
|
||||
referred to simply as 'migration'). The instance is shut down for a
|
||||
period of time to be moved to another hypervisor. In this case, the
|
||||
instance recognizes that it was rebooted.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Live migration</emphasis> (or 'true live
|
||||
migration'). Almost no instance downtime. Useful when the instances
|
||||
must be kept running during the migration. The different types of live
|
||||
migration are:
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Shared storage-based live migration</emphasis>.
|
||||
Both hypervisors have access to shared storage.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Block live migration</emphasis>. No
|
||||
shared storage is required. Incompatible with read-only devices
|
||||
such as CD-ROMs and <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/cli_config_drive.html"
|
||||
>Configuration Drive (config_drive)</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Volume-backed live migration</emphasis>.
|
||||
Instances are backed by volumes rather than ephemeral disk, no
|
||||
shared storage is required, and migration is supported (currently
|
||||
only available for libvirt-based hypervisors).
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The following sections describe how to configure your hosts and
|
||||
compute nodes for migrations by using the KVM and XenServer hypervisors.
|
||||
</para>
|
||||
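<para>As a quick illustration of the commands involved (described in more
detail in the following sections), a shared storage-based live migration
and a block live migration are typically started with the
<command>nova</command> client; the server and host names are placeholders:</para>
<screen><prompt>$</prompt> <userinput>nova live-migration <replaceable>SERVER_NAME</replaceable> <replaceable>HOST_NAME</replaceable></userinput>
<prompt>$</prompt> <userinput>nova live-migration --block-migrate <replaceable>SERVER_NAME</replaceable> <replaceable>HOST_NAME</replaceable></userinput></screen>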
<section xml:id="configuring-migrations-kvm-libvirt">
|
||||
<title>KVM-Libvirt</title>
|
||||
<section xml:id="configuring-migrations-kvm-shared-storage">
|
||||
<title>Shared storage</title>
|
||||
<itemizedlist>
|
||||
<title>Prerequisites</title>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Hypervisor:</emphasis> KVM with libvirt</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Shared storage:</emphasis>
|
||||
<filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename>
|
||||
(for example, <filename>/var/lib/nova/instances</filename>) has to
|
||||
be mounted by shared storage. This guide uses NFS but other options,
|
||||
including the <link
|
||||
xlink:href="http://gluster.org/community/documentation//index.php/OSConnect">
|
||||
OpenStack Gluster Connector</link> are available.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Instances:</emphasis> Instance can be
|
||||
migrated with iSCSI-based volumes.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<note>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Because the Compute service does not use the libvirt live
|
||||
migration functionality by default, guests are suspended before
|
||||
migration and might experience several minutes of downtime. For
|
||||
details, see <xref linkend="true-live-migration-kvm-libvirt"/>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>This guide assumes the default value for
|
||||
<option>instances_path</option> in your <filename>nova.conf</filename> file
|
||||
(<filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename>).
|
||||
If you have changed the <literal>state_path</literal> or
|
||||
<literal>instances_path</literal> variables, modify the commands
|
||||
accordingly.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>You must specify <literal>vncserver_listen=0.0.0.0</literal>
|
||||
or live migration will not work correctly.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>You must specify the <literal>instances_path</literal>
|
||||
in each node that runs nova-compute.
|
||||
The mount point for <literal>instances_path</literal> must be the same value for each node,
|
||||
or live migration will not work correctly.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
<section xml:id="section_example-compute-install">
|
||||
<title>Example Compute installation environment</title>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Prepare at least three servers. In this example, we refer to
|
||||
the servers as <literal>HostA</literal>, <literal>HostB</literal>,
|
||||
and <literal>HostC</literal>:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><literal>HostA</literal> is the
|
||||
<firstterm baseform="cloud controller">Cloud Controller</firstterm>,
|
||||
and should run these services: <systemitem class="service">
|
||||
nova-api</systemitem>, <systemitem class="service">nova-scheduler</systemitem>,
|
||||
<literal>nova-network</literal>, <systemitem class="service">
|
||||
cinder-volume</systemitem>, and <literal>nova-objectstore</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>HostB</literal> and <literal>HostC</literal> are
|
||||
the <firstterm baseform="compute node">compute nodes</firstterm>
|
||||
that run <systemitem class="service">nova-compute</systemitem>.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>Ensure that <literal><replaceable>NOVA-INST-DIR</replaceable></literal>
|
||||
(set with <literal>state_path</literal> in the <filename>nova.conf</filename>
|
||||
file) is the same on all hosts.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>In this example, <literal>HostA</literal> is the NFSv4 server
|
||||
that exports <filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename>
|
||||
directory. <literal>HostB</literal> and <literal>HostC</literal>
|
||||
are NFSv4 clients that mount <literal>HostA</literal>.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<procedure>
|
||||
<title>Configuring your system</title>
|
||||
<step>
|
||||
<para>Configure your DNS or <filename>/etc/hosts</filename> and
|
||||
ensure it is consistent across all hosts. Make sure that the three
|
||||
hosts can perform name resolution with each other. As a test, use
|
||||
the <command>ping</command> command to ping each host from one
|
||||
another.
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>ping HostA</userinput>
|
||||
<prompt>$</prompt> <userinput>ping HostB</userinput>
|
||||
<prompt>$</prompt> <userinput>ping HostC</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Ensure that the UID and GID of your Compute and libvirt users
are identical between each of your servers. This ensures that the
permissions on the NFS mount work correctly.
|
||||
</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Export <filename><replaceable>NOVA-INST-DIR</replaceable>/instances</filename>
|
||||
from <literal>HostA</literal>, and ensure it is readable and
|
||||
writable by the Compute user on <literal>HostB</literal> and
|
||||
<literal>HostC</literal>.
|
||||
</para>
|
||||
<para>For more information, see: <link
|
||||
xlink:href="https://help.ubuntu.com/community/SettingUpNFSHowTo"
|
||||
>SettingUpNFSHowTo</link> or <link
|
||||
xlink:href="http://www.cyberciti.biz/faq/centos-fedora-rhel-nfs-v4-configuration/"
|
||||
>CentOS/Red Hat: Setup NFS v4.0 File Server</link></para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure the NFS server at <literal>HostA</literal> by adding
|
||||
the following line to the <filename>/etc/exports</filename> file:
|
||||
</para>
|
||||
<programlisting><replaceable>NOVA-INST-DIR</replaceable>/instances HostA/255.255.0.0(rw,sync,fsid=0,no_root_squash)</programlisting>
|
||||
<para>Change the subnet mask (<literal>255.255.0.0</literal>) to the
|
||||
appropriate value to include the IP addresses of <literal>HostB</literal>
|
||||
and <literal>HostC</literal>. Then restart the NFS server:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>/etc/init.d/nfs-kernel-server restart</userinput>
|
||||
<prompt>#</prompt> <userinput>/etc/init.d/idmapd restart</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>On both compute nodes, enable the 'execute/search' bit on your
shared directory to allow qemu to use the images within
the directories. On all hosts, run the following command:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>chmod o+x <replaceable>NOVA-INST-DIR</replaceable>/instances</userinput> </screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure NFS on <literal>HostB</literal> and <literal>HostC</literal>
|
||||
by adding the following line to the <filename>/etc/fstab</filename>
|
||||
file:
|
||||
</para>
|
||||
<programlisting>HostA:/ /<replaceable>NOVA-INST-DIR</replaceable>/instances nfs4 defaults 0 0</programlisting>
|
||||
<para>Ensure that you can mount the exported directory:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>mount -a -v</userinput></screen>
|
||||
<para>Check that <literal>HostA</literal> can see the
|
||||
<filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename>
|
||||
directory:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>ls -ld <filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename></userinput>
|
||||
<computeroutput>drwxr-xr-x 2 nova nova 4096 2012-05-19 14:34 nova-install-dir/instances/</computeroutput></screen>
|
||||
<para>Perform the same check on <literal>HostB</literal>
|
||||
and <literal>HostC</literal>, paying special attention to the
|
||||
permissions (Compute should be able to write):
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>ls -ld <filename><replaceable>NOVA-INST-DIR</replaceable>/instances/</filename></userinput>
|
||||
<computeroutput>drwxr-xr-x 2 nova nova 4096 2012-05-07 14:34 nova-install-dir/instances/</computeroutput></screen>
|
||||
<screen><prompt>$</prompt> <userinput>df -k</userinput>
|
||||
<computeroutput>Filesystem 1K-blocks Used Available Use% Mounted on
|
||||
/dev/sda1 921514972 4180880 870523828 1% /
|
||||
none 16498340 1228 16497112 1% /dev
|
||||
none 16502856 0 16502856 0% /dev/shm
|
||||
none 16502856 368 16502488 1% /var/run
|
||||
none 16502856 0 16502856 0% /var/lock
|
||||
none 16502856 0 16502856 0% /lib/init/rw
|
||||
HostA: 921515008 101921792 772783104 12% /var/lib/nova/instances ( <--- this line is important.)</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Update the libvirt configurations so that the calls can be
|
||||
made securely. These methods enable remote access over TCP and are
|
||||
not documented here.
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>SSH tunnel to libvirtd's UNIX socket</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>libvirtd TCP socket, with GSSAPI/Kerberos for auth+data
|
||||
encryption
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>libvirtd TCP socket, with TLS for encryption and x509
|
||||
client certs for authentication
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>libvirtd TCP socket, with TLS for encryption and Kerberos
|
||||
for authentication
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>Restart libvirt. After you run the command, ensure that
|
||||
libvirt is successfully restarted:
|
||||
</para>
|
||||
<screen><prompt>#</prompt> <userinput>stop libvirt-bin && start libvirt-bin</userinput>
|
||||
<prompt>$</prompt> <userinput>ps -ef | grep libvirt</userinput>
|
||||
<computeroutput>root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure your firewall to allow libvirt to communicate
|
||||
between nodes.
|
||||
</para>
|
||||
<para>By default, libvirt listens on TCP port 16509, and an
|
||||
ephemeral TCP range from 49152 to 49261 is used for the KVM
|
||||
communications. Based on the secure remote access TCP configuration
|
||||
you chose, be careful which ports you open, and always understand
|
||||
who has access. For information about ports that are used with
|
||||
libvirt, see <link xlink:href="http://libvirt.org/remote.html#Remote_libvirtd_configuration">
|
||||
the libvirt documentation</link>.
|
||||
</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>You can now configure options for live migration. In most
|
||||
cases, you will not need to configure any options. The following
|
||||
chart is for advanced users only.
|
||||
</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<xi:include href="../../common/tables/nova-livemigration.xml"/>
|
||||
</section>
|
||||
<section xml:id="true-live-migration-kvm-libvirt">
|
||||
<title>Enabling true live migration</title>
|
||||
<para>Prior to the Kilo release, the Compute service did not use the
|
||||
libvirt live migration function by default. To enable this
|
||||
function, add the following line to the
|
||||
<literal>[libvirt]</literal> section of the
|
||||
<filename>nova.conf</filename> file:
|
||||
</para>
|
||||
<programlisting>live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED</programlisting>
|
||||
<para>On versions older than Kilo, the Compute service does not use
|
||||
libvirt's live migration by default because there is a risk that
|
||||
the migration process will never end. This can happen if the guest
|
||||
operating system uses blocks on the disk faster than they can be
|
||||
migrated.
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
<section xml:id="configuring-migrations-kvm-block-migration">
|
||||
<title>Block migration</title>
|
||||
<para>Configuring KVM for block migration is exactly the same as
|
||||
the above configuration in <xref
|
||||
linkend="configuring-migrations-kvm-shared-storage" />, except
|
||||
that
|
||||
<literal><replaceable>NOVA-INST-DIR</replaceable>/instances</literal>
|
||||
is local to each host rather than shared. No NFS client or
|
||||
server configuration is required.</para>
|
||||
<note>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>To use block migration, you must use the
|
||||
<parameter>--block-migrate</parameter> parameter with the live migration
|
||||
command.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Block migration is incompatible with read-only devices
|
||||
such as CD-ROMs and <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/cli_config_drive.html"
|
||||
>Configuration Drive (config_drive)</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Since the ephemeral drives are copied over the network in
|
||||
block migration, migrations of instances with heavy I/O loads may
|
||||
never complete if the drives are writing faster than the data can
|
||||
be copied over the network.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
</section>
|
||||
</section>
|
||||
<!--status: good, right place-->
|
||||
<section xml:id="configuring-migrations-xenserver">
|
||||
<title>XenServer</title>
|
||||
<section xml:id="configuring-migrations-xenserver-shared-storage">
|
||||
<title>Shared storage</title>
|
||||
<itemizedlist>
|
||||
<title>Prerequisites</title>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Compatible XenServer hypervisors</emphasis>.
|
||||
For more information, see the <link
|
||||
xlink:href="http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/reference.html#pooling_homogeneity_requirements"
|
||||
>Requirements for Creating Resource Pools</link> section of the
|
||||
<citetitle>XenServer Administrator's Guide</citetitle>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Shared storage</emphasis>. An NFS export,
|
||||
visible to all XenServer hosts.
|
||||
</para>
|
||||
<note>
|
||||
<para>For the supported NFS versions, see the <link
|
||||
xlink:href="http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/reference.html#id1002701"
|
||||
>NFS VHD</link> section of the <citetitle>XenServer Administrator's
|
||||
Guide</citetitle>.
|
||||
</para>
|
||||
</note>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>To use shared storage live migration with XenServer hypervisors,
|
||||
the hosts must be joined to a XenServer pool. To create that pool, a
|
||||
host aggregate must be created with specific metadata. This metadata
|
||||
is used by the XAPI plug-ins to establish the pool.
|
||||
</para>
|
||||
<procedure>
|
||||
<title>Using shared storage live migration with XenServer hypervisors</title>
|
||||
<step>
|
||||
<para>Add an NFS VHD storage to your master XenServer, and set it as
|
||||
the default storage repository. For more information, see NFS
|
||||
VHD in the <citetitle>XenServer Administrator's Guide</citetitle>.
|
||||
</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Configure all compute nodes to use the default storage
|
||||
repository (<literal>sr</literal>) for pool operations. Add this
|
||||
line to your <filename>nova.conf</filename> configuration files
|
||||
on all compute nodes:
|
||||
</para>
|
||||
<programlisting>sr_matching_filter=default-sr:true</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create a host aggregate. This command creates the aggregate,
|
||||
and then displays a table that contains the ID of the new
|
||||
aggregate:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova aggregate-create <replaceable>POOL_NAME</replaceable> <replaceable>AVAILABILITY_ZONE</replaceable></userinput></screen>
|
||||
<para>Add metadata to the aggregate, to mark it as a hypervisor
|
||||
pool:
|
||||
</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova aggregate-set-metadata <replaceable>AGGREGATE_ID</replaceable> hypervisor_pool=true</userinput></screen>
|
||||
<screen><prompt>$</prompt> <userinput>nova aggregate-set-metadata <replaceable>AGGREGATE_ID</replaceable> operational_state=created</userinput></screen>
|
||||
<para>Make the first compute node part of that aggregate:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova aggregate-add-host <replaceable>AGGREGATE_ID</replaceable> <replaceable>MASTER_COMPUTE_NAME</replaceable></userinput></screen>
|
||||
<para>The host is now part of a XenServer pool.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Add hosts to the pool:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova aggregate-add-host <replaceable>AGGREGATE_ID</replaceable> <replaceable>COMPUTE_HOST_NAME</replaceable></userinput></screen>
|
||||
<note>
|
||||
<para>The added compute node and the host will shut down to join
|
||||
the host to the XenServer pool. The operation will fail if any
|
||||
server other than the compute node is running or suspended on
|
||||
the host.</para>
|
||||
</note>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
<!-- End of Shared Storage -->
|
||||
<section xml:id="configuring-migrations-xenserver-block-migration">
|
||||
<title>Block migration</title>
|
||||
<itemizedlist>
|
||||
<title>Prerequisites</title>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Compatible XenServer hypervisors</emphasis>.
|
||||
The hypervisors must support the Storage XenMotion feature. See
|
||||
your XenServer manual to make sure your edition has this feature.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<note>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>To use block migration, you must use the <!--CHANGE THIS ==-->
|
||||
<parameter>--block-migrate</parameter> parameter with the live migration
|
||||
command.</para>
|
||||
<!--Made the CHANGE THIS note a comment. Please revert if incorrect. LKB-->
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Block migration works only with EXT local storage
repositories, and the server must not have any volumes attached.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
</section>
|
||||
<!-- End of Block migration -->
|
||||
</section>
|
||||
</section>
|
||||
<!-- End of configuring migrations -->
|
@ -1,113 +0,0 @@
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]><section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="configuring-compute-service-groups">
|
||||
<title>Configure Compute service groups</title>
|
||||
<para>The Compute service must know the status of each compute node to
|
||||
effectively manage and use them. This can include events like a user
|
||||
launching a new VM, the scheduler sending a request to a live node, or a
|
||||
query to the ServiceGroup API to determine if a node is live.</para>
|
||||
<para>When a compute worker running the
|
||||
<systemitem class="service">nova-compute</systemitem> daemon starts, it
|
||||
calls the <systemitem>join</systemitem> API to join the compute group. Any
|
||||
service (such as the scheduler) can query the group's membership and the
|
||||
status of its nodes. Internally, the <systemitem>ServiceGroup</systemitem>
|
||||
client driver automatically updates the compute worker status.</para>
|
||||
|
||||
<!-- I don't understand what "available" means in this context. Can someone please help? LKB
|
||||
<para>The database, ZooKeeper, and Memcache drivers are available.</para>-->
|
||||
|
||||
<section xml:id="database-servicegroup-driver">
|
||||
<title>Database ServiceGroup driver</title>
|
||||
<para>By default, Compute uses the database driver to track if a node is
|
||||
live. In a compute worker, this driver periodically sends a
|
||||
<command>db update</command> command to the database, saying <quote>I'm
|
||||
OK</quote> with a timestamp. Compute uses a pre-defined timeout
|
||||
(<literal>service_down_time</literal>) to determine if a node is dead.</para>
|
||||
<para>The driver has limitations, which can be problematic depending on
|
||||
your environment. If a lot of compute worker nodes need to be checked,
|
||||
the database can be put under heavy load, which can cause the timeout to
|
||||
trigger, and a live node could incorrectly be considered dead. By default,
|
||||
the timeout is 60 seconds. Reducing the timeout value can help in this
|
||||
situation, but you must also make the database update more frequently,
|
||||
which again increases the database workload.</para>
|
||||
<para>The database contains data that is both transient (such as whether
|
||||
the node is alive) and persistent (such as entries for VM owners). With
|
||||
the ServiceGroup abstraction, Compute can treat each type separately.</para>
|
||||
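<para>As a sketch, this default behavior corresponds to values such as the
following in the <filename>/etc/nova/nova.conf</filename> file (these are
the defaults and normally do not need to be set explicitly):</para>
<programlisting language="ini"># Driver for the ServiceGroup service
servicegroup_driver="db"

# Timeout; maximum time since last check-in for up service (integer value).
# Helps to define whether a node is dead
service_down_time=60</programlisting>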
</section>
|
||||
<section xml:id="zookeeper-servicegroup-driver">
|
||||
<title>ZooKeeper ServiceGroup driver</title>
|
||||
|
||||
<para>The ZooKeeper ServiceGroup driver works by using ZooKeeper ephemeral
|
||||
nodes. ZooKeeper, unlike databases, is a distributed system, with its
|
||||
load divided among several servers. On a compute worker node, the driver
|
||||
can establish a ZooKeeper session, then create an ephemeral znode
|
||||
in the group directory. Ephemeral znodes have the same lifespan as the
|
||||
session. If the worker node or the
|
||||
<systemitem class="service">nova-compute</systemitem> daemon crashes, or
|
||||
a network partition is in place between the worker and the ZooKeeper
|
||||
server quorums, the ephemeral znodes are removed automatically. The
|
||||
driver obtains group membership by running the <command>ls</command>
command in the group directory.</para>
|
||||
<para>The ZooKeeper driver requires the ZooKeeper servers and client
|
||||
libraries. Setting up ZooKeeper servers is outside the scope of this
|
||||
guide (for more information, see
|
||||
<link xlink:href="http://zookeeper.apache.org/">Apache Zookeeper</link>).
|
||||
These client-side Python libraries must be installed on every compute node:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term><literal>python-zookeeper</literal></term>
|
||||
<listitem><para>The official Zookeeper Python binding</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>evzookeeper</literal></term>
|
||||
<listitem><para>This library makes the binding work with the eventlet
|
||||
threading model.</para></listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
<para>This example assumes the ZooKeeper server addresses and ports are
|
||||
<literal>192.168.2.1:2181</literal>, <literal>192.168.2.2:2181</literal>,
|
||||
and <literal>192.168.2.3:2181</literal>.</para>
|
||||
<para>These values in the <filename>/etc/nova/nova.conf</filename> file are
|
||||
required on every node for the ZooKeeper driver:</para>
|
||||
<programlisting language="ini"># Driver for the ServiceGroup service
|
||||
servicegroup_driver="zk"
|
||||
|
||||
[zookeeper]
|
||||
address="192.168.2.1:2181,192.168.2.2:2181,192.168.2.3:2181"</programlisting>
|
||||
<para>To customize the Compute Service groups, use these configuration
|
||||
option settings:</para>
|
||||
<xi:include href="../../common/tables/nova-zookeeper.xml"/>
|
||||
</section>
|
||||
<section xml:id="memcache-servicegroup-driver">
|
||||
<title>Memcache ServiceGroup driver</title>
|
||||
<para>The <systemitem>memcache</systemitem> ServiceGroup driver uses
|
||||
<systemitem>memcached</systemitem>, a distributed memory object
|
||||
caching system that is used to increase site performance. For more
|
||||
details, see <link xlink:href="http://memcached.org/">memcached.org</link>.</para>
|
||||
<para>To use the <systemitem>memcache</systemitem> driver, you must
|
||||
install <systemitem>memcached</systemitem>. You might already have
|
||||
it installed, as the same driver is also used for the OpenStack
|
||||
Object Storage and OpenStack dashboard. If you need to install
|
||||
<systemitem>memcached</systemitem>, see the instructions in the <link
|
||||
xlink:href="http://docs.openstack.org/"><citetitle>OpenStack
|
||||
Installation Guide</citetitle></link>.</para>
|
||||
<para>These values in the <filename>/etc/nova/nova.conf</filename> file
|
||||
are required on every node for the <systemitem>memcache</systemitem>
|
||||
driver:</para>
|
||||
<programlisting language="ini"># Driver for the ServiceGroup service
|
||||
servicegroup_driver="mc"
|
||||
|
||||
# Memcached servers. Use either a list of memcached servers to use for caching (list value),
|
||||
# or "<None>" for in-process caching (default).
|
||||
memcached_servers=<None>
|
||||
|
||||
# Timeout; maximum time since last check-in for up service (integer value).
|
||||
# Helps to define whether a node is dead
|
||||
service_down_time=60</programlisting>
|
||||
</section>
|
||||
</section>
|
@ -1,95 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_image-mgmt">
|
||||
<title>Image management</title>
|
||||
<para>The OpenStack Image service discovers, registers, and
|
||||
retrieves virtual machine images. The service also includes a
|
||||
RESTful API that allows you to query VM image metadata and
|
||||
retrieve the actual image with HTTP requests. For more
|
||||
information about the API, see the <link
|
||||
xlink:href="http://developer.openstack.org/api-ref.html"
|
||||
>OpenStack API Complete Reference</link> and the <link
|
||||
xlink:href="http://docs.openstack.org/developer/python-glanceclient/"
|
||||
>Python API</link>.</para>
|
||||
<para>The OpenStack Image service can be controlled using a
|
||||
command-line tool. For more information about using the
|
||||
OpenStack Image command-line tool, see the <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/common/cli_manage_images.html"
|
||||
>Manage Images</link> section in the <citetitle>OpenStack
|
||||
End User Guide</citetitle>.</para>
|
||||
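<para>For example, images can be listed and uploaded with commands similar
to the following (the image and file names are placeholders, and option
names can differ slightly between client versions):</para>
<screen><prompt>$</prompt> <userinput>glance image-list</userinput>
<prompt>$</prompt> <userinput>glance image-create --name "ubuntu-14.04" --disk-format qcow2 --container-format bare --is-public True --file ubuntu-14.04.qcow2</userinput></screen>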
<para>Virtual images that have been made available through the
|
||||
Image service can be stored in a variety of ways. In order to
|
||||
use these services, you must have a working installation of
|
||||
the Image service, with a working endpoint, and users that
|
||||
have been created in OpenStack Identity. Additionally, you
|
||||
must meet the environment variables required by the Compute
|
||||
and Image service clients.</para>
|
||||
<para>The Image service supports these back-end stores:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>File system</term>
|
||||
<listitem>
|
||||
<para>The OpenStack Image service stores virtual
|
||||
machine images in the file system back end by
|
||||
default. This simple back end writes image files
|
||||
to the local file system.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Object Storage</term>
|
||||
<listitem>
|
||||
<para>The OpenStack highly available service for
|
||||
storing objects.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Block Storage</term>
|
||||
<listitem>
|
||||
<para>The OpenStack highly available service for
|
||||
storing blocks.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>VMware</term>
|
||||
<listitem>
|
||||
<para>ESX/ESXi or vCenter Server target system.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>S3</term>
|
||||
<listitem>
|
||||
<para>The Amazon S3 service.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>HTTP</term>
|
||||
<listitem>
|
||||
<para>OpenStack Image service can read virtual machine
|
||||
images that are available on the Internet using
|
||||
HTTP. This store is read only.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>RADOS block device (RBD)</term>
|
||||
<listitem>
|
||||
<para>Stores images inside of a Ceph storage cluster
|
||||
using Ceph's RBD interface.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Sheepdog</term>
|
||||
<listitem>
|
||||
<para>A distributed storage system for QEMU/KVM.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>GridFS</term>
|
||||
<listitem>
|
||||
<para>Stores images using MongoDB.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
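<para>As a sketch of how a back end is selected (the section and option
names follow the Kilo-era <literal>glance_store</literal> configuration and
should be checked against your release), the default file system store
corresponds to settings similar to:</para>
<programlisting language="ini">[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/</programlisting>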
</section>
|
@ -1,133 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="section_compute-images-and-instances">
|
||||
<title>Images and instances</title>
|
||||
<para>Disk images provide templates for virtual machine file systems. The
|
||||
Image service controls storage and management of images.</para>
|
||||
<para>Instances are the individual virtual machines that run on physical
|
||||
compute nodes. Users can launch any number of instances from the same
|
||||
image. Each launched instance runs from a copy of the base image so that
|
||||
any changes made to the instance do not affect the base image. You can
|
||||
take snapshots of running instances to create an image based on the
|
||||
current disk state of a particular instance. The Compute service manages
|
||||
instances.</para>
|
||||
<para>When you launch an instance, you must choose a <literal>flavor</literal>,
|
||||
which represents a set of virtual resources. Flavors define how many
|
||||
virtual CPUs an instance has, the amount of RAM available to it, and the
|
||||
size of its ephemeral disks. Users must select from the set of available
|
||||
flavors defined on their cloud. OpenStack provides a number of predefined
|
||||
flavors that you can edit or add to.</para>
|
||||
<note>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>For more information about creating and troubleshooting
|
||||
images, see the <link xlink:href="http://docs.openstack.org/image-guide/content/">
|
||||
<citetitle>OpenStack Virtual Machine Image Guide</citetitle></link>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>For more information about image configuration options,
|
||||
see the <link xlink:href="http://docs.openstack.org/kilo/config-reference/content/ch_configuring-openstack-image-service.html">
|
||||
Image services</link> section of the <citetitle>OpenStack
|
||||
Configuration Reference</citetitle>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>For more information about flavors, see <xref linkend="customize-flavors"/>
|
||||
or <link xlink:href="http://docs.openstack.org/openstack-ops/content/flavors.html">
|
||||
Flavors</link> in the <citetitle>OpenStack Operations
|
||||
Guide</citetitle>.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
<para>You can add and remove additional resources from running instances,
|
||||
such as persistent volume storage, or public IP addresses. The example
|
||||
used in this chapter is of a typical virtual system within an OpenStack
|
||||
cloud. It uses the <systemitem class="service">cinder-volume</systemitem>
|
||||
service, which provides persistent block storage, instead of the
|
||||
ephemeral storage provided by the selected instance flavor.</para>
|
||||
<para>This diagram shows the system state prior to launching an instance.
|
||||
The image store, fronted by the Image service (glance) has a number of
|
||||
predefined images. Inside the cloud, a compute node contains the
|
||||
available vCPU, memory, and local disk resources. Additionally, the
|
||||
<systemitem class="service">cinder-volume</systemitem> service provides
|
||||
a number of predefined volumes.</para>
|
||||
<figure xml:id="initial-instance-state-figure">
|
||||
<title>Base image state with no running instances</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="../../common/figures/instance-life-1.png"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<para>To launch an instance, select an image, flavor, and any optional
|
||||
attributes. The selected flavor provides a root volume, labeled
|
||||
<literal>vda</literal> in this diagram, and additional ephemeral storage,
|
||||
labeled <literal>vdb</literal>. In this example, the
|
||||
<systemitem class="service">cinder-volume</systemitem> store is mapped
|
||||
to the third virtual disk on this instance, <literal>vdc</literal>.</para>
|
||||
<figure xml:id="run-instance-state-figure">
|
||||
<title>Instance creation from image and runtime state</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="../../common/figures/instance-life-2.png"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<para>The base image is copied from the image store to the local disk. The
|
||||
local disk is the first disk that the instance accesses, labeled
|
||||
<literal>vda</literal> in this diagram. Your instances will start up
|
||||
faster if you use smaller images, as less data needs to be copied across
|
||||
the network.</para>
|
||||
<para>A new empty ephemeral disk is also created, labeled
|
||||
<literal>vdb</literal> in this diagram. This disk is destroyed when you
|
||||
delete the instance.</para>
|
||||
<para>The compute node connects to the attached
|
||||
<systemitem class="service">cinder-volume</systemitem> using
|
||||
iSCSI. The <systemitem class="service">cinder-volume</systemitem>
|
||||
is mapped to the third disk, labeled <literal>vdc</literal> in this
|
||||
diagram. After the compute node provisions the vCPU and memory
|
||||
resources, the instance boots up from root volume <literal>vda</literal>.
|
||||
The instance runs, and changes data on the disks (highlighted in red
|
||||
on the diagram). If the volume store is located on a separate network,
|
||||
the <literal>my_block_storage_ip</literal> option specified in the
|
||||
storage node configuration file directs image traffic to the compute
|
||||
node.</para>
|
||||
<note>
|
||||
<para>Some details in this example scenario might be different in your
|
||||
environment. For example, you might use a different type of back-end
|
||||
storage, or different network protocols. One common variant is that
|
||||
the ephemeral storage used for volumes <literal>vda</literal> and
|
||||
<literal>vdb</literal> could be backed by network storage rather
|
||||
than a local disk.</para>
|
||||
</note>
|
||||
<para>When the instance is deleted, the state is reclaimed with the
|
||||
exception of the persistent volume. The ephemeral storage is purged;
|
||||
memory and vCPU resources are released. The image remains unchanged
|
||||
throughout this process.</para>
|
||||
<figure xml:id="end-instance-state-figure">
|
||||
<title>End state of image and volume after instance exits</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref="../../common/figures/instance-life-3.png"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<xi:include href="section_compute-image-mgt.xml"/>
|
||||
<xi:include href="../image/section_glance-property-protection.xml"/>
|
||||
<xi:include href="../image/section_glance-nova-image-download.xml"/>
|
||||
<xi:include href="section_compute-instance-building-blocks.xml"/>
|
||||
<xi:include href="section_compute-instance-mgt-tools.xml"/>
|
||||
<section xml:id="section_instance-scheduling-constraints">
|
||||
<title>Control where instances run</title>
|
||||
<para>The <link xlink:href="http://docs.openstack.org/kilo/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link>
|
||||
provides detailed information on controlling where your instances
|
||||
run, including ensuring a set of instances run on different compute
|
||||
nodes for service resiliency or on the same node for high performance
|
||||
inter-instance communications.</para>
|
||||
<para>Administrative users can specify which compute node their
|
||||
instances run on. To do this, specify the
|
||||
<parameter>--availability-zone <replaceable>AVAILABILITY_ZONE</replaceable>:<replaceable>COMPUTE_HOST</replaceable></parameter>
|
||||
parameter.</para>
|
||||
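<para>For example, an administrative user can force an instance onto a
specific compute host with a command similar to the following (the image,
host, and instance names are placeholders):</para>
<screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor m1.small --availability-zone nova:<replaceable>COMPUTE_HOST</replaceable> <replaceable>INSTANCE_NAME</replaceable></userinput></screen>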
</section>
|
||||
</section>
|
@ -1,75 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_compute-instance-building-blocks">
|
||||
<title>Instance building blocks</title>
|
||||
<para>In OpenStack, the base operating system is usually copied from an
|
||||
image stored in the OpenStack Image service. This results in an
|
||||
ephemeral instance that starts from a known template state and loses all
|
||||
accumulated states on shutdown.</para>
|
||||
<para>You can also put an operating system on a persistent volume in
|
||||
Compute or the Block Storage volume system. This gives a more traditional,
|
||||
persistent system that accumulates states that are preserved across
|
||||
restarts. To get a list of available images on your system, run:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova image-list</userinput>
|
||||
<?db-font-size 50%?><computeroutput>+--------------------------------------+-------------------------------+--------+--------------------------------------+
|
||||
| ID | Name | Status | Server |
|
||||
+--------------------------------------+-------------------------------+--------+--------------------------------------+
|
||||
| aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 | ACTIVE | |
|
||||
| 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 | ACTIVE | |
|
||||
| df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins | ACTIVE | |
|
||||
+--------------------------------------+-------------------------------+--------+--------------------------------------+</computeroutput></screen>
|
||||
|
||||
<para>The displayed image attributes are:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term><literal>ID</literal></term>
|
||||
<listitem>
|
||||
<para>Automatically generated UUID of the image.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>Name</literal></term>
|
||||
<listitem>
|
||||
<para>Free form, human-readable name for the image.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>Status</literal></term>
|
||||
<listitem>
|
||||
<para>The status of the image. Images marked
|
||||
<literal>ACTIVE</literal> are available
|
||||
for use.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><literal>Server</literal></term>
|
||||
<listitem>
|
||||
<para>For images that are created as snapshots of
|
||||
running instances, this is the UUID of the
|
||||
instance the snapshot derives from. For
|
||||
uploaded images, this field is blank.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
<para>Virtual hardware templates are called <literal>flavors</literal>.
|
||||
The default installation provides five predefined flavors.</para>
|
||||
<para>For a list of flavors that are available on your system, run:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova flavor-list</userinput>
|
||||
<computeroutput>+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
|
||||
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
|
||||
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
|
||||
| 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
|
||||
| 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
|
||||
| 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
|
||||
| 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
|
||||
| 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
|
||||
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+</computeroutput></screen>
|
||||
<para>By default, administrative users can configure the flavors. You
|
||||
can change this behavior by redefining the access controls for
|
||||
<literal>compute_extension:flavormanage</literal> in
|
||||
<filename>/etc/nova/policy.json</filename> on the
|
||||
<filename>compute-api</filename> server.</para>
|
||||
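<para>As a sketch, the relevant entry in
<filename>/etc/nova/policy.json</filename> typically looks like the
following; changing the rule (for example, to an empty string, which allows
any user) changes who may manage flavors:</para>
<programlisting>"compute_extension:flavormanage": "rule:admin_api",</programlisting>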
</section>
|
@ -1,21 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_instance-mgmt">
|
||||
<title>Instance management tools</title>
|
||||
<para>OpenStack provides command-line, web interface, and API-based
|
||||
instance management tools. Third-party management tools are also
|
||||
available, using either the native API or the provided EC2-compatible
|
||||
API.</para>
|
||||
<para>The OpenStack <application>python-novaclient</application> package
|
||||
provides a basic command-line utility, which uses the
|
||||
<command>nova</command> command. This is available as a native package
|
||||
for most Linux distributions, or you can install the latest version
|
||||
using the <application>pip</application> python package installer:</para>
|
||||
<screen><prompt>#</prompt> <userinput>pip install python-novaclient</userinput></screen>
|
||||
<para>For more information about <application>python-novaclient</application>
|
||||
and other command-line tools, see the <link xlink:href="http://docs.openstack.org/user-guide/index.html">
|
||||
<citetitle>OpenStack End User Guide</citetitle></link>.</para>
|
||||
</section>
|
@ -1,808 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_networking-nova">
|
||||
<title>Networking with nova-network</title>
|
||||
<para>Understanding the networking configuration options helps you design
|
||||
the best configuration for your Compute instances.</para>
|
||||
<para>You can choose to either install and configure
|
||||
<systemitem class="service">nova-network</systemitem> or use the
|
||||
OpenStack Networking service (neutron). This section contains a brief
|
||||
overview of <systemitem class="service">nova-network</systemitem>. For
|
||||
more information about OpenStack Networking, see
|
||||
<xref linkend="ch_networking"/>.</para>
|
||||
<section xml:id="section_networking-options">
|
||||
<title>Networking concepts</title>
|
||||
<para>Compute assigns a private IP address to each VM instance. Compute
|
||||
makes a distinction between fixed IPs and
|
||||
<glossterm baseform="floating IP address">floating IP</glossterm>.
|
||||
Fixed IPs are IP addresses that are assigned to an instance on
|
||||
creation and stay the same until the instance is explicitly
|
||||
terminated. Floating IPs are addresses that can be dynamically
|
||||
associated with an instance. A floating IP address can be
|
||||
disassociated and associated with another instance at any time. A
|
||||
user can reserve a floating IP for their project.</para>
|
||||
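<para>For example, with <systemitem class="service">nova-network</systemitem>
a user can allocate a floating IP from a pool and associate it with an
instance using commands similar to the following (the pool name, instance
name, and address are placeholders):</para>
<screen><prompt>$</prompt> <userinput>nova floating-ip-create <replaceable>POOL_NAME</replaceable></userinput>
<prompt>$</prompt> <userinput>nova floating-ip-associate <replaceable>INSTANCE_NAME</replaceable> <replaceable>FLOATING_IP</replaceable></userinput></screen>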
<note>
|
||||
<para>Currently, Compute with
|
||||
<systemitem class="service">nova-network</systemitem> only supports
|
||||
Linux bridge networking that allows virtual interfaces to connect
|
||||
to the outside network through the physical interface.</para>
|
||||
</note>
|
||||
<para>The network controller with
|
||||
<systemitem class="service">nova-network</systemitem> provides
|
||||
virtual networks to enable compute servers to interact with each
|
||||
other and with the public network. Compute with
|
||||
<systemitem class="service">nova-network</systemitem> supports the
|
||||
following network modes, which are implemented as Network Manager
|
||||
types:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>Flat Network Manager</term>
|
||||
<listitem>
|
||||
<para>In this mode, a network administrator specifies a subnet.
|
||||
IP addresses for VM instances are assigned from the subnet,
|
||||
and then injected into the image on launch. Each instance
|
||||
receives a fixed IP address from the pool of available
|
||||
addresses. A system administrator must create the Linux
|
||||
networking bridge (typically named <literal>br100</literal>,
|
||||
although this is configurable) on the systems running the
|
||||
<systemitem class="service">nova-network</systemitem> service.
|
||||
All instances of the system are attached to the same bridge,
|
||||
which is configured manually by the network administrator.</para>
|
||||
<note>
|
||||
<para>Configuration injection currently only works on
|
||||
Linux-style systems that keep networking configuration in
|
||||
<filename>/etc/network/interfaces</filename>.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Flat DHCP Network Manager</term>
|
||||
<listitem>
|
||||
<para>In this mode, OpenStack starts a DHCP server
|
||||
(<systemitem>dnsmasq</systemitem>) to allocate IP addresses to
|
||||
VM instances from the specified subnet, in addition to
|
||||
manually configuring the networking bridge. IP addresses for
|
||||
VM instances are assigned from a subnet specified by the
|
||||
network administrator.</para>
|
||||
<para>Like flat mode, all instances are attached to a single
|
||||
bridge on the compute node. Additionally, a DHCP server runs
alongside each <systemitem class="service">nova-network</systemitem>
service (depending on single- or multi-host mode) to configure instances.
|
||||
In this mode, Compute does a bit more configuration. It
|
||||
attempts to bridge into an Ethernet device
|
||||
(<literal>flat_interface</literal>, eth0 by default). For
|
||||
every instance, Compute allocates a fixed IP address and
|
||||
configures <systemitem>dnsmasq</systemitem> with the MAC ID
|
||||
and IP address for the VM. <systemitem>dnsmasq</systemitem>
|
||||
does not take part in the IP address allocation process; it
only hands out IPs according to the mapping done by Compute.
Instances receive their fixed IPs by issuing a DHCP discover
request. These IPs are not assigned to any of the host's
|
||||
network interfaces, only to the guest-side interface for the
|
||||
VM.</para>
|
||||
<para>In any setup with flat networking, the hosts providing
|
||||
the <systemitem class="service">nova-network</systemitem>
|
||||
service are responsible for forwarding traffic from the
|
||||
private network. They also run and configure
|
||||
<systemitem>dnsmasq</systemitem> as a DHCP server listening on
|
||||
this bridge, usually on IP address 10.0.0.1 (see
|
||||
<link linkend="section_dnsmasq">DHCP server: dnsmasq</link>).
|
||||
Compute can determine the NAT entries for each network,
|
||||
although sometimes NAT is not used, such as when the network
|
||||
has been configured with all public IPs, or if a hardware
|
||||
router is used (which is a high availability option). In this
|
||||
case, hosts need to have <literal>br100</literal> configured
|
||||
and physically connected to any other nodes that are hosting
|
||||
VMs. You must set the <literal>flat_network_bridge</literal>
|
||||
option or create networks with the bridge parameter in order
|
||||
to avoid raising an error. Compute nodes have iptables or
|
||||
ebtables entries created for each project and instance to
|
||||
protect against MAC ID or IP address spoofing and ARP
|
||||
poisoning.</para>
|
||||
<note>
|
||||
<para>In single-host Flat DHCP mode you will be able to ping
|
||||
VMs through their fixed IP from the
|
||||
<systemitem>nova-network</systemitem> node, but you cannot
|
||||
ping them from the compute nodes. This is expected behavior.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>VLAN Network Manager</term>
|
||||
<listitem>
|
||||
<para>This is the default mode for OpenStack Compute. In this
|
||||
mode, Compute creates a VLAN and bridge for each tenant. For
|
||||
multiple-machine installations, the VLAN Network Mode
|
||||
requires a switch that supports VLAN tagging (IEEE 802.1Q).
|
||||
The tenant gets a range of private IPs that are only
|
||||
accessible from inside the VLAN. In order for a user to
|
||||
access the instances in their tenant, a special VPN instance
|
||||
(code named <systemitem>cloudpipe</systemitem>) needs to be
|
||||
created. Compute generates a certificate and key for the
|
||||
user to access the VPN and starts the VPN automatically. It
|
||||
provides a private network segment for each tenant's
|
||||
instances that can be accessed through a dedicated VPN
|
||||
connection from the internet. In this mode, each tenant gets
|
||||
its own VLAN, Linux networking bridge, and subnet.</para>
|
||||
<para>The subnets are specified by the network administrator,
|
||||
and are assigned dynamically to a tenant when required. A
|
||||
DHCP server is started for each VLAN to pass out IP addresses
|
||||
to VM instances from the subnet assigned to the tenant. All
|
||||
instances belonging to one tenant are bridged into the same
|
||||
VLAN for that tenant. OpenStack Compute creates the Linux
|
||||
networking bridges and VLANs when required.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
|
||||
<para>These network managers can co-exist in a cloud system.
|
||||
However, because you cannot select the type of network for a
|
||||
given tenant, you cannot configure multiple network types in a
|
||||
single Compute installation.</para>
|
||||
<para>All network managers configure the network using network
|
||||
drivers. For example, the Linux L3 driver (<literal>l3.py</literal>
|
||||
and <literal>linux_net.py</literal>) makes use of
<literal>iptables</literal>, <literal>route</literal>, and other
network management facilities, as well as the libvirt
|
||||
<link xlink:href="http://libvirt.org/formatnwfilter.html">network
|
||||
filtering facilities</link>. The driver is not tied to any
|
||||
particular network manager; all network managers use the same
|
||||
driver. The driver usually initializes only when the first VM
|
||||
lands on this host node.</para>
|
||||
<para>All network managers operate in either single-host or
|
||||
multi-host mode. This choice greatly influences the network
|
||||
configuration. In single-host mode, a single
|
||||
<systemitem class="service">nova-network</systemitem> service
|
||||
provides a default gateway for VMs and hosts a single DHCP
|
||||
server (<systemitem>dnsmasq</systemitem>). In multi-host mode,
|
||||
each compute node runs its own
|
||||
<systemitem class="service">nova-network</systemitem> service.
|
||||
In both cases, all traffic between VMs and the internet flows
|
||||
through <systemitem class="service">nova-network</systemitem>.
|
||||
Each mode has benefits and drawbacks. For more on this, see the
|
||||
<citetitle>Network Topology</citetitle> section in the <link
|
||||
xlink:href="http://docs.openstack.org/openstack-ops/content/">
|
||||
<citetitle>OpenStack Operations Guide</citetitle></link>.</para>
|
||||
<para>All networking options require network connectivity to be
|
||||
already set up between OpenStack physical nodes. OpenStack does
|
||||
not configure any physical network interfaces. All network
|
||||
managers automatically create VM virtual interfaces. Some
|
||||
network managers can also create network bridges such as
|
||||
<literal>br100</literal>.</para>
|
||||
<para>The internal network interface is used for communication
|
||||
with VMs. The interface should not have an IP address attached
|
||||
to it before OpenStack installation; it serves only as a
|
||||
fabric where the actual endpoints are VMs and dnsmasq.
|
||||
Additionally, the internal network interface must be in
|
||||
<literal>promiscuous</literal> mode, so that it can receive
|
||||
packets whose target MAC address is that of the guest VM, not the host.</para>
|
||||
<para>All machines must have a public and internal network
|
||||
interface (controlled by these options:
|
||||
<literal>public_interface</literal> for the public interface,
|
||||
and <literal>flat_interface</literal> and
|
||||
<literal>vlan_interface</literal> for the internal interface
|
||||
with flat or VLAN managers). This guide refers to the public
|
||||
network as the external network and the private network as the
|
||||
internal or tenant network.</para>
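<para>As a minimal illustration (the interface names below are
assumptions; substitute the ones used in your environment), the
relevant options in <filename>/etc/nova/nova.conf</filename> might
look like this:</para>
<programlisting language="ini"># illustrative interface names only
public_interface=eth0
flat_interface=eth1
flat_network_bridge=br100</programlisting>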
|
||||
<para>For flat and flat DHCP modes, use the
|
||||
<command>nova network-create</command> command to create a
|
||||
network:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create vmnet \
|
||||
--fixed-range-v4 10.0.0.0/16 --fixed-cidr 10.0.20.0/24 --bridge br100</userinput></screen>
|
||||
<para>This example uses the following parameters:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><parameter>--fixed-range-v4</parameter> specifies the
|
||||
network subnet.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><parameter>--fixed-cidr</parameter> specifies a range of
|
||||
fixed IP addresses to allocate, and can be a subset of the
|
||||
<parameter>--fixed-range-v4</parameter> argument.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><parameter>--bridge</parameter> specifies the bridge
|
||||
device to which this network is connected on every compute
|
||||
node.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
|
||||
<section xml:id="section_dnsmasq">
|
||||
<title>DHCP server: dnsmasq</title>
|
||||
<para>The Compute service uses <link xlink:href="http://www.thekelleys.org.uk/dnsmasq/doc.html">
|
||||
dnsmasq</link> as the DHCP server when using either Flat DHCP
|
||||
Network Manager or VLAN Network Manager. For Compute to operate in
|
||||
IPv4/IPv6 dual-stack mode, use at least <systemitem>dnsmasq</systemitem>
|
||||
v2.63. The <systemitem class="service">nova-network</systemitem>
|
||||
service is responsible for starting <systemitem>dnsmasq</systemitem>
|
||||
processes.</para>
|
||||
<para>The behavior of <systemitem>dnsmasq</systemitem> can be
|
||||
customized by creating a <systemitem>dnsmasq</systemitem>
|
||||
configuration file. Specify the configuration file using the
|
||||
<literal>dnsmasq_config_file</literal> configuration option:</para>
|
||||
<programlisting language="ini">dnsmasq_config_file=/etc/dnsmasq-nova.conf</programlisting>
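<para>As an illustration only, a minimal
<filename>/etc/dnsmasq-nova.conf</filename> could contain a couple of
standard <systemitem>dnsmasq</systemitem> directives such as:</para>
<programlisting language="ini"># illustrative dnsmasq settings; tailor to your deployment
domain=example.com
log-facility=/var/log/dnsmasq-nova.log</programlisting>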
|
||||
<para>For more information about creating a
|
||||
<systemitem>dnsmasq</systemitem> configuration file, see the
|
||||
<link xlink:href="http://docs.openstack.org/kilo/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link>, and
|
||||
<link xlink:href="http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq.conf.example">
|
||||
the dnsmasq documentation</link>.</para>
|
||||
<para><systemitem>dnsmasq</systemitem> also acts as a caching DNS
|
||||
server for instances. You can specify the DNS server that
|
||||
<systemitem>dnsmasq</systemitem> uses by setting the
|
||||
<literal>dns_server</literal> configuration option in
|
||||
<filename>/etc/nova/nova.conf</filename>. This example configures
|
||||
<systemitem>dnsmasq</systemitem> to use Google's public
|
||||
DNS server:</para>
|
||||
<programlisting language="ini">dns_server=8.8.8.8</programlisting>
|
||||
<para><systemitem>dnsmasq</systemitem> logs to
|
||||
<systemitem>syslog</systemitem> (typically
|
||||
<filename>/var/log/syslog</filename> or <filename>/var/log/messages</filename>,
|
||||
depending on Linux distribution). Logs can be useful for
|
||||
troubleshooting, especially in a situation where VM instances boot
|
||||
successfully but are not reachable over the network.</para>
|
||||
<para>Administrators can specify the starting point IP address to
|
||||
reserve with the DHCP server (in the format
|
||||
<replaceable>n</replaceable>.<replaceable>n</replaceable>.<replaceable>n</replaceable>.<replaceable>n</replaceable>)
|
||||
with this command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova-manage fixed reserve --address <replaceable>IP_ADDRESS</replaceable></userinput></screen>
|
||||
<para>This reservation only affects which IP address the VMs start at, not
|
||||
the fixed IP addresses that <systemitem class="service">nova-network</systemitem>
|
||||
places on the bridges.</para>
|
||||
</section>
|
||||
|
||||
<xi:include href="section_compute-configure-ipv6.xml"/>
|
||||
|
||||
<section xml:id="section_metadata-service">
|
||||
<title>Metadata service</title>
|
||||
<para>Compute uses a metadata service for virtual machine instances to
|
||||
retrieve instance-specific data. Instances access the metadata service
|
||||
at <literal>http://169.254.169.254</literal>. The metadata service
|
||||
supports two sets of APIs: an OpenStack metadata API and an
|
||||
EC2-compatible API. Both APIs are versioned by date.</para>
|
||||
<para>To retrieve a list of supported versions for the OpenStack
|
||||
metadata API, make a GET request to
|
||||
<literal>http://169.254.169.254/openstack</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/openstack</userinput>
|
||||
<computeroutput>2012-08-10
|
||||
2013-04-04
|
||||
2013-10-17
|
||||
latest</computeroutput></screen>
|
||||
<para>To list supported versions for the EC2-compatible metadata API,
|
||||
make a GET request to <literal>http://169.254.169.254</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254</userinput>
|
||||
<computeroutput>1.0
|
||||
2007-01-19
|
||||
2007-03-01
|
||||
2007-08-29
|
||||
2007-10-10
|
||||
2007-12-15
|
||||
2008-02-01
|
||||
2008-09-01
|
||||
2009-04-04
|
||||
latest</computeroutput></screen>
|
||||
<para>If you write a consumer for one of these APIs, always attempt to
|
||||
access the most recent API version supported by your consumer first,
|
||||
then fall back to an earlier version if the most recent one is not
|
||||
available.</para>
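<para>One way to implement this fallback is to probe the versions your
consumer supports, newest first, and use the first one that responds.
The following sketch is an illustration only (it assumes
<command>curl</command> is available inside the instance and that the
listed dates are the versions your consumer understands):</para>
<programlisting language="bash">#!/bin/bash
# Probe the metadata service for the newest supported API version.
for version in 2013-10-17 2013-04-04 2012-08-10; do
    if curl -sf "http://169.254.169.254/openstack/$version/meta_data.json" > /dev/null; then
        echo "Using metadata version $version"
        break
    fi
done</programlisting>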
|
||||
<para>Metadata from the OpenStack API is distributed in JSON format. To
|
||||
retrieve the metadata, make a GET request to
|
||||
<literal>http://169.254.169.254/openstack/2012-08-10/meta_data.json</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/openstack/2012-08-10/meta_data.json</userinput></screen>
|
||||
<programlisting language="json"><xi:include href="../../common/samples/list_metadata.json" parse="text"/></programlisting>
|
||||
<para>Instances also retrieve user data (passed as the
|
||||
<literal>user_data</literal> parameter in the API call or by the
|
||||
<literal>--user_data</literal> flag in the <command>nova boot</command>
|
||||
command) through the metadata service, by making a GET request to
|
||||
<literal>http://169.254.169.254/openstack/2012-08-10/user_data</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/openstack/2012-08-10/user_data</userinput>
|
||||
<computeroutput>#!/bin/bash
|
||||
echo 'Extra user data here'</computeroutput></screen>
|
||||
<para>The metadata service has an API that is compatible with version
|
||||
2009-04-04 of the <link xlink:href="http://docs.amazonwebservices.com/AWSEC2/2009-04-04/UserGuide/AESDG-chapter-instancedata.html">
|
||||
Amazon EC2 metadata service</link>. This means that virtual machine
|
||||
images designed for EC2 will work properly with OpenStack.</para>
|
||||
<para>The EC2 API exposes a separate URL for each metadata element.
|
||||
Retrieve a listing of these elements by making a GET query to
|
||||
<literal>http://169.254.169.254/2009-04-04/meta-data/</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/2009-04-04/meta-data/</userinput>
|
||||
<computeroutput>ami-id
|
||||
ami-launch-index
|
||||
ami-manifest-path
|
||||
block-device-mapping/
|
||||
hostname
|
||||
instance-action
|
||||
instance-id
|
||||
instance-type
|
||||
kernel-id
|
||||
local-hostname
|
||||
local-ipv4
|
||||
placement/
|
||||
public-hostname
|
||||
public-ipv4
|
||||
public-keys/
|
||||
ramdisk-id
|
||||
reservation-id
|
||||
security-groups</computeroutput></screen>
|
||||
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/2009-04-04/meta-data/block-device-mapping/</userinput>
|
||||
<computeroutput>ami</computeroutput></screen>
|
||||
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/2009-04-04/meta-data/placement/</userinput>
|
||||
<computeroutput>availability-zone</computeroutput></screen>
|
||||
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/2009-04-04/meta-data/public-keys/</userinput>
|
||||
<computeroutput>0=mykey</computeroutput></screen>
|
||||
|
||||
<para>Instances can retrieve the public SSH key (identified by keypair
|
||||
name when a user requests a new instance) by making a GET request to
|
||||
<literal>http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key</userinput>
|
||||
<computeroutput>ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKVVRNCRX6BlnNbI+USLGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTHbsiyPCIDOKyeHba4MUJq8Oh5b2i71/3BISpyxTBH/uZDHdslW2a+SrPDCeuMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated by Nova</computeroutput></screen>
|
||||
|
||||
<para>Instances can retrieve user data by making a GET request to
|
||||
<literal>http://169.254.169.254/2009-04-04/user-data</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl http://169.254.169.254/2009-04-04/user-data</userinput>
|
||||
<computeroutput>#!/bin/bash
|
||||
echo 'Extra user data here'</computeroutput></screen>
|
||||
|
||||
<para>The metadata service is implemented by either the
|
||||
<systemitem class="service">nova-api</systemitem> service or the
|
||||
<systemitem class="service">nova-api-metadata</systemitem> service.
|
||||
Note that the <systemitem class="service">nova-api-metadata</systemitem>
|
||||
service is generally only used when running in multi-host mode, as it
|
||||
retrieves instance-specific metadata. If you are running the
|
||||
<systemitem class="service">nova-api</systemitem> service, you must
|
||||
have <literal>metadata</literal> as one of the elements listed in the
|
||||
<literal>enabled_apis</literal> configuration option in
|
||||
<filename>/etc/nova/nova.conf</filename>. The default
|
||||
<literal>enabled_apis</literal> configuration setting includes the
|
||||
metadata service, so you should not need to modify it.</para>
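<para>If you do set it explicitly, keep <literal>metadata</literal> in
the list; for example (the other API names shown are the usual
defaults and may differ in your deployment):</para>
<programlisting language="ini">enabled_apis=ec2,osapi_compute,metadata</programlisting>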
|
||||
<para>Instances access the service at <literal>169.254.169.254:80</literal>,
|
||||
and this is translated to <literal>metadata_host:metadata_port</literal>
|
||||
by an iptables rule established by the
|
||||
<systemitem class="service">nova-network</systemitem> service. In
|
||||
multi-host mode, you can set <option>metadata_host</option> to
|
||||
<literal>127.0.0.1</literal>.</para>
|
||||
<para>For instances to reach the metadata service, the
|
||||
<systemitem class="service">nova-network</systemitem> service must
|
||||
configure iptables to NAT port <literal>80</literal> of the
|
||||
<literal>169.254.169.254</literal> address to the IP address specified
|
||||
in <option>metadata_host</option> (this defaults to <literal>$my_ip</literal>,
|
||||
which is the IP address of the <systemitem class="service">nova-network</systemitem>
|
||||
service) and port specified in <option>metadata_port</option> (which
|
||||
defaults to <literal>8775</literal>) in
|
||||
<filename>/etc/nova/nova.conf</filename>.</para>
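<para>You can check the resulting translation on the
<systemitem class="service">nova-network</systemitem> host with
<command>iptables</command>. The rule typically has roughly the
following form (the destination address and port shown here are
illustrative):</para>
<screen><prompt>#</prompt> <userinput>iptables -t nat -S nova-network-PREROUTING</userinput>
<computeroutput>-A nova-network-PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination 192.168.1.10:8775</computeroutput></screen>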
|
||||
<note>
|
||||
<para>The <literal>metadata_host</literal> configuration option must
|
||||
be an IP address, not a host name.
|
||||
</para>
|
||||
</note>
|
||||
<para>The default Compute service settings assume that
|
||||
<systemitem class="service">nova-network</systemitem> and
|
||||
<systemitem class="service">nova-api</systemitem> are running on the
|
||||
same host. If this is not the case, in the
|
||||
<filename>/etc/nova/nova.conf</filename> file on the host running
|
||||
<systemitem class="service">nova-network</systemitem>, set the
|
||||
<literal>metadata_host</literal> configuration option to the IP
|
||||
address of the host where <systemitem class="service">nova-api</systemitem>
|
||||
is running.</para>
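<para>For example (the address below is a placeholder for the
<systemitem class="service">nova-api</systemitem> host):</para>
<programlisting language="ini">metadata_host=192.168.1.10</programlisting>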
|
||||
|
||||
<xi:include href="../../common/tables/nova-metadata.xml"/>
|
||||
|
||||
</section>
|
||||
|
||||
<section xml:id="section_enable-ping-and-ssh-on-vms">
|
||||
<title>Enable ping and SSH on VMs</title>
|
||||
<para>You need to enable <command>ping</command> and
|
||||
<command>ssh</command> on your VMs for network access. This can be
|
||||
done with either the <command>nova</command> or
|
||||
<command>euca2ools</command> commands.</para>
|
||||
<note>
|
||||
<para>Run these commands as root only if the credentials used to
|
||||
interact with <systemitem class="service">nova-api</systemitem>
|
||||
are in <filename>/root/.bashrc</filename>. If the EC2 credentials
|
||||
in the <filename>.bashrc</filename> file are for an unprivileged
|
||||
user, you must run these commands as that user instead.</para>
|
||||
</note>
|
||||
<para>Enable ping and SSH with <command>nova</command> commands:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0</userinput>
|
||||
<prompt>$</prompt> <userinput>nova secgroup-add-rule default tcp 22 22 0.0.0.0/0</userinput> </screen>
|
||||
|
||||
<para>Enable ping and SSH with <command>euca2ools</command>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>euca-authorize -P icmp -t -1:-1 -s 0.0.0.0/0 default</userinput>
|
||||
<prompt>$</prompt> <userinput>euca-authorize -P tcp -p 22 -s 0.0.0.0/0 default</userinput> </screen>
|
||||
|
||||
<para>If you have run these commands and still cannot ping or SSH your
|
||||
instances, check the number of running <literal>dnsmasq</literal>
|
||||
processes; there should be two. If not, kill the processes and restart
the service with these commands:</para>
|
||||
<screen><prompt>#</prompt> <userinput>killall dnsmasq</userinput>
|
||||
<prompt>#</prompt> <userinput>service nova-network restart</userinput></screen>
|
||||
</section>
|
||||
|
||||
<section xml:id="nova-associate-public-ip">
|
||||
<title>Configure public (floating) IP addresses</title>
|
||||
<para>This section describes how to configure floating IP addresses
|
||||
with <systemitem class="service">nova-network</systemitem>. For
|
||||
information about doing this with OpenStack Networking, see
|
||||
<xref linkend="section_l3_router_and_nat"/>.</para>
|
||||
|
||||
<section xml:id="private-and-public-IP-addresses">
|
||||
<title>Private and public IP addresses</title>
|
||||
<para>In this section, the term
|
||||
<glossterm baseform="floating IP address">floating IP address</glossterm>
|
||||
is used to refer to an IP address, usually public, that you can
|
||||
dynamically add to a running virtual instance.</para>
|
||||
<para>Every virtual instance is automatically assigned a private IP
|
||||
address. You can choose to assign a public (or floating) IP address
|
||||
instead. OpenStack Compute uses network address translation (NAT) to
|
||||
assign floating IPs to virtual instances.</para>
|
||||
<para>To be able to assign a floating IP address, edit the
|
||||
<filename>/etc/nova/nova.conf</filename> file to specify which
|
||||
interface the <systemitem class="service">nova-network</systemitem>
|
||||
service should bind public IP addresses to:</para>
|
||||
<programlisting language="ini">public_interface=<replaceable>VLAN100</replaceable></programlisting>
|
||||
<para>If you make changes to the <filename>/etc/nova/nova.conf</filename>
|
||||
file while the <systemitem class="service">nova-network</systemitem>
|
||||
service is running, you will need to restart the service to pick up
|
||||
the changes.</para>
|
||||
<note>
|
||||
<title>Traffic between VMs using floating IPs</title>
|
||||
<para>Floating IPs are implemented by using a source NAT (SNAT rule
|
||||
in iptables), so security groups can sometimes display inconsistent
|
||||
behavior if VMs use their floating IP to communicate with other VMs,
|
||||
particularly on the same physical host. Traffic from VM to VM
|
||||
across the fixed network does not have this issue, and so this is
|
||||
the recommended setup. To ensure that traffic does not get SNATed
|
||||
to the floating range, explicitly set:</para>
|
||||
<programlisting language="ini">dmz_cidr=x.x.x.x/y</programlisting>
|
||||
<para>The <literal>x.x.x.x/y</literal> value specifies the range of
|
||||
floating IPs for each pool of floating IPs that you define. This
|
||||
configuration is also required if the VMs in the source group have
|
||||
floating IPs.</para>
|
||||
</note>
|
||||
</section>
|
||||
|
||||
<section xml:id="Enabling_ip_forwarding">
|
||||
<title>Enable IP forwarding</title>
|
||||
<para>IP forwarding is disabled by default on most Linux
|
||||
distributions. You will need to enable it in order to use floating
|
||||
IPs.</para>
|
||||
<note>
|
||||
<para>IP forwarding only needs to be enabled on the nodes that run
|
||||
<systemitem class="service">nova-network</systemitem>. However,
|
||||
you will need to enable it on all compute nodes if you use
|
||||
<literal>multi_host</literal> mode.</para>
|
||||
</note>
|
||||
<para>To check if IP forwarding is enabled, run:</para>
|
||||
<screen><prompt>$</prompt> <userinput>cat /proc/sys/net/ipv4/ip_forward</userinput>
|
||||
<computeroutput>0</computeroutput></screen>
|
||||
<para>Alternatively, run:</para>
|
||||
<screen><prompt>$</prompt> <userinput>sysctl net.ipv4.ip_forward</userinput>
|
||||
<computeroutput>net.ipv4.ip_forward = 0</computeroutput></screen>
|
||||
<para>In these examples, IP forwarding is disabled.</para>
|
||||
<para>To enable IP forwarding dynamically, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl -w net.ipv4.ip_forward=1</userinput></screen>
|
||||
<para>Alternatively, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>echo 1 > /proc/sys/net/ipv4/ip_forward</userinput></screen>
|
||||
<para>To make the changes permanent, edit the
|
||||
<filename>/etc/sysctl.conf</filename> file and update the IP
|
||||
forwarding setting:</para>
|
||||
<programlisting language="ini">net.ipv4.ip_forward = 1</programlisting>
|
||||
<para>Save the file and run this command to apply the changes:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl -p</userinput></screen>
|
||||
<para>You can also apply the changes by restarting the network
|
||||
service:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>On Ubuntu and Debian:</para>
|
||||
<screen><prompt>#</prompt> <userinput>/etc/init.d/networking restart</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>On RHEL, Fedora, CentOS, openSUSE, and SLES:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service network restart</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
|
||||
<section xml:id="create_list_of_available_floating_ips">
|
||||
<title>Create a list of available floating IP addresses</title>
|
||||
<para>Compute maintains a list of floating IP addresses that are
|
||||
available for assigning to instances. Use the
|
||||
<command>nova-manage floating create</command> command to add
|
||||
entries to the list:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage floating create --pool nova --ip_range 68.99.26.170/31</userinput></screen>
|
||||
<para>Use these <command>nova-manage</command> commands to perform
|
||||
floating IP operations:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage floating list</userinput></screen>
|
||||
<para>Lists the floating IP addresses in the pool.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage floating create --pool <replaceable>POOL_NAME</replaceable> --ip_range <replaceable>CIDR</replaceable></userinput></screen>
|
||||
<para>Creates specific floating IPs for either a single address
|
||||
or a subnet.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage floating delete <replaceable>CIDR</replaceable></userinput></screen>
|
||||
<para>Removes floating IP addresses using the same parameters as
|
||||
the create command.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>For more information about how administrators can associate
|
||||
floating IPs with instances, see
|
||||
<link xlink:href="http://docs.openstack.org/user-guide-admin/cli_admin_manage_ip_addresses.html">
|
||||
Manage IP addresses</link> in the <citetitle>OpenStack Admin User
|
||||
Guide</citetitle>.</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="Automatically_adding_floating_IPs">
|
||||
<title>Automatically add floating IPs</title>
|
||||
<para>You can configure <systemitem class="service">nova-network</systemitem>
|
||||
to automatically allocate and assign a floating IP address to
|
||||
virtual instances when they are launched. Add this line to the
|
||||
<filename>/etc/nova/nova.conf</filename> file:</para>
|
||||
<programlisting language="ini">auto_assign_floating_ip=True</programlisting>
|
||||
<para>Save the file, and restart
|
||||
<systemitem class="service">nova-network</systemitem>.</para>
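<para>For example, on systems that use SysV init scripts:</para>
<screen><prompt>#</prompt> <userinput>service nova-network restart</userinput></screen>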
|
||||
<note>
|
||||
<para>If this option is enabled, but all floating IP addresses have
|
||||
already been allocated, the <command>nova boot</command> command
|
||||
will fail.</para>
|
||||
</note>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<section xml:id="section_remove-network-from-project">
|
||||
<title>Remove a network from a project</title>
|
||||
<para>You cannot delete a network that has been associated with a
project. This section describes the procedure for disassociating it
|
||||
so that it can be deleted.</para>
|
||||
<para>In order to disassociate the network, you will need the ID of
|
||||
the project it is associated with. To get the project ID, you
|
||||
will need to be an administrator.</para>
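<para>For example, with admin credentials loaded in your environment,
the Identity service client can list the projects and their IDs (the
exact client command depends on the client version installed):</para>
<screen><prompt>$</prompt> <userinput>keystone tenant-list</userinput></screen>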
|
||||
<para>
|
||||
Disassociate the network from the project using the
|
||||
<command>scrub</command> command, with the project ID as the final
|
||||
parameter:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage project scrub --project <replaceable>ID</replaceable></userinput></screen>
|
||||
</section>
|
||||
|
||||
<section xml:id="section_use-multi-nics">
|
||||
<title>Multiple interfaces for instances (multinic)</title>
|
||||
<para>The multinic feature allows you to use more than one interface
|
||||
with your instances. This is useful in several scenarios:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>SSL Configurations (VIPs)</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Services failover/HA</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Bandwidth Allocation</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Administrative/Public access to your instances</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>Each VIF represents a separate network with its own IP block.
|
||||
Every network mode has its own set of changes regarding multinic
|
||||
usage:</para>
|
||||
<figure>
|
||||
<title>multinic flat manager</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata scale="40" fileref="../../common/figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-manager.jpg"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<figure>
|
||||
<title>multinic flatdhcp manager</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata scale="40" fileref="../../common/figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-DHCP-manager.jpg"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<figure>
|
||||
<title>multinic VLAN manager</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata scale="40" fileref="../../common/figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-VLAN-manager.jpg"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
|
||||
<section xml:id="using-multiple-nics-usage">
|
||||
<title>Using multinic</title>
|
||||
<para>In order to use multinic, create two networks, and attach them
|
||||
to the tenant (named <literal>project</literal> on the command
|
||||
line):</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova network-create first-net --fixed-range-v4 20.20.0.0/24 --project-id $your-project</userinput>
|
||||
<prompt>$</prompt> <userinput>nova network-create second-net --fixed-range-v4 20.20.10.0/24 --project-id $your-project</userinput></screen>
|
||||
<para>Each new instance will now receive two IP addresses from its
|
||||
respective DHCP servers:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova list</userinput>
|
||||
<computeroutput>+-----+------------+--------+----------------------------------------+
|
||||
| ID | Name | Status | Networks |
|
||||
+-----+------------+--------+----------------------------------------+
|
||||
| 124 | Server 124 | ACTIVE | network2=20.20.0.3; private=20.20.10.14|
|
||||
+-----+------------+--------+----------------------------------------+</computeroutput></screen>
|
||||
<note>
|
||||
<para>Make sure you start the second interface on the instance, or
|
||||
it won't be reachable through the second IP.</para>
|
||||
</note>
|
||||
<para>This example demonstrates how to set up the interfaces within
|
||||
the instance. This is the configuration that needs to be applied
|
||||
inside the image.</para>
|
||||
<para>Edit the <filename>/etc/network/interfaces</filename> file:</para>
|
||||
<programlisting language="bash"># The loopback network interface
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
|
||||
auto eth1
|
||||
iface eth1 inet dhcp</programlisting>
|
||||
<para>If OpenStack Networking (neutron) is installed, you can
|
||||
specify the networks to attach to the interfaces by using the
|
||||
<literal>--nic</literal> flag with the <command>nova</command>
|
||||
command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova boot --image ed8b2a37-5535-4a5f-a615-443513036d71 --flavor 1 --nic net-id=<replaceable>NETWORK1_ID</replaceable> --nic net-id=<replaceable>NETWORK2_ID</replaceable> test-vm1</userinput></screen>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<section xml:id="section_network-troubleshoot">
|
||||
<title>Troubleshooting Networking</title>
|
||||
<simplesect>
|
||||
<!-- I think this would be better as a qandaset, what do others think? LKB-->
|
||||
<title>Cannot reach floating IPs</title>
|
||||
<para>If you cannot reach your instances through the floating IP
|
||||
address:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Check that the default security group allows ICMP (ping)
|
||||
and SSH (port 22), so that you can reach the instances:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova secgroup-list-rules default</userinput>
|
||||
<computeroutput>+-------------+-----------+---------+-----------+--------------+
|
||||
| IP Protocol | From Port | To Port | IP Range | Source Group |
|
||||
+-------------+-----------+---------+-----------+--------------+
|
||||
| icmp | -1 | -1 | 0.0.0.0/0 | |
|
||||
| tcp | 22 | 22 | 0.0.0.0/0 | |
|
||||
+-------------+-----------+---------+-----------+--------------+</computeroutput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Check that the NAT rules have been added to
|
||||
<systemitem>iptables</systemitem> on the node that is
|
||||
running <systemitem>nova-network</systemitem>:</para>
|
||||
<screen><prompt>#</prompt> <userinput>iptables -L -nv -t nat</userinput>
|
||||
<computeroutput>-A nova-network-PREROUTING -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3
|
||||
-A nova-network-floating-snat -s 10.0.0.3/32 -j SNAT --to-source 68.99.26.170</computeroutput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Check that the public address (<uri>68.99.26.170</uri>
|
||||
in this example) has been added to your public interface.
|
||||
You should see the address in the listing when you use
|
||||
the <command>ip addr</command> command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>ip addr</userinput>
|
||||
<computeroutput>2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
|
||||
link/ether xx:xx:xx:17:4b:c2 brd ff:ff:ff:ff:ff:ff
|
||||
inet 13.22.194.80/24 brd 13.22.194.255 scope global eth0
|
||||
inet 68.99.26.170/32 scope global eth0
|
||||
inet6 fe80::82b:2bf:fe1:4b2/64 scope link
|
||||
valid_lft forever preferred_lft forever</computeroutput></screen>
|
||||
<note>
|
||||
<para>You cannot <command>ssh</command> to an instance
|
||||
with a public IP from within the same server because
|
||||
the routing configuration does not allow it.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Use <command>tcpdump</command> to identify if packets
|
||||
are being routed to the inbound interface on the compute
|
||||
host. If the packets are reaching the compute hosts but
|
||||
the connection is failing, the issue may be that the
|
||||
packet is being dropped by reverse path filtering. Try
|
||||
disabling reverse-path filtering on the inbound interface.
|
||||
For example, if the inbound interface is
|
||||
<literal>eth2</literal>, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl -w net.ipv4.conf.<replaceable>ETH2</replaceable>.rp_filter=0</userinput></screen>
|
||||
<para>If this solves the problem, add the following line to
<filename>/etc/sysctl.conf</filename> so that the
reverse-path filter remains disabled after a reboot:</para>
<programlisting language="ini">net.ipv4.conf.<replaceable>ETH2</replaceable>.rp_filter=0</programlisting>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Temporarily disable the firewall</title>
|
||||
<para>To help debug networking issues with reaching VMs, you can
|
||||
disable the firewall by setting this option in
|
||||
<filename>/etc/nova/nova.conf</filename>:</para>
|
||||
<programlisting language="ini">firewall_driver=nova.virt.firewall.NoopFirewallDriver</programlisting>
|
||||
<para>We strongly recommend you remove this line to re-enable the
|
||||
firewall once your networking issues have been resolved.</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Packet loss from instances to nova-network server
|
||||
(VLANManager mode)</title>
|
||||
<para>If you can SSH to your instances but the network
|
||||
to your instance is slow, or if you find that certain
|
||||
operations are slower than they should be (for example,
|
||||
<command>sudo</command>), packet loss could be occurring on the
|
||||
connection to the instance.</para>
|
||||
<para>Packet loss can be caused by Linux networking configuration
|
||||
settings related to bridges. Certain settings can cause packets
|
||||
to be dropped between the VLAN interface (for example,
|
||||
<literal>vlan100</literal>) and the associated bridge interface
|
||||
(for example, <literal>br100</literal>) on the host running
|
||||
<systemitem class="service">nova-network</systemitem>.</para>
|
||||
<para>One way to check whether this is the problem is to open
|
||||
three terminals and run the following commands:</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>In the first terminal, on the host running
|
||||
<systemitem>nova-network</systemitem>, use
|
||||
<command>tcpdump</command> on the VLAN interface to monitor
|
||||
DNS-related traffic (UDP, port 53). As root, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>tcpdump -K -p -i vlan100 -v -vv udp port 53</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>In the second terminal, also on the host running
|
||||
<systemitem>nova-network</systemitem>, use
|
||||
<command>tcpdump</command> to monitor DNS-related traffic
|
||||
on the bridge interface. As root, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>tcpdump -K -p -i br100 -v -vv udp port 53</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>In the third terminal, SSH to the instance and
|
||||
generate DNS requests by using the
|
||||
<command>nslookup</command> command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nslookup www.google.com</userinput></screen>
|
||||
<para>The symptoms may be intermittent, so try running
|
||||
<command>nslookup</command> multiple times. If the
|
||||
network configuration is correct, the command should
|
||||
return immediately each time. If it is not correct, the
|
||||
command hangs for several seconds before returning.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>If the <command>nslookup</command> command sometimes
|
||||
hangs, and there are packets that appear in the first
|
||||
terminal but not the second, then the problem may be due
|
||||
to filtering done on the bridges. Try disabling
filtering by running these commands as root:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl -w net.bridge.bridge-nf-call-arptables=0</userinput>
|
||||
<prompt>#</prompt> <userinput>sysctl -w net.bridge.bridge-nf-call-iptables=0</userinput>
|
||||
<prompt>#</prompt> <userinput>sysctl -w net.bridge.bridge-nf-call-ip6tables=0</userinput></screen>
|
||||
<para>If this solves your issue, add the following line to
|
||||
<filename>/etc/sysctl.conf</filename> so that the changes
|
||||
are persistent:</para>
|
||||
<programlisting language="ini">net.bridge.bridge-nf-call-arptables=0
|
||||
net.bridge.bridge-nf-call-iptables=0
|
||||
net.bridge.bridge-nf-call-ip6tables=0</programlisting>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>KVM: Network connectivity works initially, then fails</title>
|
||||
<para>With KVM hypervisors, instances running Ubuntu 12.04
|
||||
sometimes lose network connectivity after functioning properly
|
||||
for a period of time. Try loading the <literal>vhost_net</literal>
|
||||
kernel module as a workaround for this issue (see <link
|
||||
xlink:href="https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/997978/">bug
|
||||
#997978</link>). This kernel module may also <link
|
||||
xlink:href="http://www.linux-kvm.org/page/VhostNet">improve
|
||||
network performance</link> on KVM. To load the kernel module:</para>
|
||||
<screen><prompt>#</prompt> <userinput>modprobe vhost_net</userinput></screen>
|
||||
<note>
|
||||
<para>Loading the module has no effect on running instances.</para>
|
||||
</note>
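<para>To load the module automatically at boot, you can, for example,
add it to <filename>/etc/modules</filename> on Debian-based and Ubuntu
systems (other distributions typically use
<filename>/etc/modules-load.d/</filename>):</para>
<screen><prompt>#</prompt> <userinput>echo vhost_net >> /etc/modules</userinput></screen>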
|
||||
</simplesect>
|
||||
</section>
|
||||
</section>
|
@ -1,372 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_nova-compute-node-down">
|
||||
<title>Recover from a failed compute node</title>
|
||||
<para>If Compute is deployed with a shared file system, and a node fails,
|
||||
there are several methods to quickly recover from the failure. This
|
||||
section discusses manual recovery.</para>
|
||||
<xi:include href="../../common/section_cli_nova_evacuate.xml"/>
|
||||
<section xml:id="nova-compute-node-down-manual-recovery">
|
||||
<title>Manual recovery</title>
|
||||
<para>To recover a KVM or libvirt compute node, see
|
||||
<xref linkend="nova-compute-node-down-manual-recovery" />. For all
|
||||
other hypervisors, use this procedure:</para>
|
||||
<procedure>
|
||||
<step>
|
||||
<para>Identify the VMs on the affected hosts. To do this, you can
|
||||
use a combination of <command>nova list</command> and
|
||||
<command>nova show</command> or <command>euca-describe-instances</command>.
|
||||
For example, this command displays information about instance
|
||||
<systemitem>i-000015b9</systemitem> that is running on node
|
||||
<systemitem>np-rcc54</systemitem>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>euca-describe-instances</userinput>
|
||||
<computeroutput>i-000015b9 at3-ui02 running nectarkey (376, np-rcc54) 0 m1.xxlarge 2012-06-19T00:48:11.000Z 115.146.93.60</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Query the Compute database to check the status of the host.
|
||||
This example converts an EC2 API instance ID into an OpenStack
|
||||
ID. If you use the <command>nova</command> commands, you can
|
||||
substitute the ID directly (the output in this example has been
|
||||
truncated):</para>
|
||||
<screen><prompt>mysql></prompt> <userinput>SELECT * FROM instances WHERE id = CONV('15b9', 16, 10) \G;</userinput>
|
||||
<computeroutput>*************************** 1. row ***************************
|
||||
created_at: 2012-06-19 00:48:11
|
||||
updated_at: 2012-07-03 00:35:11
|
||||
deleted_at: NULL
|
||||
...
|
||||
id: 5561
|
||||
...
|
||||
power_state: 5
|
||||
vm_state: shutoff
|
||||
...
|
||||
hostname: at3-ui02
|
||||
host: np-rcc54
|
||||
...
|
||||
uuid: 3f57699a-e773-4650-a443-b4b37eed5a06
|
||||
...
|
||||
task_state: NULL
|
||||
...</computeroutput></screen>
|
||||
<note>
|
||||
<para>The credentials for your database can be found in
|
||||
<filename>/etc/nova/nova.conf</filename>.</para>
|
||||
</note>
|
||||
</step>
|
||||
<step>
|
||||
<para>Decide which compute host the affected VM should be moved
|
||||
to, and run this database command to move the VM to the new
|
||||
host:</para>
|
||||
<screen><prompt>mysql></prompt> <userinput>UPDATE instances SET host = 'np-rcc46' WHERE uuid = '3f57699a-e773-4650-a443-b4b37eed5a06';</userinput></screen>
|
||||
</step>
|
||||
<step performance="optional">
|
||||
<para>If you are using a hypervisor that relies on libvirt (such
|
||||
as KVM), update the <literal>libvirt.xml</literal> file (found
|
||||
in <literal>/var/lib/nova/instances/[instance ID]</literal>) with
|
||||
these changes:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Change the <literal>DHCPSERVER</literal> value to the
|
||||
host IP address of the new compute host.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Update the VNC IP to <uri>0.0.0.0</uri>.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</step>
|
||||
<step>
|
||||
<para>Reboot the VM:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova reboot 3f57699a-e773-4650-a443-b4b37eed5a06</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>The database update and <command>nova reboot</command> command
|
||||
should be all that is required to recover a VM from a failed host.
|
||||
However, if you continue to have problems, try recreating the network
|
||||
filter configuration using <command>virsh</command>, restarting the
|
||||
Compute services, or updating the <literal>vm_state</literal> and
|
||||
<literal>power_state</literal> in the Compute database.</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="section_nova-uid-mismatch">
|
||||
<title>Recover from a UID/GID mismatch</title>
|
||||
<para>In some cases, files on your compute node can end up using the
|
||||
wrong UID or GID. This can happen when running OpenStack Compute,
|
||||
using a shared file system, or with an automated configuration tool.
|
||||
This can cause a number of problems, such as an inability to perform
live migrations or to start virtual machines.</para>
|
||||
<para>This procedure runs on <systemitem class="service">nova-compute</systemitem>
|
||||
hosts, based on the KVM hypervisor:</para>
|
||||
<procedure>
|
||||
<title>Recovering from a UID/GID mismatch</title>
|
||||
<step>
|
||||
<para>Set the nova UID in <filename>/etc/passwd</filename> to the
|
||||
same number on all hosts (for example, 112).</para>
|
||||
<note>
|
||||
<para>Make sure you choose UIDs or GIDs that are not in use for
|
||||
other users or groups.</para>
|
||||
</note>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set the <literal>libvirt-qemu</literal> UID in
|
||||
<filename>/etc/passwd</filename> to the same number on all hosts
|
||||
(for example, 119).</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set the <literal>nova</literal> group in
|
||||
the <filename>/etc/group</filename> file to the same number on all
|
||||
hosts (for example, 120).</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set the <literal>libvirtd</literal> group in
|
||||
the <filename>/etc/group</filename> file to the same number on all
|
||||
hosts (for example, 119).</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Stop the services on the compute node.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Change all the files owned by user or group
|
||||
<systemitem>nova</systemitem>. For example:</para>
|
||||
<screen><prompt>#</prompt> <userinput>find / -uid 108 -exec chown nova {} \; </userinput># note the 108 here is the old nova UID before the change
|
||||
<prompt>#</prompt> <userinput>find / -gid 120 -exec chgrp nova {} \;</userinput></screen>
|
||||
</step>
|
||||
<step performance="optional">
|
||||
<para>Repeat all steps for the <literal>libvirt-qemu</literal>
|
||||
files, if required.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart the services.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Run the <command>find</command> command to verify that all
|
||||
files use the correct identifiers.</para>
|
||||
</step>
|
||||
</procedure>
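<para>As an illustration of the ID changes in the first four steps,
the standard <command>usermod</command> and <command>groupmod</command>
commands can be used on each host (the numeric IDs shown are the
examples used above; choose values that are free on every host):</para>
<screen><prompt>#</prompt> <userinput>usermod -u 112 nova</userinput>
<prompt>#</prompt> <userinput>groupmod -g 120 nova</userinput>
<prompt>#</prompt> <userinput>usermod -u 119 libvirt-qemu</userinput>
<prompt>#</prompt> <userinput>groupmod -g 119 libvirtd</userinput></screen>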
|
||||
</section>
|
||||
|
||||
<section xml:id="section_nova-disaster-recovery-process">
|
||||
<title>Recover cloud after disaster</title>
|
||||
<para>This section covers procedures for managing your cloud after a
|
||||
disaster, and backing up persistent storage volumes. Backups are
|
||||
mandatory, even outside of disaster scenarios.</para>
|
||||
<para>For a definition of a disaster recovery plan (DRP), see <link
|
||||
xlink:href="http://en.wikipedia.org/wiki/Disaster_Recovery_Plan">
|
||||
http://en.wikipedia.org/wiki/Disaster_Recovery_Plan</link>.</para>
|
||||
<para>A disaster could happen to several components of your
|
||||
architecture (for example, a disk crash, network loss, or a power
|
||||
failure). In this example, the following components are configured:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>A cloud controller (<systemitem>nova-api</systemitem>,
|
||||
<systemitem>nova-objectstore</systemitem>,
|
||||
<systemitem>nova-network</systemitem>)</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A compute node (<systemitem class="service">nova-compute</systemitem>)</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A storage area network (SAN) used by OpenStack Block Storage
|
||||
(<systemitem class="service">cinder-volumes</systemitem>)</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The worst disaster for a cloud is power loss, which applies to
|
||||
all three components. Before a power loss:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Create an active iSCSI session from the SAN to the cloud
|
||||
controller (used for the <literal>cinder-volumes</literal>
|
||||
LVM volume group).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create an active iSCSI session from the cloud controller to
|
||||
the compute node (managed by
|
||||
<systemitem class="service">cinder-volume</systemitem>).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create an iSCSI session for every volume (so 14 EBS volumes
|
||||
require 14 iSCSI sessions).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create iptables or ebtables rules from the cloud controller
|
||||
to the compute node. This allows access from the cloud controller
|
||||
to the running instance.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Save the current state of the database, the current state of
|
||||
the running instances, and the attached volumes (mount point,
|
||||
volume ID, volume status, etc), at least from the cloud
|
||||
controller to the compute node.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>After power is recovered and all hardware components have
|
||||
restarted:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>The iSCSI session from the SAN to the cloud controller no longer
|
||||
exists.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The iSCSI session from the cloud controller to the compute
|
||||
node no longer exists.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The iptables and ebtables from the cloud controller to the
|
||||
compute node are recreated. This is because
|
||||
<systemitem>nova-network</systemitem> reapplies configurations
|
||||
on boot.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Instances are no longer running.</para>
|
||||
<para>Note that instances will not be lost, because neither
|
||||
<command>destroy</command> nor <command>terminate</command> was
|
||||
invoked. The files for the instances will remain on
|
||||
the compute node.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The database has not been updated.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<procedure>
|
||||
<title>Begin recovery</title>
|
||||
<warning>
|
||||
<para>Do not add any extra steps to this procedure, or perform
|
||||
the steps out of order.</para>
|
||||
</warning>
|
||||
<step>
|
||||
<para>Check the current relationship between the volume and its
|
||||
instance, so that you can recreate the attachment.</para>
|
||||
<para>This information can be found using the
|
||||
<command>nova volume-list</command>. Note that the
|
||||
<command>nova</command> client also includes the ability to get
|
||||
volume information from OpenStack Block Storage.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Update the database to clean the stalled state. Do this for
|
||||
every volume, using these queries:</para>
|
||||
<screen><prompt>mysql></prompt> <userinput>use cinder;</userinput>
|
||||
<prompt>mysql></prompt> <userinput>update volumes set mountpoint=NULL;</userinput>
|
||||
<prompt>mysql></prompt> <userinput>update volumes set status="available" where status <>"error_deleting";</userinput>
|
||||
<prompt>mysql></prompt> <userinput>update volumes set attach_status="detached";</userinput>
|
||||
<prompt>mysql></prompt> <userinput>update volumes set instance_id=0;</userinput></screen>
|
||||
<para>Use <command>nova volume-list</command> commands to list all
|
||||
volumes.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart the instances using the <command>nova reboot
|
||||
<replaceable>INSTANCE</replaceable></command> command.</para>
|
||||
<important>
|
||||
<para>Some instances will completely reboot and become reachable,
|
||||
while some might stop at the <application>plymouth</application>
|
||||
stage. This is expected behavior; DO NOT reboot a second time.</para>
|
||||
<para>Instance state at this stage depends on whether you added
|
||||
an <filename>/etc/fstab</filename> entry for that volume.
|
||||
Images built with the <package>cloud-init</package> package
|
||||
remain in a <literal>pending</literal> state, while others
|
||||
skip the missing volume and start. This step is performed in
|
||||
order to ask Compute to reboot every instance, so that the
|
||||
stored state is preserved. It does not matter if not all
|
||||
instances come up successfully. For more information about
|
||||
<package>cloud-init</package>, see
|
||||
<link xlink:href="https://help.ubuntu.com/community/CloudInit">
|
||||
help.ubuntu.com/community/CloudInit</link>.</para>
|
||||
</important>
|
||||
</step>
|
||||
<step performance="optional">
|
||||
<para>Reattach the volumes to their respective instances, if
|
||||
required, using the <command>nova volume-attach</command>
|
||||
command. This example uses a file of listed volumes to reattach
|
||||
them:</para>
|
||||
<programlisting language="bash">#!/bin/bash
|
||||
|
||||
while read line; do
|
||||
volume=`echo $line | $CUT -f 1 -d " "`
|
||||
instance=`echo $line | $CUT -f 2 -d " "`
|
||||
mount_point=`echo $line | $CUT -f 3 -d " "`
|
||||
echo "ATTACHING VOLUME FOR INSTANCE - $instance"
|
||||
nova volume-attach $instance $volume $mount_point
|
||||
sleep 2
|
||||
done < $volumes_tmp_file</programlisting>
|
||||
<para>Instances that were stopped at the
|
||||
<application>plymouth</application> stage will now automatically
|
||||
continue booting and start normally. Instances that previously
|
||||
started successfully will now be able to see the volume.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>SSH into the instances and reboot them.</para>
|
||||
<para>If some services depend on the volume, or if a volume has an
|
||||
entry in <systemitem>fstab</systemitem>, you should now be able
|
||||
to restart the instance. Restart directly from the instance
|
||||
itself, not through <command>nova</command>:</para>
|
||||
<screen><prompt>#</prompt> <userinput>shutdown -r now</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>When you are planning for and performing a disaster recovery,
|
||||
follow these tips:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Use the <literal>errors=remount-ro</literal> parameter in
the <filename>fstab</filename> file to prevent data
corruption; see the example entry after this list.</para>
|
||||
<para>This parameter will cause the system to disable the ability
|
||||
to write to the disk if it detects an I/O error. This
|
||||
configuration option should be added into the
|
||||
<systemitem class="service">cinder-volume</systemitem> server
|
||||
(the one which performs the iSCSI connection to the SAN), and
|
||||
into the instances' <filename>fstab</filename> files.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Do not add the entry for the SAN's disks to the
|
||||
<systemitem class="service">cinder-volume</systemitem>'s
|
||||
<filename>fstab</filename> file.</para>
|
||||
<para>Some systems hang on that step, which means you could lose
|
||||
access to your cloud-controller. To re-run the session
|
||||
manually, run this command before performing the mount:
|
||||
<screen><prompt>#</prompt> <userinput>iscsiadm -m discovery -t st -p $SAN_IP</userinput>
<prompt>#</prompt> <userinput>iscsiadm -m node --target-name $IQN -p $SAN_IP -l</userinput></screen></para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>On your instances, if you have the whole
|
||||
<filename>/home/</filename> directory on the disk, leave a
|
||||
user's directory with the user's bash files and the
|
||||
<filename>authorized_keys</filename> file (instead of emptying
|
||||
the <filename>/home</filename> directory and mapping the disk
|
||||
on it).</para>
|
||||
<para>This allows you to connect to the instance even without
|
||||
the volume attached, if you allow only connections through
|
||||
public keys.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>
|
||||
If you want to script the disaster recovery plan (DRP), a bash
|
||||
script is available from <link xlink:href="https://github.com/Razique/BashStuff/blob/master/SYSTEMS/OpenStack/SCR_5006_V00_NUAC-OPENSTACK-DRP-OpenStack.sh">
|
||||
https://github.com/Razique</link> which performs the following
|
||||
steps:</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>An array is created for instances and their attached
|
||||
volumes.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The MySQL database is updated.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>All instances are restarted with
|
||||
<systemitem>euca2ools</systemitem>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The volumes are reattached.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>An SSH connection is performed into every instance using
|
||||
Compute credentials.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
<para>The script includes a <command>test mode</command>, which
|
||||
allows you to perform that whole sequence for only one instance.</para>
|
||||
<para>To reproduce the power loss, connect to the compute node which
|
||||
runs that instance and close the iSCSI session. Do not detach the
|
||||
volume using the <command>nova volume-detach</command> command;
|
||||
instead, manually close the iSCSI session. This example closes an iSCSI
|
||||
session with the number 15:</para>
|
||||
<screen><prompt>#</prompt> <userinput>iscsiadm -m session -u -r 15</userinput></screen>
|
||||
<para>Do not forget the <literal>-r</literal> flag. Otherwise, you
|
||||
will close all sessions.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,110 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="root-wrap-reference">
|
||||
<title>Secure with rootwrap</title>
|
||||
<para>Rootwrap allows unprivileged users to safely run Compute actions
|
||||
as the root user. Compute previously used <command>sudo</command> for
|
||||
this purpose, but this was difficult to maintain, and did not allow
|
||||
advanced filters. The <command>rootwrap</command> command replaces
|
||||
<command>sudo</command> for Compute.</para>
|
||||
<para>To use rootwrap, prefix the Compute command with
|
||||
<command>nova-rootwrap</command>. For example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>sudo nova-rootwrap /etc/nova/rootwrap.conf <replaceable>command</replaceable></userinput></screen>
|
||||
<para>
|
||||
A generic <filename>sudoers</filename> entry lets the Compute user run
|
||||
<command>nova-rootwrap</command> as root. The
|
||||
<command>nova-rootwrap</command> code looks for filter definition
|
||||
directories in its configuration file, and loads command filters from
|
||||
them. It then checks if the command requested by Compute matches one of
|
||||
those filters and, if so, executes the command (as root). If no filter
|
||||
matches, it denies the request.</para>
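<para>A typical <filename>sudoers</filename> entry for this purpose,
assuming the default installation paths, looks similar to the
following:</para>
<programlisting>nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *</programlisting>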
|
||||
<note><para>Be aware of issues with using NFS and root-owned files. The
|
||||
NFS share must be configured with the <option>no_root_squash</option>
|
||||
option enabled, in order for rootwrap to work correctly.</para>
|
||||
</note>
|
||||
<para>Rootwrap is fully controlled by the root user. The root user owns
|
||||
the sudoers entry which allows Compute to run a specific rootwrap
|
||||
executable as root, and only with a specific configuration file (which
|
||||
should also be owned by root). The <command>nova-rootwrap</command>
|
||||
command imports the Python modules it needs from a cleaned,
|
||||
system-default <replaceable>PYTHONPATH</replaceable>. The root-owned
|
||||
configuration file points to root-owned filter definition directories,
|
||||
which contain root-owned filters definition files. This chain ensures
|
||||
that the Compute user itself is not in control of the configuration or
|
||||
modules used by the <command>nova-rootwrap</command> executable.</para>
|
||||
<para>Rootwrap is configured using the <filename>rootwrap.conf</filename>
|
||||
file. Because it's in the trusted security path, it must be owned and
|
||||
writable by only the root user. The file's location is specified in both
|
||||
the sudoers entry and in the <filename>nova.conf</filename> configuration
|
||||
file with the <literal>rootwrap_config=entry</literal> parameter.</para>
|
||||
<para>The <filename>rootwrap.conf</filename> file uses an INI file format
|
||||
with these sections and parameters:</para>
|
||||
<table rules="all" frame="border" xml:id="rootwrap-conf-table-filter-path" width="100%">
|
||||
<caption>rootwrap.conf configuration options</caption>
|
||||
<col width="50%"/>
|
||||
<col width="50%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<td><para>Configuration option=Default value</para></td>
|
||||
<td><para>(Type) Description</para></td>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><para>[DEFAULT]</para>
|
||||
<para>filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap</para></td>
|
||||
<td><para>(ListOpt) Comma-separated list of directories containing
|
||||
filter definition files. Defines where rootwrap filters are
|
||||
stored. Directories defined on this line should all exist, and be
|
||||
owned and writable only by the root user.</para></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<para>If the root wrapper is not performing correctly, you can
|
||||
add a workaround option into the <filename>nova.conf</filename>
|
||||
configuration file. This workaround re-configures the
|
||||
root wrapper to fall back to running commands as
|
||||
sudo, and is a Kilo release feature.</para>
|
||||
<para>Including this workaround in your configuration file
|
||||
safeguards your environment from issues that can impair root
|
||||
wrapper performance. Tool changes that have impacted
|
||||
<link xlink:href="https://git.openstack.org/cgit/openstack-dev/pbr/">Python Build Reasonableness (PBR)</link>,
|
||||
for example, are a known issue that affects
|
||||
root wrapper performance.</para>
|
||||
<para>To set up this workaround, configure the <parameter>disable_rootwrap</parameter>
|
||||
option in the <option>[workarounds]</option> section
|
||||
of the <filename>nova.conf</filename> configuration file.</para>
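<para>For example, to fall back to <command>sudo</command>, the
<filename>nova.conf</filename> entry could look like this:</para>
<programlisting language="ini">[workarounds]
disable_rootwrap = True</programlisting>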
|
||||
<para>The filters definition files contain lists of filters that rootwrap
|
||||
will use to allow or deny a specific command. They are generally
|
||||
suffixed by <literal>.filters</literal>. Since they are in the trusted
|
||||
security path, they need to be owned and writable only by the root user.
|
||||
Their location is specified in the <filename>rootwrap.conf</filename>
|
||||
file.</para>
|
||||
<para>Filter definition files use an INI file format with a
|
||||
<literal>[Filters]</literal> section and several lines, each with a
|
||||
unique parameter name, which should be different for each filter you
|
||||
define:</para>
|
||||
<table rules="all" frame="border" xml:id="rootwrap-conf-table-filter-name" width="100%">
|
||||
<caption>.filters configuration options</caption>
|
||||
<col width="50%"/>
|
||||
<col width="50%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<td><para>Configuration option=Default value</para></td>
|
||||
<td><para>(Type) Description</para></td>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><para>[Filters]</para>
|
||||
<para>filter_name=kpartx: CommandFilter, /sbin/kpartx, root</para></td>
|
||||
<td><para>(ListOpt) Comma-separated list containing the filter class
|
||||
to use, followed by the Filter arguments (which vary depending
|
||||
on the Filter class selected).</para></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
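<para>Using the default shown above, a minimal <literal>.filters</literal>
file would contain:</para>
<programlisting language="ini">[Filters]
kpartx: CommandFilter, /sbin/kpartx, root</programlisting>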
|
||||
</section>
|
@ -1,39 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section-compute-security">
|
||||
<title>Security hardening</title>
|
||||
<para>OpenStack Compute can be integrated with various third-party
|
||||
technologies to increase security. For more information, see the
|
||||
<link xlink:href="http://docs.openstack.org/sec/">
|
||||
<citetitle>OpenStack Security Guide</citetitle></link>.</para>
|
||||
|
||||
<xi:include href="section_trusted-compute-pools.xml"/>
|
||||
|
||||
<section xml:id="section_compute_metadata_https">
|
||||
<title>Encrypt Compute metadata traffic</title>
|
||||
<para>OpenStack supports encrypting Compute metadata traffic with HTTPS.
|
||||
Enable SSL encryption in the <filename>metadata_agent.ini</filename>
|
||||
file.</para>
|
||||
<procedure>
|
||||
<title>Enabling SSL encryption</title>
|
||||
<step>
|
||||
<para>Enable the HTTPS protocol:</para>
|
||||
<programlisting>nova_metadata_protocol = https</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Determine whether insecure SSL connections are accepted for
|
||||
Compute metadata server requests. The default value is
|
||||
<option>False</option>:</para>
|
||||
<programlisting>nova_metadata_insecure = False</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Specify the path to the client certificate:</para>
|
||||
<programlisting>nova_client_cert = <replaceable>PATH_TO_CERT</replaceable></programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Specify the path to the private key:</para>
|
||||
<programlisting>nova_client_priv_key = <replaceable>PATH_TO_KEY</replaceable></programlisting>
|
||||
</step>
|
||||
</procedure>
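<para>Taken together, a minimal <filename>metadata_agent.ini</filename>
fragment based on the steps above could look like this (the certificate
and key paths are placeholders):</para>
<programlisting language="ini">nova_metadata_protocol = https
nova_metadata_insecure = False
nova_client_cert = /etc/neutron/ssl/metadata-client.crt
nova_client_priv_key = /etc/neutron/ssl/metadata-client.key</programlisting>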
|
||||
</section>
|
||||
</section>
|
@ -1,577 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_compute-system-admin">
|
||||
<title>System administration</title>
|
||||
<para>To effectively administer Compute, you must understand how the
|
||||
different installed nodes interact with each other. Compute can be
|
||||
installed in many different ways using multiple servers, but generally
|
||||
multiple compute nodes control the virtual servers and a cloud
|
||||
controller node contains the remaining Compute services.</para>
|
||||
<para>The Compute cloud works using a series of daemon processes named
|
||||
<systemitem>nova-*</systemitem> that exist persistently on the host
|
||||
machine. These binaries can all run on the same machine or be spread out
|
||||
on multiple boxes in a large deployment. The responsibilities of
|
||||
services and drivers are:</para>
|
||||
<itemizedlist>
|
||||
<title>Services</title>
|
||||
<listitem>
|
||||
<para><systemitem class="service">nova-api</systemitem>: receives
|
||||
XML requests and sends them to the rest of the system. A WSGI app
|
||||
routes and authenticates requests. Supports the EC2 and
|
||||
OpenStack APIs. A <filename>nova.conf</filename> configuration
|
||||
file is created when Compute is installed.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem>nova-cert</systemitem>: manages certificates.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem class="service">nova-compute</systemitem>: manages
|
||||
virtual machines. Loads a Service object, and exposes the public
|
||||
methods on ComputeManager through a Remote Procedure Call (RPC).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem>nova-conductor</systemitem>: provides
|
||||
database-access support for Compute nodes (thereby reducing
|
||||
security risks).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem>nova-consoleauth</systemitem>: manages console
|
||||
authentication.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem class="service">nova-objectstore</systemitem>: a
|
||||
simple file-based storage system for images that replicates most
|
||||
of the S3 API. It can be replaced with OpenStack Image service and
|
||||
either a simple image manager or OpenStack Object Storage as the
|
||||
virtual machine image storage facility. It must exist on the same
|
||||
node as <systemitem class="service">nova-compute</systemitem>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem class="service">nova-network</systemitem>: manages
|
||||
floating and fixed IPs, DHCP, bridging and VLANs. Loads a Service
|
||||
object which exposes the public methods on one of the subclasses
|
||||
of <systemitem class="service">NetworkManager</systemitem>.
|
||||
Different networking strategies are available by changing the
|
||||
<literal>network_manager</literal> configuration option to
|
||||
<literal>FlatManager</literal>,
|
||||
<literal>FlatDHCPManager</literal>, or
|
||||
<literal>VLANManager</literal> (defaults to
|
||||
<literal>VLANManager</literal> if nothing is specified).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem>nova-scheduler</systemitem>: dispatches requests
|
||||
for new virtual machines to the correct node.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><systemitem>nova-novncproxy</systemitem>: provides a VNC proxy
|
||||
for browsers, allowing VNC consoles to access virtual machines.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<note><para>Some services have drivers that change how the service
|
||||
implements its core functionality. For example, the
|
||||
<systemitem>nova-compute</systemitem> service supports drivers that
|
||||
let you choose which hypervisor type it can use.
|
||||
<systemitem>nova-network</systemitem> and
|
||||
<systemitem>nova-scheduler</systemitem> also have drivers.</para>
|
||||
</note>
|
||||
|
||||
<section xml:id="section_manage-compute-users">
|
||||
<title>Manage Compute users</title>
|
||||
<para>Access to the Euca2ools (ec2) API is controlled by an access key and
|
||||
a secret key. The user's access key needs to be included in the request,
|
||||
and the request must be signed with the secret key. Upon receipt of API
|
||||
requests, Compute verifies the signature and runs commands on behalf of
|
||||
the user.</para>
|
||||
<para>To begin using Compute, you must create a user with the Identity
|
||||
Service.</para>
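<para>For example, a user can be created with the
<command>keystone</command> command-line client (the user name and
password shown are placeholders):</para>
<screen><prompt>$</prompt> <userinput>keystone user-create --name <replaceable>USER_NAME</replaceable> --pass <replaceable>PASSWORD</replaceable></userinput></screen>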
|
||||
</section>
|
||||
|
||||
<xi:include href="../../common/section_cli_nova_volumes.xml"/>
|
||||
<xi:include href="../../common/section_cli_nova_customize_flavors.xml"/>
|
||||
<xi:include href="section_compute_config-firewalls.xml"/>
|
||||
|
||||
<section xml:id="admin-password-injection">
|
||||
<title>Injecting the administrator password</title>
|
||||
<para>Compute can generate a random administrator (root) password and
|
||||
inject that password into an instance. If this feature is enabled, users
|
||||
can <command>ssh</command> to an instance without an <command>ssh</command>
|
||||
keypair. The random password appears in the output of the
|
||||
<command>nova boot</command> command. You can also view and set the
|
||||
admin password from the dashboard.</para>
|
||||
<simplesect>
|
||||
<title>Password injection using the dashboard</title>
|
||||
<para>By default, the dashboard will display the <literal>admin</literal>
|
||||
password and allow the user to modify it.</para>
|
||||
<para>If you do not want to support password injection, disable the
|
||||
password fields by editing the dashboard's
|
||||
<filename>local_settings</filename> file. On Fedora/RHEL/CentOS, the
|
||||
file location is <filename>/etc/openstack-dashboard/local_settings</filename>.
|
||||
On Ubuntu and Debian, it is <filename>/etc/openstack-dashboard/local_settings.py</filename>.
|
||||
On openSUSE and SUSE Linux Enterprise Server, it is
|
||||
<filename>/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py</filename></para>
|
||||
<programlisting language="ini">OPENSTACK_HYPERVISOR_FEATURES = {
|
||||
...
|
||||
'can_set_password': False,
|
||||
}</programlisting>
|
||||
</simplesect>
|
||||
|
||||
<simplesect>
|
||||
<title>Password injection on libvirt-based hypervisors</title>
|
||||
<para>For hypervisors that use the libvirt back end (such as KVM, QEMU,
|
||||
and LXC), admin password injection is disabled by default. To enable
|
||||
it, set this option in <filename>/etc/nova/nova.conf</filename>:</para>
|
||||
<programlisting language="ini">[libvirt]
|
||||
inject_password=true</programlisting>
|
||||
<para>When enabled, Compute will modify the password of the admin
|
||||
account by editing the <filename>/etc/shadow</filename> file inside
|
||||
the virtual machine instance.</para>
|
||||
<note>
|
||||
<para>Users can only <command>ssh</command> to the instance by using
|
||||
the admin password if the virtual machine image is a Linux
|
||||
distribution, and it has been configured to allow users to
|
||||
<command>ssh</command> as the root user. This is not the case for
|
||||
<link xlink:href="http://cloud-images.ubuntu.com/">Ubuntu cloud
|
||||
images</link> which, by default, do not allow users to
|
||||
<command>ssh</command> to the root account.</para>
|
||||
</note>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Password injection and XenAPI (XenServer/XCP)</title>
|
||||
<para>When using the XenAPI hypervisor back end, Compute uses the XenAPI
|
||||
agent to inject passwords into guests. The virtual machine image must
|
||||
be configured with the agent for password injection to work.</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Password injection and Windows images (all hypervisors)</title>
|
||||
<para>For Windows virtual machines, configure the Windows image to
|
||||
retrieve the admin password on boot by installing an agent such as
|
||||
<link xlink:href="https://github.com/cloudbase/cloudbase-init">
|
||||
cloudbase-init</link>.</para>
|
||||
</simplesect>
|
||||
</section>
|
||||
|
||||
<section xml:id="section_manage-the-cloud">
|
||||
<title>Manage the cloud</title>
|
||||
<para>System administrators can use <command>nova</command> client and
|
||||
<command>Euca2ools</command> commands to manage their clouds.</para>
|
||||
<para><command>nova</command> client and <command>euca2ools</command> can
|
||||
be used by all users, though specific commands might be restricted by
|
||||
Role Based Access Control in the Identity Service.</para>
|
||||
<procedure>
|
||||
<title>Managing the cloud with nova client</title>
|
||||
<step>
|
||||
<para>The <package>python-novaclient</package> package provides a
|
||||
<code>nova</code> shell that enables Compute API interactions from
|
||||
the command line. Install the client, and provide your user name and
|
||||
password (which can be set as environment variables for convenience),
|
||||
for the ability to administer the cloud from the command line.</para>
|
||||
<para>To install <package>python-novaclient</package>, download the
|
||||
tarball from <link xlink:href="http://pypi.python.org/pypi/python-novaclient/#downloads">
|
||||
http://pypi.python.org/pypi/python-novaclient/#downloads</link> and
|
||||
then install it in your favorite Python environment.</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl -O http://pypi.python.org/packages/source/p/python-novaclient/python-novaclient-2.6.3.tar.gz</userinput>
|
||||
<prompt>$</prompt> <userinput>tar -zxvf python-novaclient-2.6.3.tar.gz</userinput>
|
||||
<prompt>$</prompt> <userinput>cd python-novaclient-2.6.3</userinput></screen>
|
||||
<para>As root, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>python setup.py install</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Confirm the installation was successful:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova help</userinput>
|
||||
<computeroutput>usage: nova [--version] [--debug] [--os-cache] [--timings]
|
||||
[--timeout <replaceable>SECONDS</replaceable>] [--os-username <replaceable>AUTH_USER_NAME</replaceable>]
|
||||
[--os-password <replaceable>AUTH_PASSWORD</replaceable>]
|
||||
[--os-tenant-name <replaceable>AUTH_TENANT_NAME</replaceable>]
|
||||
[--os-tenant-id <replaceable>AUTH_TENANT_ID</replaceable>] [--os-auth-url <replaceable>AUTH_URL</replaceable>]
|
||||
[--os-region-name <replaceable>REGION_NAME</replaceable>] [--os-auth-system <replaceable>AUTH_SYSTEM</replaceable>]
|
||||
[--service-type <replaceable>SERVICE_TYPE</replaceable>] [--service-name <replaceable>SERVICE_NAME</replaceable>]
|
||||
[--volume-service-name <replaceable>VOLUME_SERVICE_NAME</replaceable>]
|
||||
[--endpoint-type <replaceable>ENDPOINT_TYPE</replaceable>]
|
||||
[--os-compute-api-version <replaceable>COMPUTE_API_VERSION</replaceable>]
|
||||
[--os-cacert <replaceable>CA_CERTIFICATE</replaceable>] [--insecure]
|
||||
[--bypass-url <replaceable>BYPASS_URL</replaceable>]
|
||||
<replaceable>SUBCOMMAND</replaceable> ...</computeroutput></screen>
|
||||
<para>This command returns a list of <command>nova</command> commands
|
||||
and parameters. To get help for a subcommand, run:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova help <replaceable>SUBCOMMAND</replaceable></userinput></screen>
|
||||
<para>For a complete list of <command>nova</command> commands and
|
||||
parameters, see the <link xlink:href="http://docs.openstack.org/cli-reference/content/">
|
||||
<citetitle>OpenStack Command-Line Reference</citetitle></link>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set the required parameters as environment variables to make
|
||||
running commands easier. For example, you can add
|
||||
<parameter>--os-username</parameter> as a <command>nova</command>
|
||||
option, or set it as an environment variable. To set the user name,
|
||||
password, and tenant as environment variables, use:</para>
|
||||
<screen><prompt>$</prompt> <userinput>export OS_USERNAME=joecool</userinput>
|
||||
<prompt>$</prompt> <userinput>export OS_PASSWORD=coolword</userinput>
|
||||
<prompt>$</prompt> <userinput>export OS_TENANT_NAME=coolu</userinput> </screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>The Identity Service will give you an authentication endpoint,
|
||||
which Compute recognizes as <literal>OS_AUTH_URL</literal>.</para>
|
||||
<screen><prompt>$</prompt> <userinput>export OS_AUTH_URL=http://hostname:5000/v2.0</userinput>
|
||||
<prompt>$</prompt> <userinput>export NOVA_VERSION=1.1</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
|
||||
<section xml:id="section_euca2ools">
|
||||
<title>Managing the cloud with euca2ools</title>
|
||||
<para>The <command>euca2ools</command> command-line tool provides a
|
||||
command line interface to EC2 API calls. For more information about
|
||||
<command>euca2ools</command>, see
|
||||
<link xlink:href="http://open.eucalyptus.com/wiki/Euca2oolsGuide_v1.3">
|
||||
http://open.eucalyptus.com/wiki/Euca2oolsGuide_v1.3</link></para>
|
||||
</section>
|
||||
|
||||
<xi:include href="../../common/section_cli_nova_usage_statistics.xml"/>
|
||||
|
||||
</section>
|
||||
|
||||
<section xml:id="section_manage-logs">
|
||||
<title>Logging</title>
|
||||
<simplesect>
|
||||
<title>Logging module</title>
|
||||
<para>Logging behavior can be changed by creating a configuration file.
|
||||
To specify the configuration file, add this line to the
|
||||
<filename>/etc/nova/nova.conf</filename> file:</para>
|
||||
<programlisting language="ini">log-config=/etc/nova/logging.conf</programlisting>
|
||||
<para>
|
||||
To change the logging level, add <literal>DEBUG</literal>,
|
||||
<literal>INFO</literal>, <literal>WARNING</literal>, or
|
||||
<literal>ERROR</literal> as a parameter.
|
||||
</para>
|
||||
<para>The logging configuration file is an INI-style configuration
|
||||
file, which must contain a section called
|
||||
<literal>logger_nova</literal>. This controls the behavior of
|
||||
the logging facility in the <literal>nova-*</literal> services. For
|
||||
example:</para>
|
||||
<programlisting language="ini">[logger_nova]
|
||||
level = INFO
|
||||
handlers = stderr
|
||||
qualname = nova</programlisting>
|
||||
<para>This example sets the debugging level to <literal>INFO</literal>
|
||||
(which is less verbose than the default <literal>DEBUG</literal>
|
||||
setting).</para>
|
||||
<para>For more about the logging configuration syntax, including the
|
||||
<literal>handlers</literal> and <literal>qualname</literal>
|
||||
variables, see the
|
||||
<link xlink:href="http://docs.python.org/release/2.7/library/logging.html#configuration-file-format">
|
||||
Python documentation</link> on logging configuration files.</para>
|
||||
<para>For an example <filename>logging.conf</filename> file with
|
||||
various defined handlers, see the
|
||||
<link xlink:href="http://docs.openstack.org/kilo/config-reference/content/">
|
||||
<citetitle>OpenStack Configuration Reference</citetitle></link>.
|
||||
</para>
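<para>As a minimal sketch, a <filename>logging.conf</filename> that
defines the <literal>stderr</literal> handler referenced above could
look like this (the formatter and log levels are example values, not
required settings):</para>
<programlisting language="ini">[loggers]
keys = root, nova

[handlers]
keys = stderr

[formatters]
keys = default

[logger_root]
level = WARNING
handlers = stderr

[logger_nova]
level = INFO
handlers = stderr
qualname = nova

[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = default

[formatter_default]
format = %(asctime)s %(levelname)s %(name)s: %(message)s</programlisting>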
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Syslog</title>
|
||||
<para>OpenStack Compute services can send logging information to
|
||||
<systemitem>syslog</systemitem>. This is useful if you want to use
|
||||
<systemitem>rsyslog</systemitem> to forward logs to a remote machine.
|
||||
Separately configure the Compute service (nova), the Identity
|
||||
service (keystone), the Image service (glance), and, if you are
|
||||
using it, the Block Storage service (cinder) to send log messages to
|
||||
<systemitem>syslog</systemitem>. Open these configuration files:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><filename>/etc/nova/nova.conf</filename></para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><filename>/etc/keystone/keystone.conf</filename></para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><filename>/etc/glance/glance-api.conf</filename></para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><filename>/etc/glance/glance-registry.conf</filename></para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><filename>/etc/cinder/cinder.conf</filename></para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>In each configuration file, add these lines:</para>
|
||||
<programlisting language="ini">verbose = False
|
||||
debug = False
|
||||
use_syslog = True
|
||||
syslog_log_facility = LOG_LOCAL0</programlisting>
|
||||
<para>In addition to enabling <systemitem>syslog</systemitem>, these
|
||||
settings also turn off verbose and debugging output from the log.</para>
|
||||
<note>
|
||||
<para>Although this example uses the same local facility for each
|
||||
service (<literal>LOG_LOCAL0</literal>, which corresponds to
|
||||
<systemitem>syslog</systemitem> facility <literal>LOCAL0</literal>),
|
||||
we recommend that you configure a separate local facility for each
|
||||
service, as this provides better isolation and more flexibility.
|
||||
For example, you can capture logging information at different
|
||||
severity levels for different services.
|
||||
<systemitem>syslog</systemitem> allows you to define up to eight
|
||||
local facilities, <literal>LOCAL0, LOCAL1, ..., LOCAL7</literal>.
|
||||
For more information, see the <systemitem>syslog</systemitem>
|
||||
documentation.</para>
|
||||
</note>
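<para>For example, keeping the option names shown above, the per-service
facility assignments could look like this (the facility numbers are
arbitrary examples):</para>
<programlisting language="ini"># /etc/nova/nova.conf
syslog_log_facility = LOG_LOCAL0
# /etc/glance/glance-api.conf and /etc/glance/glance-registry.conf
syslog_log_facility = LOG_LOCAL1
# /etc/cinder/cinder.conf
syslog_log_facility = LOG_LOCAL2
# /etc/keystone/keystone.conf
syslog_log_facility = LOG_LOCAL3</programlisting>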
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Rsyslog</title>
|
||||
<para><systemitem>rsyslog</systemitem> is useful for setting up a
|
||||
centralized log server across multiple machines. This section
|
||||
briefly describes the configuration to set up an
|
||||
<systemitem>rsyslog</systemitem> server. A full treatment of
|
||||
<systemitem>rsyslog</systemitem> is beyond the scope of this book.
|
||||
This section assumes <systemitem>rsyslog</systemitem> has already
|
||||
been installed on your hosts (it is installed by default on most
|
||||
Linux distributions).</para>
|
||||
<para>This example provides a minimal configuration for
|
||||
<filename>/etc/rsyslog.conf</filename> on the log server host,
|
||||
which receives the log files:</para>
|
||||
<programlisting># provides TCP syslog reception
|
||||
$ModLoad imtcp
|
||||
$InputTCPServerRun 1024</programlisting>
|
||||
<para>Add a filter rule to <filename>/etc/rsyslog.conf</filename>
|
||||
which looks for a host name. This example uses
|
||||
<replaceable>COMPUTE_01</replaceable> as the compute host name:</para>
|
||||
<programlisting>:hostname, isequal, "<replaceable>COMPUTE_01</replaceable>" /mnt/rsyslog/logs/compute-01.log</programlisting>
|
||||
<para>On each compute host, create a file named
|
||||
<filename>/etc/rsyslog.d/60-nova.conf</filename>, with the
|
||||
following content:</para>
|
||||
<programlisting># prevent debug from dnsmasq with the daemon.none parameter
|
||||
*.*;auth,authpriv.none,daemon.none,local0.none -/var/log/syslog
|
||||
# Specify a log level of ERROR
|
||||
local0.error @@172.20.1.43:1024</programlisting>
|
||||
<para>Once you have created the file, restart the
|
||||
<systemitem>rsyslog</systemitem> service. Error-level log messages
|
||||
on the compute hosts should now be sent to the log server.</para>
|
||||
</simplesect>
|
||||
<simplesect>
|
||||
<title>Serial console</title>
|
||||
<para>The serial console provides a way to examine kernel output and
|
||||
other system messages during troubleshooting if the instance lacks
|
||||
network connectivity.</para>
|
||||
<para>OpenStack Icehouse and earlier supports read-only access using
|
||||
the serial console through the <command>os-GetSerialOutput</command>
|
||||
server action. Most cloud images enable this feature by default.
|
||||
For more information, see <link linkend="section_compute-empty-log-output">
|
||||
Troubleshoot Compute</link>.</para>
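<para>For example, the stored console output of an instance can be
retrieved with the <command>nova console-log</command> command (the
instance name is a placeholder):</para>
<screen><prompt>$</prompt> <userinput>nova console-log <replaceable>INSTANCE_NAME</replaceable></userinput></screen>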
|
||||
<para>OpenStack Juno and later supports read-write access using the
|
||||
serial console through the <command>os-GetSerialConsole</command>
|
||||
server action. This feature also requires a websocket client to
|
||||
access the serial console.</para>
|
||||
<procedure>
|
||||
<title>Configuring read-write serial console access</title>
|
||||
<para>On a compute node, edit the
|
||||
<filename>/etc/nova/nova.conf</filename> file:</para>
|
||||
<step>
|
||||
<para>In the <literal>[serial_console]</literal> section,
|
||||
enable the serial console:</para>
|
||||
<programlisting language="ini">[serial_console]
|
||||
...
|
||||
enabled = true</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>In the <literal>[serial_console]</literal> section,
|
||||
configure the serial console proxy similar to graphical
|
||||
console proxies:</para>
|
||||
<programlisting language="ini">[serial_console]
|
||||
...
|
||||
base_url = ws://<replaceable>controller</replaceable>:6083/
|
||||
listen = 0.0.0.0
|
||||
proxyclient_address = <replaceable>MANAGEMENT_INTERFACE_IP_ADDRESS</replaceable></programlisting>
|
||||
<para>The <option>base_url</option> option specifies the base
|
||||
URL that clients receive from the API upon requesting a serial
|
||||
console. Typically, this refers to the host name of the
|
||||
controller node.</para>
|
||||
<para>The <option>listen</option> option specifies the network
|
||||
interface <systemitem class="service">nova-compute</systemitem>
|
||||
should listen on for virtual console connections. Typically,
|
||||
0.0.0.0 will enable listening on all interfaces.</para>
|
||||
<para>The <option>proxyclient_address</option> option specifies
|
||||
which network interface the proxy should connect to. Typically,
|
||||
this refers to the IP address of the management interface.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>When you enable read-write serial console access, Compute
|
||||
will add serial console information to the Libvirt XML file for
|
||||
the instance. For example:</para>
|
||||
<programlisting language="xml"><console type='tcp'>
|
||||
<source mode='bind' host='127.0.0.1' service='10000'/>
|
||||
<protocol type='raw'/>
|
||||
<target type='serial' port='0'/>
|
||||
<alias name='serial0'/>
|
||||
</console></programlisting>
|
||||
<procedure>
|
||||
<title>Accessing the serial console on an instance</title>
|
||||
<step>
|
||||
<para>Use the <command>nova get-serial-proxy</command> command
|
||||
to retrieve the websocket URL for the serial console on the
|
||||
instance:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova get-serial-proxy <replaceable>INSTANCE_NAME</replaceable></userinput>
|
||||
<computeroutput>+--------+-----------------------------------------------------------------+
|
||||
| Type | Url |
|
||||
+--------+-----------------------------------------------------------------+
|
||||
| serial | ws://127.0.0.1:6083/?token=18510769-71ad-4e5a-8348-4218b5613b3d |
|
||||
+--------+-----------------------------------------------------------------+</computeroutput></screen>
|
||||
<para>Alternatively, use the API directly:</para>
|
||||
<screen><prompt>$</prompt> <userinput>curl -i 'http://<controller>:8774/v2/<tenant_uuid>/servers/<instance_uuid>/action' \
|
||||
-X POST \
|
||||
-H "Accept: application/json" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Auth-Project-Id: <project_id>" \
|
||||
-H "X-Auth-Token: <auth_token>" \
|
||||
-d '{"os-getSerialConsole": {"type": "serial"}}'</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Use Python websocket with the URL to generate
|
||||
<literal>.send</literal>, <literal>.recv</literal>, and
|
||||
<literal>.fileno</literal> methods for serial console access.
|
||||
For example:</para>
|
||||
<programlisting language="python">import websocket
|
||||
ws = websocket.create_connection(
|
||||
'ws://127.0.0.1:6083/?token=18510769-71ad-4e5a-8348-4218b5613b3d',
|
||||
subprotocols=['binary', 'base64'])</programlisting>
|
||||
<para>Alternatively, use a Python websocket client such as
|
||||
<link xlink:href="https://github.com/larsks/novaconsole/"/>.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<note>
|
||||
<para>When you enable the serial console, typical instance logging
|
||||
using the <command>nova console-log</command> command is disabled.
|
||||
Kernel output and other system messages will not be visible
|
||||
unless you are actively viewing the serial console.</para>
|
||||
</note>
|
||||
</simplesect>
|
||||
</section>
|
||||
|
||||
|
||||
<xi:include href="section_compute-rootwrap.xml"/>
|
||||
<xi:include href="section_compute-configure-migrations.xml"/>
|
||||
|
||||
<section xml:id="section_live-migration-usage">
|
||||
<title>Migrate instances</title>
|
||||
<para>This section discusses how to migrate running instances from one
|
||||
OpenStack Compute server to another OpenStack Compute server.</para>
|
||||
<para>Before starting a migration, review the
|
||||
<link linkend="section_configuring-compute-migrations">Configure
|
||||
migrations section</link>.</para>
|
||||
<note>
|
||||
<para>Although the <command>nova</command> command is called
|
||||
<command>live-migration</command>, under the default Compute
|
||||
configuration options, the instances are suspended before migration.
|
||||
For more information, see <link xlink:href="http://docs.openstack.org/kilo/config-reference/content/list-of-compute-config-options.html">
|
||||
Configure migrations</link> in the <citetitle>OpenStack
|
||||
Configuration Reference</citetitle>.</para>
|
||||
</note>
|
||||
<procedure>
|
||||
<title>Migrating instances</title>
|
||||
<step>
|
||||
<para>Check the ID of the instance to be migrated:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova list</userinput>
|
||||
<computeroutput><![CDATA[+--------------------------------------+------+--------+-----------------+
|
||||
| ID | Name | Status |Networks |
|
||||
+--------------------------------------+------+--------+-----------------+
|
||||
| d1df1b5a-70c4-4fed-98b7-423362f2c47c | vm1 | ACTIVE | private=a.b.c.d |
|
||||
| d693db9e-a7cf-45ef-a7c9-b3ecb5f22645 | vm2 | ACTIVE | private=e.f.g.h |
|
||||
+--------------------------------------+------+--------+-----------------+]]></computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Check the information associated with the instance. In this
|
||||
example, <literal>vm1</literal> is running on
|
||||
<literal>HostB</literal>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova show d1df1b5a-70c4-4fed-98b7-423362f2c47c</userinput>
|
||||
<computeroutput><![CDATA[+-------------------------------------+----------------------------------------------------------+
|
||||
| Property | Value |
|
||||
+-------------------------------------+----------------------------------------------------------+
|
||||
...
|
||||
| OS-EXT-SRV-ATTR:host | HostB |
|
||||
...
|
||||
| flavor | m1.tiny |
|
||||
| id | d1df1b5a-70c4-4fed-98b7-423362f2c47c |
|
||||
| name | vm1 |
|
||||
| private network | a.b.c.d |
|
||||
| status | ACTIVE |
|
||||
...
|
||||
+-------------------------------------+----------------------------------------------------------+]]></computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Select the compute node the instance will be migrated to. In
|
||||
this example, we will migrate the instance to
|
||||
<literal>HostC</literal>, because
|
||||
<systemitem class="service">nova-compute</systemitem> is running
|
||||
on it:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova service-list</userinput>
|
||||
<computeroutput>+------------------+------------+----------+---------+-------+----------------------------+-----------------+
|
||||
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
|
||||
+------------------+------------+----------+---------+-------+----------------------------+-----------------+
|
||||
| nova-consoleauth | HostA | internal | enabled | up | 2014-03-25T10:33:25.000000 | - |
|
||||
| nova-scheduler | HostA | internal | enabled | up | 2014-03-25T10:33:25.000000 | - |
|
||||
| nova-conductor | HostA | internal | enabled | up | 2014-03-25T10:33:27.000000 | - |
|
||||
| nova-compute | HostB | nova | enabled | up | 2014-03-25T10:33:31.000000 | - |
|
||||
| nova-compute | HostC | nova | enabled | up | 2014-03-25T10:33:31.000000 | - |
|
||||
| nova-cert | HostA | internal | enabled | up | 2014-03-25T10:33:31.000000 | - |
|
||||
+------------------+------------+----------+---------+-------+----------------------------+-----------------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Check that <literal>HostC</literal> has enough resources for
|
||||
migration:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova host-describe HostC</userinput>
|
||||
<computeroutput>+-----------+------------+-----+-----------+---------+
|
||||
| HOST | PROJECT | cpu | memory_mb | disk_gb |
|
||||
+-----------+------------+-----+-----------+---------+
|
||||
| HostC | (total) | 16 | 32232 | 878 |
|
||||
| HostC | (used_now) | 13 | 21284 | 442 |
|
||||
| HostC | (used_max) | 13 | 21284 | 442 |
|
||||
| HostC | p1 | 13 | 21284 | 442 |
|
||||
| HostC | p2 | 13 | 21284 | 442 |
|
||||
+-----------+------------+-----+-----------+---------+</computeroutput></screen>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><literal>cpu</literal>: Number of CPUs</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>memory_mb</literal>: Total amount of memory,
|
||||
in MB</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>disk_gb</literal>: Total amount of space for
|
||||
NOVA-INST-DIR/instances, in GB</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>In this table, the first row shows the total amount of
|
||||
resources available on the physical server. The second line shows
|
||||
the currently used resources. The third line shows the maximum
|
||||
used resources. The fourth line and below shows the resources
|
||||
available for each project.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Migrate the instances using the
|
||||
<command>nova live-migration</command> command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova live-migration <replaceable>SERVER</replaceable> <replaceable>HOST_NAME</replaceable></userinput></screen>
|
||||
<para>In this example, <replaceable>SERVER</replaceable> can be the
|
||||
ID or name of the instance. Another example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova live-migration d1df1b5a-70c4-4fed-98b7-423362f2c47c HostC</userinput><computeroutput>
|
||||
<![CDATA[Migration of d1df1b5a-70c4-4fed-98b7-423362f2c47c initiated.]]></computeroutput></screen>
|
||||
<warning>
|
||||
<para>Live migration of workloads between
|
||||
Icehouse and Juno compute nodes may cause data loss,
|
||||
because libvirt live migration with shared block storage
|
||||
was buggy (potential loss of data) before version 3.32.
|
||||
This issue will be resolved by the upgrade to RPC API version 4.0.
|
||||
</para>
|
||||
</warning>
|
||||
</step>
|
||||
<step>
|
||||
<para>Check the instances have been migrated successfully, using
|
||||
<command>nova list</command>. If instances are still running on
|
||||
<literal>HostB</literal>, check the log files (on the source and destination hosts) for
|
||||
<systemitem class="service">nova-compute</systemitem> and
|
||||
<systemitem class="service">nova-scheduler</systemitem>) to
|
||||
determine why.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
|
||||
<xi:include href="../../common/section_compute-configure-console.xml"/>
|
||||
<xi:include href="section_compute-configure-service-groups.xml"/>
|
||||
<xi:include href="section_compute-security.xml"/>
|
||||
<xi:include href="section_compute-recover-nodes.xml"/>
|
||||
</section>
|
@ -1,38 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="default_ports">
|
||||
|
||||
<title>Compute service node firewall requirements</title>
|
||||
<para>Console connections for virtual machines, whether direct or through a
|
||||
proxy, are received on ports <literal>5900</literal> to
|
||||
<literal>5999</literal>. The firewall on each Compute service node must
|
||||
allow network traffic on these ports.</para>
|
||||
<para>This procedure modifies the <systemitem>iptables</systemitem> firewall
|
||||
to allow incoming connections to the Compute services.</para>
|
||||
<procedure>
|
||||
<title>Configuring the service-node firewall</title>
|
||||
<step>
|
||||
<para>Log in to the server that hosts the Compute service, as
|
||||
<systemitem>root</systemitem>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/sysconfig/iptables</filename> file, to add an
|
||||
INPUT rule that allows TCP traffic on ports from
|
||||
<literal>5900</literal> to <literal>5999</literal>. Make sure the new
|
||||
rule appears before any INPUT rules that REJECT traffic:</para>
|
||||
<programlisting language="ini">-A INPUT -p tcp -m multiport --dports 5900:5999 -j ACCEPT</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Save the changes to <filename>/etc/sysconfig/iptables</filename>,
|
||||
and restart the <systemitem>iptables</systemitem> service to pick up
|
||||
the changes:</para>
|
||||
<screen><prompt>$</prompt> <userinput>service iptables restart</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Repeat this process for each Compute service node.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
@ -1,159 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="trusted-compute-pools">
|
||||
|
||||
<title>Trusted compute pools</title>
|
||||
<para>Administrators can designate a group of compute hosts as trusted using
|
||||
trusted compute pools. The trusted hosts use hardware-based security
|
||||
features, such as the Intel Trusted Execution Technology (TXT), to provide
|
||||
an additional level of security. Combined with an external stand-alone,
|
||||
web-based remote attestation server, cloud providers can ensure that the
|
||||
compute node runs only software with verified measurements and can ensure
|
||||
a secure cloud stack.</para>
|
||||
<para>Trusted compute pools provide the ability for cloud subscribers to
|
||||
request services run only on verified compute nodes.</para>
|
||||
<para>The remote attestation server performs node verification like this:</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>Compute nodes boot with Intel TXT technology enabled.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The compute node BIOS, hypervisor, and operating system are
|
||||
measured.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>When the attestation server challenges the compute node, the
|
||||
measured data is sent to the attestation server.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The attestation server verifies the measurements against a known
|
||||
good database to determine node trustworthiness.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
<para>A description of how to set up an attestation service is beyond the
|
||||
scope of this document. For an open source project that you can use to
|
||||
implement an attestation service, see the
|
||||
<link xlink:href="https://github.com/OpenAttestation/OpenAttestation">
|
||||
Open Attestation</link> project.</para>
|
||||
<mediaobject>
|
||||
<imageobject role="fo">
|
||||
<imagedata
|
||||
fileref="../../common/figures/OpenStackTrustedComputePool1.png"
|
||||
format="PNG" contentwidth="6in"/>
|
||||
</imageobject>
|
||||
<imageobject role="html">
|
||||
<imagedata
|
||||
fileref="../../common/figures/OpenStackTrustedComputePool1.png"
|
||||
format="PNG" contentwidth="6in"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
|
||||
<procedure>
|
||||
<title>Configuring Compute to use trusted compute pools</title>
|
||||
<step>
|
||||
<para>Enable scheduling support for trusted compute pools by adding
|
||||
these lines to the <literal>DEFAULT</literal> section of the
|
||||
<filename>/etc/nova/nova.conf</filename> file:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
|
||||
scheduler_available_filters=nova.scheduler.filters.all_filters
|
||||
scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter,TrustedFilter</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Specify the connection information for your attestation service by
|
||||
adding these lines to the <literal>trusted_computing</literal> section
|
||||
of the <filename>/etc/nova/nova.conf</filename> file:</para>
|
||||
<programlisting language="ini">[trusted_computing]
|
||||
attestation_server = 10.1.71.206
|
||||
attestation_port = 8443
|
||||
# If using OAT v2.0 or later, use this port:
|
||||
# attestation_port = 8181
|
||||
attestation_server_ca_file = /etc/nova/ssl.10.1.71.206.crt
|
||||
# If using OAT v1.5, use this api_url:
|
||||
attestation_api_url = /AttestationService/resources
|
||||
# If using OAT pre-v1.5, use this api_url:
|
||||
# attestation_api_url = /OpenAttestationWebServices/V1.0
|
||||
attestation_auth_blob = i-am-openstack</programlisting>
|
||||
<para>In this example:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>server</term>
|
||||
<listitem>
|
||||
<para>Host name or IP address of the host that runs the attestation
|
||||
service</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>port</term>
|
||||
<listitem>
|
||||
<para>HTTPS port for the attestation service</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>server_ca_file</term>
|
||||
<listitem>
|
||||
<para>Certificate file used to verify the attestation server's
|
||||
identity</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>api_url</term>
|
||||
<listitem>
|
||||
<para>The attestation service's URL path</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>auth_blob</term>
|
||||
<listitem>
|
||||
<para>An authentication blob, required by the attestation service.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</step>
|
||||
<step>
|
||||
<para>Save the file, and restart the
|
||||
<systemitem class="service">nova-compute</systemitem> and
|
||||
<systemitem class="service">nova-scheduler</systemitem> services to
|
||||
pick up the changes.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
|
||||
<para>To customize the trusted compute pools, use these configuration option
|
||||
settings:</para>
|
||||
|
||||
<xi:include href="../../common/tables/nova-trustedcomputing.xml"/>
|
||||
|
||||
<procedure>
|
||||
<title>Specifying trusted flavors</title>
|
||||
<step>
|
||||
<para>Flavors can be designated as trusted using the
|
||||
<command>nova flavor-key set</command> command. In this example, the
|
||||
<literal>m1.tiny</literal> flavor is being set as trusted:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova flavor-key m1.tiny set trust:trusted_host=trusted</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>You can request that your instance is run on a trusted host by
|
||||
specifying a trusted flavor when booting the instance:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova boot --flavor m1.tiny --key_name myKeypairName --image myImageID newInstanceName</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
|
||||
<figure xml:id="concept_trusted_pool">
|
||||
<title>Trusted compute pool</title>
|
||||
<mediaobject>
|
||||
<imageobject role="fo">
|
||||
<imagedata
|
||||
fileref="../../common/figures/OpenStackTrustedComputePool2.png"
|
||||
format="PNG" contentwidth="6in"/>
|
||||
</imageobject>
|
||||
<imageobject role="html">
|
||||
<imagedata
|
||||
fileref="../../common/figures/OpenStackTrustedComputePool2.png"
|
||||
format="PNG" contentwidth="6in"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
</section>
|
@ -1,117 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_caching-layer">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Caching layer</title>
|
||||
<para>OpenStack Identity supports a caching layer that is above the
|
||||
configurable subsystems (for example, token, assignment). OpenStack Identity
|
||||
uses the <link xlink:href="http://dogpilecache.readthedocs.org/en/latest/">
|
||||
<literal>dogpile.cache</literal></link> library which allows flexible
|
||||
cache back ends. The majority of the caching configuration options are set in
|
||||
the <literal>[cache]</literal> section of the <filename>keystone.conf</filename>
|
||||
file. However, each section that has the capability to be cached usually
|
||||
has a caching boolean value that toggles caching.</para>
|
||||
<para>To enable only the token back end caching, set the values as follows:</para>
|
||||
<programlisting language="ini">[cache]
|
||||
enabled=true
|
||||
|
||||
[assignment]
|
||||
caching=false
|
||||
|
||||
[token]
|
||||
caching=true</programlisting>
|
||||
<note><para>For the Juno release, the default setting is enabled for subsystem
|
||||
caching, but the global toggle is disabled. As a result, no caching is available
|
||||
unless the global toggle for <literal>[cache]</literal> is enabled by setting
|
||||
the value to <literal>true</literal>.</para></note>
|
||||
<section xml:id="caching_layer-token">
|
||||
<title>Caching for tokens and tokens validation</title>
|
||||
<para>
|
||||
The token system has a separate <literal>cache_time</literal> configuration
|
||||
option, that can be set to a value above or below the global
|
||||
<literal>expiration_time</literal> default, allowing for different caching
|
||||
behavior from the other systems in OpenStack Identity. This option
|
||||
is set in the <literal>[token]</literal> section of the configuration file.
|
||||
</para>
|
||||
<para>
|
||||
The token revocation list cache time is handled by the configuration
|
||||
option <literal>revocation_cache_time</literal> in the <literal>[token]</literal>
|
||||
section. The revocation list is refreshed whenever a token is revoked. It
|
||||
typically sees significantly more requests than specific token retrievals
|
||||
or token validation calls.
|
||||
</para>
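<para>A sketch of a <literal>[token]</literal> section that combines these
options could look like this (the values, in seconds, are examples
only):</para>
<programlisting language="ini">[token]
caching = true
cache_time = 300
revocation_cache_time = 3600</programlisting>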
|
||||
<para>Here is a list of actions that are affected by the cached time:
|
||||
getting a new token, revoking tokens, validating tokens, checking v2 tokens,
|
||||
and checking v3 tokens.</para>
|
||||
<para>The delete token API calls invalidate the cache for the tokens being
|
||||
acted upon, as well as invalidating the cache for the revoked token list
|
||||
and the validate/check token calls.</para>
|
||||
<para>Token caching is configurable independently of the <literal>revocation_list</literal>
|
||||
caching. Expiration checks have been moved from the token drivers to the token manager.
|
||||
This ensures that cached tokens will still raise a <literal>TokenNotFound</literal>
|
||||
exception when expired.</para>
|
||||
<para>For cache consistency, all token IDs are transformed into the short
|
||||
token hash at the provider and token driver level. Some methods have access
|
||||
to the full ID (PKI Tokens), and some methods do not. Cache invalidation is
|
||||
inconsistent without token ID normalization.</para>
|
||||
</section>
|
||||
<section xml:id="caching_layer-assignment">
|
||||
<title>Caching around assignment CRUD</title>
|
||||
<para>The assignment system has a separate <literal>cache_time</literal>
|
||||
configuration option, that can be set to a value above or below the global
|
||||
<literal>expiration_time</literal> default, allowing for different caching
|
||||
behavior from the other systems in Identity service. This option
|
||||
is set in the <literal>[assignment]</literal> section of the configuration file.</para>
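<para>For example, an <literal>[assignment]</literal> section with its own
cache time could look like this (the value, in seconds, is an example
only):</para>
<programlisting language="ini">[assignment]
caching = true
cache_time = 600</programlisting>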
|
||||
<para>
|
||||
Currently <literal>assignment</literal> has caching for <literal>project</literal>,
|
||||
<literal>domain</literal>, and <literal>role</literal> specific requests
|
||||
(primarily around the CRUD actions). Caching is currently not implemented
|
||||
on grants. The <literal>list</literal> methods are not subject to caching.</para>
|
||||
<para>Here is a list of actions that are affected by assignment caching:
|
||||
assign domain API, assign project API, and assign role API.</para>
|
||||
<para>The create, update, and delete actions for domains, projects and
|
||||
roles will perform proper invalidations of the cached methods listed
|
||||
above.</para>
|
||||
<note>
|
||||
<para>
|
||||
If a read-only <literal>assignment</literal> back end is in
|
||||
use, the cache will not immediately reflect changes on the back end. Any
|
||||
given change may take up to the <literal>cache_time</literal> (if set in
|
||||
the <literal>[assignment]</literal> section of the configuration file)
|
||||
or the global <literal>expiration_time</literal> (set in the <literal>[cache]</literal>
|
||||
section of the configuration file) before it is reflected. If this type
|
||||
of delay (when using a read-only <literal>assignment</literal> back end)
|
||||
is an issue, it is recommended that caching be disabled on
|
||||
<literal>assignment</literal>. To disable caching specifically on
|
||||
<literal>assignment</literal>, in the <literal>[assignment]</literal>
|
||||
section of the configuration set <literal>caching</literal> to
|
||||
<literal>False</literal>.</para>
|
||||
</note>
|
||||
<para>For more information about the different back ends (and configuration options), see:</para>
|
||||
<itemizedlist>
|
||||
<listitem><para><link xlink:href="http://dogpilecache.readthedocs.org/en/latest/api.html#memory-backend">
|
||||
<literal>dogpile.cache.backends.memory</literal></link></para></listitem>
|
||||
<listitem><para><link xlink:href="http://dogpilecache.readthedocs.org/en/latest/api.html#memcached-backends">
|
||||
<literal>dogpile.cache.backends.memcached</literal></link></para>
|
||||
<note><para>The memory back end is not suitable for use in a production environment.</para></note></listitem>
|
||||
<listitem><para><link xlink:href="http://dogpilecache.readthedocs.org/en/latest/api.html#redis-backends">
|
||||
<literal>dogpile.cache.backends.redis</literal></link></para></listitem>
|
||||
<listitem><para><link xlink:href="http://dogpilecache.readthedocs.org/en/latest/api.html#file-backends">
|
||||
<literal>dogpile.cache.backends.file</literal></link></para></listitem>
|
||||
<listitem><para><literal>keystone.common.cache.backends.mongo</literal></para></listitem>
|
||||
</itemizedlist>
|
||||
<example>
|
||||
<title>Configure the Memcached back end</title>
|
||||
<para>The following example shows how to configure the memcached back end:</para>
|
||||
<programlisting language="ini">[cache]
|
||||
|
||||
enabled = true
|
||||
backend = dogpile.cache.memcached
|
||||
backend_argument = url:<replaceable>127.0.0.1</replaceable>:11211</programlisting>
|
||||
<para>You need to specify the URL to reach the <literal>memcached</literal> instance with the <literal>backend_argument</literal> parameter.</para>
|
||||
</example>
|
||||
</section>
|
||||
</section>
|
@ -1,63 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="keystone-token-binding">
|
||||
<title>Configure Identity service for token binding</title>
|
||||
<para>Token binding embeds information from an external
|
||||
authentication mechanism, such as a Kerberos server or X.509 certificate,
|
||||
inside a token. By using token binding, a client can enforce the use of a
|
||||
specified external authentication mechanism with the token. This
|
||||
additional security mechanism ensures that if a token is stolen,
|
||||
for example, it is not usable without external
|
||||
authentication.</para>
|
||||
<para>You configure the authentication types for a token binding in
|
||||
the <filename>keystone.conf</filename> file:</para>
|
||||
<programlisting language="ini">[token]
|
||||
bind = kerberos</programlisting>
|
||||
<para>or</para>
|
||||
<programlisting language="ini">[token]
|
||||
bind = x509</programlisting>
|
||||
<para>Currently <literal>kerberos</literal> and <literal>x509</literal>
|
||||
are supported.</para>
|
||||
<para>To enforce checking of token binding, set the
|
||||
<option>enforce_token_bind</option> option to one of these
|
||||
modes:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><literal>disabled</literal></para>
|
||||
<para>Disables token bind checking.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>permissive</literal></para>
|
||||
<para>Enables bind checking. If a token is bound to an unknown
|
||||
authentication mechanism, the server ignores it. This is the
|
||||
default mode.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>strict</literal></para>
|
||||
<para>Enables bind checking. If a token is bound to an unknown
|
||||
authentication mechanism, the server rejects it.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>required</literal></para>
|
||||
<para>Enables bind checking. Requires use of at least one
|
||||
authentication mechanism for tokens.</para>
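<para>A minimal sketch of this mode (any supported bind type satisfies the
check):</para>
<programlisting language="ini">[token]
enforce_token_bind = required</programlisting>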
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>kerberos</literal></para>
|
||||
<para>Enables bind checking. Requires use of Kerberos as the
|
||||
authentication mechanism for tokens:</para>
|
||||
<programlisting language="ini">[token]
|
||||
enforce_token_bind = kerberos</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>x509</literal></para>
|
||||
<para>Enables bind checking. Requires use of X.509 as the
|
||||
authentication mechanism for tokens:</para>
|
||||
<programlisting language="ini">[token]
|
||||
enforce_token_bind = x509</programlisting>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
@ -1,72 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_keystone-trusts">
|
||||
<?dbhtml stop-chunking?>
|
||||
<title>Use trusts</title>
|
||||
<para>OpenStack Identity manages authentication and authorization. A trust is
|
||||
an OpenStack Identity extension that enables delegation and, optionally,
|
||||
impersonation through <literal>keystone</literal>. A trust extension defines
|
||||
a relationship between:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>Trustor</term>
|
||||
<listitem><para>The user delegating a limited set of their own rights
|
||||
to another user.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Trustee</term>
|
||||
<listitem><para>The user to whom the trust is delegated, for a limited
|
||||
time.</para></listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
<para>The trust can eventually allow the trustee to impersonate the trustor.
|
||||
For security reasons, some safeguards are in place. For example, if a trustor
|
||||
loses a given role, any trusts the user issued with that role, and the
|
||||
related tokens, are automatically revoked.</para>
|
||||
<para>The delegation parameters are:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>User ID</term>
|
||||
<listitem><para>The user IDs for the trustor and trustee.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Privileges</term>
|
||||
<listitem><para>The delegated privileges are a combination of a tenant
|
||||
ID and a number of roles that must be a subset of the roles assigned to
|
||||
the trustor.</para>
|
||||
<para>If you omit all privileges, nothing is delegated. You cannot
|
||||
delegate everything.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Delegation depth</term>
|
||||
<listitem><para>Defines whether or not the delegation is recursive. If
|
||||
it is recursive, defines the delegation chain length.</para>
|
||||
<para>Specify one of the following values:</para>
|
||||
<itemizedlist>
|
||||
<listitem><para><literal>0</literal>. The delegate cannot delegate
|
||||
these permissions further.</para></listitem>
|
||||
<listitem><para><literal>1</literal>. The delegate can delegate the
|
||||
permissions to any set of delegates but the latter cannot delegate
|
||||
further.</para></listitem>
|
||||
<listitem><para><literal>inf</literal>. The delegation is infinitely
|
||||
recursive.</para></listitem>
|
||||
</itemizedlist></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Endpoints</term>
|
||||
<listitem><para>A list of endpoints associated with the delegation.</para>
|
||||
<para>This parameter further restricts the delegation to the specified
|
||||
endpoints only. If you omit the endpoints, the delegation is useless.
|
||||
A special value of <literal>all_endpoints</literal> allows the trust
|
||||
to be used by all endpoints associated with the delegated tenant.</para></listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Duration</term>
|
||||
<listitem><para>(Optional) The start time and end time for
|
||||
the trust.</para></listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</section>
|
@ -1,53 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE section [
|
||||
<!ENTITY % openstack SYSTEM "../../common/entities/openstack.ent">
|
||||
%openstack;
|
||||
]>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="glance-nova-image-download">
|
||||
<title>Image download: how it works</title>
|
||||
<para>
|
||||
Prior to starting a virtual machine, the virtual machine image
|
||||
used must be transferred to the compute node from the Image
|
||||
service. How this works can change depending on the settings
|
||||
chosen for the compute node and the Image service.
|
||||
</para>
|
||||
<para>
|
||||
Typically, the Compute service will use the image identifier
|
||||
passed to it by the scheduler service and request the image from the
|
||||
Image API. Though images are not stored in glance—rather in a
|
||||
back end, which could be Object Storage, a filesystem or any other
|
||||
supported method—the connection is made from the compute node
|
||||
to the Image service and the image is transferred over this
|
||||
connection. The Image service streams the image from the back end to the
|
||||
compute node.
|
||||
</para>
|
||||
<para>
|
||||
It is possible to set up the Object Storage node on a separate network,
|
||||
and still allow image traffic to flow between the Compute and Object
|
||||
Storage nodes. Configure the <literal>my_block_storage_ip</literal>
|
||||
option in the storage node configuration to allow block storage traffic
|
||||
to reach the Compute node.
|
||||
</para>
|
||||
<para>
|
||||
Certain back ends support a more direct method, where on request
|
||||
the Image service will return a URL that can be used to
|
||||
download the image directly from the back-end store. Currently the
|
||||
only store to support the direct download approach is the
|
||||
filesystem store. It can be configured using the
|
||||
<option>filesystems</option> option in the
|
||||
<literal>image_file_url</literal> section of the
|
||||
<filename>nova.conf</filename> file on compute nodes.
|
||||
</para>
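<para>A hypothetical <filename>nova.conf</filename> fragment for a compute node
that mounts the same filesystem store as the Image service might look like the
following; the store ID and mount point are assumptions for illustration
only:</para>
<programlisting language="ini">[image_file_url]
# Name of the section below that describes the shared filesystem store
filesystems = shared_store

[image_file_url:shared_store]
# Must match the store ID advertised by the Image service
id = b9fa81da-54c4-4432-87d6-a57fbd0e0d62
mountpoint = /var/lib/images</programlisting>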
|
||||
<para>
|
||||
Compute nodes also implement caching of images, meaning that if an
|
||||
image has been used before it won't necessarily be downloaded
|
||||
every time. Information on the configuration options for caching
|
||||
on compute nodes can be found in the <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/"><citetitle>Configuration
|
||||
Reference</citetitle></link>.
|
||||
</para>
|
||||
</section>
|
@ -1,106 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="glance-property-protection">
|
||||
<title>Image properties and property protection</title>
|
||||
<para>An image property is a key and value pair that the cloud
|
||||
administrator or the image owner attaches to an OpenStack Image
|
||||
Service image, as follows:</para>
|
||||
<para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>The cloud administrator defines <emphasis role="italic"
|
||||
>core</emphasis> properties, such as the image
|
||||
name.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The cloud administrator and the image owner can define
|
||||
<emphasis role="italic">additional</emphasis> properties,
|
||||
such as licensing and billing information.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<para>The cloud administrator can configure any property as
|
||||
<firstterm>protected</firstterm>, which limits which policies or
|
||||
user roles can perform CRUD operations on that property. Protected
|
||||
properties are generally additional properties to which only cloud
|
||||
administrators have access.</para>
|
||||
<para>For unprotected image properties, the cloud administrator can
|
||||
manage core properties and the image owner can manage additional
|
||||
properties.</para>
|
||||
<procedure>
|
||||
<title>To configure property protection</title>
|
||||
<para>To configure property protection, the cloud administrator
|
||||
completes these steps:</para>
|
||||
<step>
|
||||
<para>Define roles or policies in the
|
||||
<filename>policy.json</filename> file:</para>
|
||||
<programlisting language="json"><xi:include parse="text"
|
||||
href="https://git.openstack.org/cgit/openstack/glance/plain/etc/policy.json?h=stable/kilo"/></programlisting>
|
||||
<para>For each parameter, use <literal>"rule:restricted"</literal> to
|
||||
restrict access to all users or <literal>"role:admin"</literal>
|
||||
to limit access to administrator roles. For example:</para>
|
||||
<programlisting>"download_image": <replaceable>"rule:restricted"</replaceable>
|
||||
"upload_image": <replaceable>"role:admin"</replaceable></programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Define which roles or policies can manage which properties
|
||||
in a property protections configuration file. For
|
||||
example:</para>
|
||||
<programlisting language="ini">[x_none_read]
|
||||
create = context_is_admin
|
||||
read = !
|
||||
update = !
|
||||
delete = !
|
||||
|
||||
[x_none_update]
|
||||
create = context_is_admin
|
||||
read = context_is_admin
|
||||
update = !
|
||||
delete = context_is_admin
|
||||
|
||||
[x_none_delete]
|
||||
create = context_is_admin
|
||||
read = context_is_admin
|
||||
update = context_is_admin
|
||||
delete = !</programlisting>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>A value of <literal>@</literal> allows the
|
||||
corresponding operation for a property.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A value of <literal>!</literal> disallows the
|
||||
corresponding operation for a property.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</step>
|
||||
<step>
|
||||
<para>In the <filename>glance-api.conf</filename> file, define
|
||||
the location of a property protections configuration
|
||||
file:</para>
|
||||
<programlisting language="ini">property_protection_file = {file_name}</programlisting>
|
||||
<para>This file contains the rules for property protections and
|
||||
the roles and policies associated with it.</para>
|
||||
<para>By default, property protections are not enforced.</para>
|
||||
<para>If you specify a file name value and the file is not
|
||||
found, the <systemitem role="service">glance-api</systemitem>
|
||||
service does not start.</para>
|
||||
<para>To view a sample configuration file, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/section_glance-api.conf.html"
|
||||
>glance-api.conf</link>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Optionally, in the <filename>glance-api.conf</filename>
|
||||
file, specify whether roles or policies are used in the
|
||||
property protections configuration file:</para>
|
||||
<programlisting language="ini">property_protection_rule_format = roles</programlisting>
|
||||
<para>The default is <literal>roles</literal>.</para>
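<para>A hypothetical roles-based property protections file, with an
illustrative property name and role list, might look like this:</para>
<programlisting language="ini">[x_billing_code_.*]
create = admin,billing
read = admin,billing
update = admin
delete = admin</programlisting>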
|
||||
<para>To view a sample configuration file, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/section_glance-api.conf.html"
|
||||
>glance-api.conf</link>.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
File diff suppressed because one or more lines are too long
@ -1,69 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
|
||||
xml:id="section_networking-advanced-config">
|
||||
<title>Advanced configuration options</title>
|
||||
<para>This section describes advanced configuration options for various system components. For
|
||||
example, configuration options whose defaults work but that you might want to customize.
|
||||
After installing from packages, <literal>$NEUTRON_CONF_DIR</literal> is
|
||||
<filename>/etc/neutron</filename>.</para>
|
||||
<section xml:id="section_adv_cfg_l3_metering_agent">
|
||||
<title>L3 metering agent</title>
|
||||
<para>You can run an L3 metering agent that enables layer-3 traffic metering. In general,
|
||||
you should launch the metering agent on all nodes that run the L3 agent:</para>
|
||||
<screen><userinput>neutron-metering-agent --config-file <replaceable>NEUTRON_CONFIG_FILE</replaceable> --config-file <replaceable>L3_METERING_CONFIG_FILE</replaceable></userinput></screen>
|
||||
<para>You must configure a driver that matches the plug-in that runs on the service. The
|
||||
driver adds metering to the routing interface.</para>
|
||||
<table rules="all">
|
||||
<caption>Settings</caption>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Option</th>
|
||||
<th>Value</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Open vSwitch</emphasis></td>
|
||||
<td/>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>interface_driver ($NEUTRON_CONF_DIR/metering_agent.ini)</td>
|
||||
<td>neutron.agent.linux.interface.OVSInterfaceDriver</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Linux Bridge</emphasis></td>
|
||||
<td/>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>interface_driver ($NEUTRON_CONF_DIR/metering_agent.ini)</td>
|
||||
<td>neutron.agent.linux.interface.BridgeInterfaceDriver</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<section xml:id="adv_cfg_l3_metering_agent_namespace">
|
||||
<title>Namespace</title>
|
||||
<para>The metering agent and the L3 agent must have the same network namespaces
|
||||
configuration.</para>
|
||||
<note>
|
||||
<para>If the Linux installation does not support network namespaces, you must
|
||||
disable network namespaces in the L3 metering configuration file. The default
|
||||
value of the <option>use_namespaces</option> option is <code>True</code>.</para>
|
||||
</note>
|
||||
<para><programlisting language="ini">use_namespaces = False</programlisting></para>
|
||||
</section>
|
||||
<section xml:id="adv_cfg_l3_metering_agent_driver">
|
||||
<title>L3 metering driver</title>
|
||||
<para>You must configure any driver that implements the metering abstraction. Currently
|
||||
the only available implementation uses iptables for metering.</para>
|
||||
<para><programlisting language="ini">driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver</programlisting></para>
|
||||
</section>
|
||||
<section xml:id="adv_cfg_l3_metering_service_driver">
|
||||
<title>L3 metering service driver</title>
|
||||
<para>To enable L3 metering, you must set the following option in the
|
||||
<filename>neutron.conf</filename> file on the host that runs <systemitem
|
||||
class="service">neutron-server</systemitem>:</para>
|
||||
<programlisting language="ini">service_plugins = metering</programlisting>
|
||||
</section>
|
||||
</section>
|
||||
</section>
|
@ -1,323 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_networking-config-identity">
|
||||
<title>Configure Identity Service for Networking</title>
|
||||
<procedure>
|
||||
<title>To configure the Identity Service for use with
|
||||
Networking</title>
|
||||
<step>
|
||||
<title>Create the <function>get_id()</function> function</title>
|
||||
<para>The <function>get_id()</function> function stores the ID of created objects, and removes
|
||||
the need to copy and paste object IDs in later steps:</para>
|
||||
<substeps>
|
||||
<step>
|
||||
<para>Add the following function to your
|
||||
<filename>.bashrc</filename> file:</para>
|
||||
<programlisting>function get_id () {
|
||||
echo `"$@" | awk '/ id / { print $4 }'`
|
||||
}</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Source the <filename>.bashrc</filename> file:</para>
|
||||
<screen><prompt>$</prompt> <userinput>source .bashrc</userinput></screen>
|
||||
</step>
|
||||
</substeps>
|
||||
</step>
|
||||
<step>
|
||||
<title>Create the Networking service entry</title>
|
||||
<para>Networking must be available in the Compute service catalog. Create the service:</para>
|
||||
<screen><prompt>$</prompt> <userinput>NEUTRON_SERVICE_ID=$(get_id keystone service-create --name neutron --type network --description 'OpenStack Networking Service')</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<title>Create the Networking service endpoint
|
||||
entry</title>
|
||||
<para>The way that you create a Networking endpoint entry depends on whether you are using the
|
||||
SQL or the template catalog driver:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>If you use the <emphasis>SQL driver</emphasis>, run the following command with the
|
||||
specified region (<literal>$REGION</literal>), IP address of the Networking server
|
||||
(<literal>$IP</literal>), and service ID (<literal>$NEUTRON_SERVICE_ID</literal>,
|
||||
obtained in the previous step).</para>
|
||||
|
||||
<screen><prompt>$</prompt> <userinput>keystone endpoint-create --region $REGION --service-id $NEUTRON_SERVICE_ID \
|
||||
--publicurl 'http://$IP:9696/' --adminurl 'http://$IP:9696/' --internalurl 'http://$IP:9696/'</userinput></screen>
|
||||
<para>For example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>keystone endpoint-create --region myregion --service-id $NEUTRON_SERVICE_ID \
|
||||
--publicurl "http://10.211.55.17:9696/" --adminurl "http://10.211.55.17:9696/" --internalurl "http://10.211.55.17:9696/" </userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>If you are using the <emphasis>template driver</emphasis>, specify the following
|
||||
parameters in your Compute catalog template file
|
||||
(<filename>default_catalog.templates</filename>), along with the region
|
||||
(<literal>$REGION</literal>) and IP address of the Networking server
|
||||
(<literal>$IP</literal>).</para>
|
||||
<programlisting language="bash">catalog.$REGION.network.publicURL = http://$IP:9696
|
||||
catalog.$REGION.network.adminURL = http://$IP:9696
|
||||
catalog.$REGION.network.internalURL = http://$IP:9696
|
||||
catalog.$REGION.network.name = Network Service</programlisting>
|
||||
<para>For example:</para>
|
||||
<programlisting language="bash">catalog.$Region.network.publicURL = http://10.211.55.17:9696
|
||||
catalog.$Region.network.adminURL = http://10.211.55.17:9696
|
||||
catalog.$Region.network.internalURL = http://10.211.55.17:9696
|
||||
catalog.$Region.network.name = Network Service</programlisting>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</step>
|
||||
<step>
|
||||
<title>Create the Networking service user</title>
|
||||
<para>You must provide admin user credentials that Compute and some internal Networking
|
||||
components can use to access the Networking API. Create a special <literal>service</literal>
|
||||
tenant and a <literal>neutron</literal> user within this tenant, and assign an
|
||||
<literal>admin</literal> role to this user.</para>
|
||||
<substeps>
|
||||
<step>
|
||||
<para>Create the <literal>admin</literal> role:</para>
|
||||
<screen><prompt>$</prompt> <userinput>ADMIN_ROLE=$(get_id keystone role-create --name admin)
|
||||
</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create the <literal>neutron</literal> user:</para>
|
||||
<screen><prompt>$</prompt> <userinput>NEUTRON_USER=$(get_id keystone user-create --name neutron --pass "$NEUTRON_PASSWORD" --email demo@example.com --tenant-id service)
|
||||
</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create the <literal>service</literal> tenant:</para>
|
||||
<screen><prompt>$</prompt> <userinput>SERVICE_TENANT=$(get_id keystone tenant-create --name service --description "Services Tenant")</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Establish the relationship among the tenant, user, and
|
||||
role:</para>
|
||||
<screen><prompt>$</prompt> <userinput>keystone user-role-add --user_id $NEUTRON_USER --role_id $ADMIN_ROLE --tenant_id $SERVICE_TENANT</userinput></screen>
|
||||
</step>
|
||||
</substeps>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>For information about how to create service entries and users, see the <citetitle>OpenStack
|
||||
Installation Guide</citetitle> for your distribution (<link xlink:href="http://docs.openstack.org"
|
||||
>docs.openstack.org</link>).</para>
|
||||
<section xml:id="nova_with_neutron">
|
||||
<title>Compute</title>
|
||||
<para>If you use Networking, do not run the Compute <systemitem class="service"
|
||||
>nova-network</systemitem> service (as you do in traditional Compute deployments).
|
||||
Instead, Compute delegates most network-related decisions to Networking. Compute proxies
|
||||
tenant-facing API calls to manage security groups and floating IPs to Networking APIs.
|
||||
However, operator-facing tools such as <systemitem class="service">nova-manage</systemitem>,
|
||||
are not proxied and should not be used.</para>
|
||||
<warning>
|
||||
<para>When you configure networking, you must use this guide. Do not rely on Compute
|
||||
networking documentation or past experience with Compute. If a <command>nova</command>
|
||||
command or configuration option related to networking is not mentioned in this guide, the
|
||||
command is probably not supported for use with Networking. In particular, you cannot use CLI
|
||||
tools like <command>nova-manage</command> and <command>nova</command> to manage networks or
|
||||
IP addressing, including both fixed and floating IPs, with Networking.</para>
|
||||
</warning>
|
||||
<note>
|
||||
<para>Uninstall <systemitem class="service">nova-network</systemitem> and reboot any physical
|
||||
nodes that have been running <systemitem class="service">nova-network</systemitem> before
|
||||
using them to run Networking. Inadvertently running the <systemitem class="service"
|
||||
>nova-network</systemitem> process while using Networking can cause problems, as can stale
|
||||
iptables rules pushed down by previously running <systemitem class="service"
|
||||
>nova-network</systemitem>.</para>
|
||||
|
||||
</note>
|
||||
<para>To ensure that Compute works properly with Networking
|
||||
(rather than the legacy <systemitem
|
||||
class="service">nova-network</systemitem> mechanism), you must
|
||||
adjust settings in the <filename>nova.conf</filename>
|
||||
configuration file.</para>
|
||||
</section>
|
||||
<section xml:id="nova_with_neutron_api">
|
||||
<title>Networking API and credential configuration</title>
|
||||
<para>Each time you provision or de-provision a VM in Compute, <systemitem class="service"
|
||||
>nova-*</systemitem> services communicate with Networking using the standard API. For this
|
||||
to happen, you must configure the following items in the <filename>nova.conf</filename> file
|
||||
(used by each <systemitem class="service">nova-compute</systemitem> and <systemitem
|
||||
class="service">nova-api</systemitem> instance).</para>
|
||||
<table rules="all">
|
||||
<caption>nova.conf API and credential settings</caption>
|
||||
<col width="30%"/>
|
||||
<col width="70%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Item</th>
|
||||
<th>Configuration</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><para><literal>[DEFAULT] network_api_class</literal></para></td>
|
||||
<td>
|
||||
<para>Modify from the default to
|
||||
<literal>nova.network.neutronv2.api.API</literal>, to
|
||||
indicate that Networking should be used rather than the
|
||||
traditional <systemitem class="service" >nova-network
|
||||
</systemitem> networking model.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>[neutron] url</literal></para></td>
|
||||
<td><para>Update to the hostname/IP and port of the
|
||||
<systemitem class="service"
|
||||
>neutron-server</systemitem> instance for this
|
||||
deployment.</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>[neutron] auth_strategy</literal></para></td>
|
||||
<td><para>Keep the default <literal>keystone</literal> value
|
||||
for all production deployments.</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>[neutron] admin_tenant_name</literal></para></td>
|
||||
<td>
|
||||
<para>Update to the name of the service tenant created in
|
||||
the above section on Identity configuration.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>[neutron] admin_username</literal></para></td>
|
||||
<td>
|
||||
<para>Update to the name of the user created in the above
|
||||
section on Identity configuration.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>[neutron] admin_password</literal></para></td>
|
||||
<td>
|
||||
<para>Update to the password of the user created in the
|
||||
above section on Identity configuration.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>[neutron] admin_auth_url</literal></para></td>
|
||||
<td>
|
||||
<para>Update to the Identity server IP and port. This is
|
||||
the Identity (keystone) admin API server IP and port
|
||||
value, and not the Identity service API IP and
|
||||
port.</para>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</section>
|
||||
<section xml:id="nova_config_security_groups">
|
||||
<title>Configure security groups</title>
|
||||
<para>The Networking Service provides security group functionality using a mechanism that is
|
||||
more flexible and powerful than the security group capabilities built into Compute. Therefore,
|
||||
if you use Networking, you should always disable built-in security groups and proxy all
|
||||
security group calls to the Networking API. If you do not, security policies will conflict by
|
||||
being simultaneously applied by both services.</para>
|
||||
<para>To proxy security groups to Networking, use the following configuration values in
|
||||
<filename>nova.conf</filename>:</para>
|
||||
<table rules="all">
|
||||
<caption>nova.conf security group settings</caption>
|
||||
<col width="25%"/>
|
||||
<col width="75%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<td>Item</td>
|
||||
<td>Configuration</td>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><para><literal>firewall_driver</literal></para></td>
|
||||
<td><para>Update to
|
||||
<literal>nova.virt.firewall.NoopFirewallDriver</literal>,
|
||||
so that <systemitem class="service"
|
||||
>nova-compute</systemitem> does not perform
|
||||
iptables-based filtering itself.</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>security_group_api</literal></para></td>
|
||||
<td><para>Update to <literal>neutron</literal>, so that all security group requests are proxied to the
|
||||
Network Service.</para></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</section>
|
||||
<section xml:id="nova_config_metadata">
|
||||
<title>Configure metadata</title>
|
||||
<para>The Compute service allows VMs to query metadata associated with a VM by making a web
|
||||
request to a special 169.254.169.254 address. Networking supports proxying those requests to
|
||||
<systemitem class="service">nova-api</systemitem>, even when the requests are made from
|
||||
isolated networks, or from multiple networks that use overlapping IP addresses.</para>
|
||||
<para>To enable proxying the requests, you must update the
|
||||
following fields in <literal>[neutron]</literal> section in
|
||||
<filename>nova.conf</filename>.</para>
|
||||
<table rules="all">
|
||||
<caption>nova.conf metadata settings</caption>
|
||||
<col width="45%"/>
|
||||
<col width="55%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<td>Item</td>
|
||||
<td>Configuration</td>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><para><literal>service_metadata_proxy</literal>
|
||||
</para></td>
|
||||
<td><para>Update to <literal>true</literal>, otherwise
|
||||
<systemitem class="service">nova-api</systemitem> will
|
||||
not properly respond to requests from the <systemitem
|
||||
class="service">neutron-metadata-agent</systemitem>.
|
||||
</para></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para><literal>metadata_proxy_shared_secret</literal>
|
||||
</para></td>
|
||||
<td><para>Update to a string "password" value. You must also
|
||||
configure the same value in the
|
||||
<filename>metadata_agent.ini</filename> file, to
|
||||
authenticate requests made for metadata.</para>
|
||||
<para>The default value of an empty string in both files
|
||||
will allow metadata to function, but will not be secure
|
||||
if any non-trusted entities have access to the metadata
|
||||
APIs exposed by <systemitem class="service"
|
||||
>nova-api</systemitem>.</para></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
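<para>On the network node, a matching <filename>metadata_agent.ini</filename>
fragment might look like the following sketch; the secret is an example only
and must be identical to the value in <filename>nova.conf</filename>:</para>
<programlisting language="ini">[DEFAULT]
# Must match metadata_proxy_shared_secret in nova.conf
metadata_proxy_shared_secret = foo
# Address of the nova-api metadata service (assumed management IP)
nova_metadata_ip = 192.168.1.2</programlisting>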
|
||||
<note>
|
||||
<para>As a precaution, even when using
|
||||
<literal>metadata_proxy_shared_secret</literal>, it
|
||||
is recommended that you do not expose metadata using the same
|
||||
<systemitem class="service">nova-api</systemitem> instances
|
||||
that are used for tenants. Instead, you should run a dedicated
|
||||
set of <systemitem class="service">nova-api</systemitem>
|
||||
instances for metadata that are available only on your
|
||||
management network. Whether a given <systemitem
|
||||
class="service">nova-api</systemitem> instance exposes
|
||||
metadata APIs is determined by the value of
|
||||
<literal>enabled_apis</literal> in its
|
||||
<filename>nova.conf</filename>.</para>
|
||||
</note>
|
||||
</section>
|
||||
<section xml:id="nova_with_neutron_example">
|
||||
<title>Example nova.conf (for <systemitem class="service"
|
||||
>nova-compute</systemitem> and <systemitem class="service"
|
||||
>nova-api</systemitem>)</title>
|
||||
<para>Example values for the above settings, assuming a cloud controller node running Compute
|
||||
and Networking with an IP address of 192.168.1.2:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
security_group_api=neutron
|
||||
network_api_class=nova.network.neutronv2.api.API
|
||||
firewall_driver=nova.virt.firewall.NoopFirewallDriver
|
||||
|
||||
[neutron]
|
||||
url=http://192.168.1.2:9696
|
||||
auth_strategy=keystone
|
||||
admin_tenant_name=service
|
||||
admin_username=neutron
|
||||
admin_password=password
|
||||
admin_auth_url=http://192.168.1.2:35357/v2.0
|
||||
service_metadata_proxy=true
|
||||
metadata_proxy_shared_secret=foo
|
||||
</programlisting>
|
||||
</section>
|
||||
</section>
|
@ -1,501 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="app_demo_multi_dhcp_agents">
|
||||
<title>Scalable and highly available DHCP agents</title>
|
||||
<para>This section describes how to use the agent management
|
||||
(alias agent) and scheduler (alias agent_scheduler) extensions
|
||||
for DHCP agent scalability and high availability (HA).</para>
|
||||
<note>
|
||||
<para>Use the <command>neutron ext-list</command> client
|
||||
command to check if these extensions are enabled:
|
||||
<screen><prompt>$</prompt> <userinput>neutron ext-list -c name -c alias</userinput>
|
||||
<computeroutput>+-----------------+--------------------------+
|
||||
| alias | name |
|
||||
+-----------------+--------------------------+
|
||||
| agent_scheduler | Agent Schedulers |
|
||||
| binding | Port Binding |
|
||||
| quotas | Quota management support |
|
||||
| agent | agent |
|
||||
| provider | Provider Network |
|
||||
| router | Neutron L3 Router |
|
||||
| lbaas | Load Balancing service |
|
||||
| extraroute | Neutron Extra Route |
|
||||
+-----------------+--------------------------+</computeroutput></screen></para>
|
||||
</note>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/demo_multiple_dhcp_agents.png"
|
||||
contentwidth="6in"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
<para>There will be three hosts in the setup.
|
||||
<table rules="all">
|
||||
<caption>Hosts for demo</caption>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Host</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>OpenStack controller host - controlnode</td>
|
||||
<td><para>Runs the Networking, Identity, and Compute
|
||||
services that are required to deploy VMs.
|
||||
The node must have at least one network
|
||||
interface that is connected to the
|
||||
Management Network.</para>
|
||||
<para>Note that <systemitem class="service"
|
||||
>nova-network</systemitem> should
|
||||
not be running because it is replaced
|
||||
by Neutron.</para>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>HostA</td>
|
||||
<td>Runs <systemitem
|
||||
class="service">nova-compute</systemitem>, the
|
||||
Neutron L2 agent and DHCP agent</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>HostB</td>
|
||||
<td>Same as HostA</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</para>
|
||||
<section xml:id="multi_agent_demo_configuration">
|
||||
<title>Configuration</title>
|
||||
<procedure>
|
||||
<title>controlnode: neutron server</title>
|
||||
<step>
|
||||
<para>Neutron configuration file
|
||||
<filename>/etc/neutron/neutron.conf</filename>:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
core_plugin = linuxbridge
|
||||
rabbit_host = controlnode
|
||||
allow_overlapping_ips = True
|
||||
host = controlnode
|
||||
agent_down_time = 5</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Update the plug-in configuration file
|
||||
<filename>/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini</filename>:</para>
|
||||
<programlisting language="ini">[vlans]
|
||||
tenant_network_type = vlan
|
||||
network_vlan_ranges = physnet1:1000:2999
|
||||
[database]
|
||||
connection = mysql://root:root@127.0.0.1:3306/neutron_linux_bridge
|
||||
retry_interval = 2
|
||||
[linux_bridge]
|
||||
physical_interface_mappings = physnet1:eth0</programlisting>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>HostA and HostB: L2 agent</title>
|
||||
<step>
|
||||
<para>Neutron configuration file
|
||||
<filename>/etc/neutron/neutron.conf</filename>:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
rabbit_host = controlnode
|
||||
rabbit_password = openstack
|
||||
# host = HostB on hostb
|
||||
host = HostA</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Update the plug-in configuration file
|
||||
<filename>/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini</filename>:</para>
|
||||
<programlisting language="ini">[vlans]
|
||||
tenant_network_type = vlan
|
||||
network_vlan_ranges = physnet1:1000:2999
|
||||
[database]
|
||||
connection = mysql://root:root@127.0.0.1:3306/neutron_linux_bridge
|
||||
retry_interval = 2
|
||||
[linux_bridge]
|
||||
physical_interface_mappings = physnet1:eth0</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Update the nova configuration file
|
||||
<filename>/etc/nova/nova.conf</filename>:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
network_api_class=nova.network.neutronv2.api.API
|
||||
firewall_driver=nova.virt.firewall.NoopFirewallDriver
|
||||
|
||||
[neutron]
|
||||
admin_username=neutron
|
||||
admin_password=servicepassword
|
||||
admin_auth_url=http://controlnode:35357/v2.0/
|
||||
auth_strategy=keystone
|
||||
admin_tenant_name=servicetenant
|
||||
url=http://100.1.1.10:9696/</programlisting>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>HostA and HostB: DHCP agent</title>
|
||||
<step>
|
||||
<para>Update the DHCP configuration file
|
||||
<filename>/etc/neutron/dhcp_agent.ini</filename>:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver</programlisting>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
<section xml:id="demo_multiple_operation">
|
||||
<title>Commands in agent management and scheduler
|
||||
extensions</title>
|
||||
<para>The following commands require the tenant running the
|
||||
command to have an admin role.</para>
|
||||
<note>
|
||||
<para>Ensure that the following environment variables are
|
||||
set. These are used by the various clients to access
|
||||
the Identity Service.</para>
|
||||
<programlisting language="bash">export OS_USERNAME=admin
|
||||
export OS_PASSWORD=adminpassword
|
||||
export OS_TENANT_NAME=admin
|
||||
export OS_AUTH_URL=http://controlnode:5000/v2.0/</programlisting>
|
||||
</note>
|
||||
<procedure>
|
||||
<title>Settings</title>
|
||||
<step><para>To experiment, you need VMs and a neutron
|
||||
network:</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova list</userinput>
|
||||
<computeroutput>+--------------------------------------+-----------+--------+---------------+
|
||||
| ID | Name | Status | Networks |
|
||||
+--------------------------------------+-----------+--------+---------------+
|
||||
| c394fcd0-0baa-43ae-a793-201815c3e8ce | myserver1 | ACTIVE | net1=10.0.1.3 |
|
||||
| 2d604e05-9a6c-4ddb-9082-8a1fbdcc797d | myserver2 | ACTIVE | net1=10.0.1.4 |
|
||||
| c7c0481c-3db8-4d7a-a948-60ce8211d585 | myserver3 | ACTIVE | net1=10.0.1.5 |
|
||||
+--------------------------------------+-----------+--------+---------------+
|
||||
</computeroutput>
|
||||
<prompt>$</prompt> <userinput>neutron net-list</userinput>
|
||||
<computeroutput>+--------------------------------------+------+--------------------------------------+
|
||||
| id | name | subnets |
|
||||
+--------------------------------------+------+--------------------------------------+
|
||||
| 89dca1c6-c7d4-4f7a-b730-549af0fb6e34 | net1 | f6c832e3-9968-46fd-8e45-d5cf646db9d1 |
|
||||
+--------------------------------------+------+--------------------------------------+</computeroutput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>Manage agents in neutron deployment</title>
|
||||
<para>Every agent that supports these extensions will
|
||||
register itself with the neutron server when it
|
||||
starts up.</para>
|
||||
<step>
|
||||
<para>List all agents:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron agent-list</userinput>
|
||||
<computeroutput>+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
| id | agent_type | host | alive | admin_state_up |
|
||||
+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
| 1b69828d-6a9b-4826-87cd-1757f0e27f31 | Linux bridge agent | HostA | :-) | True |
|
||||
| a0c1c21c-d4f4-4577-9ec7-908f2d48622d | DHCP agent | HostA | :-) | True |
|
||||
| ed96b856-ae0f-4d75-bb28-40a47ffd7695 | Linux bridge agent | HostB | :-) | True |
|
||||
| f28aa126-6edb-4ea5-a81e-8850876bc0a8 | DHCP agent | HostB | :-) | True |
|
||||
+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
</computeroutput></screen>
|
||||
<para>The output shows information for four
|
||||
agents. The <literal>alive</literal> field
|
||||
shows <literal>:-)</literal> if the agent
|
||||
reported its state within the period
|
||||
defined by the
|
||||
<option>agent_down_time</option>
|
||||
option in the
|
||||
<filename>neutron.conf</filename>
|
||||
file. Otherwise, the <literal>alive</literal> field
|
||||
shows <literal>xxx</literal>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>List the DHCP agents that host a
|
||||
specified network</para>
|
||||
<para>In some deployments, one DHCP agent is
|
||||
not enough to hold all network data. In
|
||||
addition, you must have a backup for it
|
||||
even when the deployment is small. The
|
||||
same network can be assigned to more than
|
||||
one DHCP agent and one DHCP agent can host
|
||||
more than one network.</para>
|
||||
<para>List DHCP agents that host a specified
|
||||
network:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron dhcp-agent-list-hosting-net net1</userinput>
|
||||
<computeroutput>+--------------------------------------+-------+----------------+-------+
|
||||
| id | host | admin_state_up | alive |
|
||||
+--------------------------------------+-------+----------------+-------+
|
||||
| a0c1c21c-d4f4-4577-9ec7-908f2d48622d | HostA | True | :-) |
|
||||
+--------------------------------------+-------+----------------+-------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>List the networks hosted by a given DHCP
|
||||
agent.</para>
|
||||
<para>This command shows which networks a
|
||||
given DHCP agent is managing.</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron net-list-on-dhcp-agent a0c1c21c-d4f4-4577-9ec7-908f2d48622d</userinput>
|
||||
<computeroutput>+--------------------------------------+------+---------------------------------------------------+
|
||||
| id | name | subnets |
|
||||
+--------------------------------------+------+---------------------------------------------------+
|
||||
| 89dca1c6-c7d4-4f7a-b730-549af0fb6e34 | net1 | f6c832e3-9968-46fd-8e45-d5cf646db9d1 10.0.1.0/24 |
|
||||
+--------------------------------------+------+---------------------------------------------------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Show agent details.</para>
|
||||
<para>The <command>agent-show</command>
|
||||
command shows details for a specified
|
||||
agent:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron agent-show a0c1c21c-d4f4-4577-9ec7-908f2d48622d</userinput>
|
||||
<computeroutput>+---------------------+----------------------------------------------------------+
|
||||
| Field | Value |
|
||||
+---------------------+----------------------------------------------------------+
|
||||
| admin_state_up | True |
|
||||
| agent_type | DHCP agent |
|
||||
| alive | False |
|
||||
| binary | neutron-dhcp-agent |
|
||||
| configurations | { |
|
||||
| | "subnets": 1, |
|
||||
| | "use_namespaces": true, |
|
||||
| | "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq", |
|
||||
| | "networks": 1, |
|
||||
| | "dhcp_lease_time": 120, |
|
||||
| | "ports": 3 |
|
||||
| | } |
|
||||
| created_at | 2013-03-16T01:16:18.000000 |
|
||||
| description | |
|
||||
| heartbeat_timestamp | 2013-03-17T01:37:22.000000 |
|
||||
| host | HostA |
|
||||
| id | 58f4ce07-6789-4bb3-aa42-ed3779db2b03 |
|
||||
| started_at | 2013-03-16T06:48:39.000000 |
|
||||
| topic | dhcp_agent |
|
||||
+---------------------+----------------------------------------------------------+</computeroutput></screen>
|
||||
<para>In this output,
|
||||
<literal>heartbeat_timestamp</literal>
|
||||
is the time on the neutron server. You do
|
||||
not need to synchronize all agents to this
|
||||
time for this extension to run correctly.
|
||||
<literal>configurations</literal>
|
||||
describes the static configuration for the
|
||||
agent or run time data. This agent is a
|
||||
DHCP agent and it hosts one network, one
|
||||
subnet, and three ports.</para>
|
||||
<para>Different types of agents show different
|
||||
details. The following output shows
|
||||
information for a Linux bridge
|
||||
agent:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron agent-show ed96b856-ae0f-4d75-bb28-40a47ffd7695</userinput>
|
||||
<computeroutput>+---------------------+--------------------------------------+
|
||||
| Field | Value |
|
||||
+---------------------+--------------------------------------+
|
||||
| admin_state_up | True |
|
||||
| binary | neutron-linuxbridge-agent |
|
||||
| configurations | { |
|
||||
| | "physnet1": "eth0", |
|
||||
| | "devices": "4" |
|
||||
| | } |
|
||||
| created_at | 2013-03-16T01:49:52.000000 |
|
||||
| description | |
|
||||
| disabled | False |
|
||||
| group | agent |
|
||||
| heartbeat_timestamp | 2013-03-16T01:59:45.000000 |
|
||||
| host | HostB |
|
||||
| id | ed96b856-ae0f-4d75-bb28-40a47ffd7695 |
|
||||
| topic | N/A |
|
||||
| started_at | 2013-03-16T06:48:39.000000 |
|
||||
| type | Linux bridge agent |
|
||||
+---------------------+--------------------------------------+</computeroutput></screen>
|
||||
<para>The output shows
|
||||
<literal>bridge-mapping</literal> and
|
||||
the number of virtual network devices on
|
||||
this L2 agent.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>Manage assignment of networks to DHCP agent</title>
|
||||
<para>Now that you have run the
|
||||
<command>net-list-on-dhcp-agent</command> and
|
||||
<command>dhcp-agent-list-hosting-net</command>
|
||||
commands, you can add a network to a DHCP agent
|
||||
and remove one from it.</para>
|
||||
<step>
|
||||
<para>Default scheduling.</para>
|
||||
<para>When you create a network with one port,
|
||||
you can schedule it to an active DHCP
|
||||
agent. If many active DHCP agents are
|
||||
running, one is selected randomly. You can
|
||||
design more sophisticated scheduling
|
||||
algorithms in the same way as <systemitem
|
||||
class="service"
|
||||
>nova-schedule</systemitem> later
|
||||
on.</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron net-create net2</userinput>
|
||||
<prompt>$</prompt> <userinput>neutron subnet-create net2 9.0.1.0/24 --name subnet2</userinput>
|
||||
<prompt>$</prompt> <userinput>neutron port-create net2</userinput>
|
||||
<prompt>$</prompt> <userinput>neutron dhcp-agent-list-hosting-net net2</userinput>
|
||||
<computeroutput>+--------------------------------------+-------+----------------+-------+
|
||||
| id | host | admin_state_up | alive |
|
||||
+--------------------------------------+-------+----------------+-------+
|
||||
| a0c1c21c-d4f4-4577-9ec7-908f2d48622d | HostA | True | :-) |
|
||||
+--------------------------------------+-------+----------------+-------+</computeroutput></screen>
|
||||
<para>The network is allocated to the DHCP agent on HostA.
|
||||
If you want to validate the behavior
|
||||
through the <command>dnsmasq</command>
|
||||
command, you must create a subnet for the
|
||||
network because the DHCP agent starts the
|
||||
<systemitem class="service"
|
||||
>dnsmasq</systemitem> service only if
|
||||
there is a subnet with DHCP enabled.</para>
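<para>The scheduler that picks the agent is configurable. For example, the
random (chance) scheduler can be selected explicitly in
<filename>neutron.conf</filename>; this is a sketch and is typically already
the default:</para>
<programlisting language="ini">[DEFAULT]
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler</programlisting>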
|
||||
</step>
|
||||
<step>
|
||||
<para>Assign a network to a given DHCP
|
||||
agent.</para>
|
||||
<para>To add another DHCP agent to host the
|
||||
network, run this command:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron dhcp-agent-network-add f28aa126-6edb-4ea5-a81e-8850876bc0a8 net2</userinput>
|
||||
<computeroutput>Added network net2 to dhcp agent</computeroutput>
|
||||
<prompt>$</prompt> <userinput>neutron dhcp-agent-list-hosting-net net2</userinput>
|
||||
<computeroutput>+--------------------------------------+-------+----------------+-------+
|
||||
| id | host | admin_state_up | alive |
|
||||
+--------------------------------------+-------+----------------+-------+
|
||||
| a0c1c21c-d4f4-4577-9ec7-908f2d48622d | HostA | True | :-) |
|
||||
| f28aa126-6edb-4ea5-a81e-8850876bc0a8 | HostB | True | :-) |
|
||||
+--------------------------------------+-------+----------------+-------+</computeroutput></screen>
|
||||
<para>Both DHCP agents host the
|
||||
<literal>net2</literal>
|
||||
network.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Remove a network from a specified DHCP
|
||||
agent.</para>
|
||||
<para>This command is the sibling command for
|
||||
the previous one. Remove
|
||||
<literal>net2</literal> from the DHCP
|
||||
agent for HostA:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron dhcp-agent-network-remove a0c1c21c-d4f4-4577-9ec7-908f2d48622d net2</userinput>
|
||||
<computeroutput>Removed network net2 to dhcp agent</computeroutput>
|
||||
<prompt>$</prompt> <userinput>neutron dhcp-agent-list-hosting-net net2</userinput>
|
||||
<computeroutput>+--------------------------------------+-------+----------------+-------+
|
||||
| id | host | admin_state_up | alive |
|
||||
+--------------------------------------+-------+----------------+-------+
|
||||
| f28aa126-6edb-4ea5-a81e-8850876bc0a8 | HostB | True | :-) |
|
||||
+--------------------------------------+-------+----------------+-------+</computeroutput></screen>
|
||||
<para>You can see that only the DHCP agent for
|
||||
HostB is hosting the
|
||||
<literal>net2</literal>
|
||||
network.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>HA of DHCP agents</title>
|
||||
<para>Boot a VM on net2. Let both DHCP agents host
|
||||
<literal>net2</literal>. Fail the agents in
|
||||
turn to see if the VM can still get the desired
|
||||
IP.</para>
|
||||
<step>
|
||||
<para>Boot a VM on net2.</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron net-list</userinput>
|
||||
<computeroutput>+--------------------------------------+------+--------------------------------------------------+
|
||||
| id | name | subnets |
|
||||
+--------------------------------------+------+--------------------------------------------------+
|
||||
| 89dca1c6-c7d4-4f7a-b730-549af0fb6e34 | net1 | f6c832e3-9968-46fd-8e45-d5cf646db9d1 10.0.1.0/24|
|
||||
| 9b96b14f-71b8-4918-90aa-c5d705606b1a | net2 | 6979b71a-0ae8-448c-aa87-65f68eedcaaa 9.0.1.0/24 |
|
||||
+--------------------------------------+------+--------------------------------------------------+</computeroutput>
|
||||
<prompt>$</prompt> <userinput>nova boot --image tty --flavor 1 myserver4 \
|
||||
--nic net-id=9b96b14f-71b8-4918-90aa-c5d705606b1a</userinput>
|
||||
<prompt>$</prompt> <userinput>nova list</userinput>
|
||||
<computeroutput>+--------------------------------------+-----------+--------+---------------+
|
||||
| ID | Name | Status | Networks |
|
||||
+--------------------------------------+-----------+--------+---------------+
|
||||
| c394fcd0-0baa-43ae-a793-201815c3e8ce | myserver1 | ACTIVE | net1=10.0.1.3 |
|
||||
| 2d604e05-9a6c-4ddb-9082-8a1fbdcc797d | myserver2 | ACTIVE | net1=10.0.1.4 |
|
||||
| c7c0481c-3db8-4d7a-a948-60ce8211d585 | myserver3 | ACTIVE | net1=10.0.1.5 |
|
||||
| f62f4731-5591-46b1-9d74-f0c901de567f | myserver4 | ACTIVE | net2=9.0.1.2 |
|
||||
+--------------------------------------+-----------+--------+---------------+</computeroutput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Make sure that both DHCP agents are hosting
|
||||
<literal>net2</literal>.</para>
|
||||
<para>Use the previous commands to assign the
|
||||
network to agents.</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron dhcp-agent-list-hosting-net net2</userinput>
|
||||
<computeroutput>+--------------------------------------+-------+----------------+-------+
|
||||
| id | host | admin_state_up | alive |
|
||||
+--------------------------------------+-------+----------------+-------+
|
||||
| a0c1c21c-d4f4-4577-9ec7-908f2d48622d | HostA | True | :-) |
|
||||
| f28aa126-6edb-4ea5-a81e-8850876bc0a8 | HostB | True | :-) |
|
||||
+--------------------------------------+-------+----------------+-------+</computeroutput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>Test the HA</title>
|
||||
<step>
|
||||
<para>Log in to the
|
||||
<literal>myserver4</literal> VM,
|
||||
and run <literal>udhcpc</literal>,
|
||||
<literal>dhclient</literal> or
|
||||
other DHCP client.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Stop the DHCP agent on HostA.
|
||||
Besides stopping the
|
||||
<code>neutron-dhcp-agent</code>
|
||||
binary, you must stop the
|
||||
<command>dnsmasq</command>
|
||||
processes.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Run a DHCP client in the VM to see
|
||||
if it can get the wanted IP.
|
||||
</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Stop the DHCP agent on HostB
|
||||
too.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Run <command>udhcpc</command> in
|
||||
the VM; it cannot get the wanted
|
||||
IP.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Start the DHCP agent on HostB. The
|
||||
VM gets the wanted IP again.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>Disable and remove an agent</title>
|
||||
<para>An administrator might want to disable an agent
|
||||
if a system hardware or software upgrade is
|
||||
planned. Some agents that support scheduling also
|
||||
support disabling and enabling agents, such as L3
|
||||
and DHCP agents. After the agent is disabled, the
|
||||
scheduler does not schedule new resources to the
|
||||
agent. You can then safely
|
||||
remove the agent. Remove the resources on the
|
||||
agent before you delete the agent.</para>
|
||||
<step><para>To run the following commands, you must stop the
|
||||
DHCP agent on HostA.</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron agent-update --admin-state-up False a0c1c21c-d4f4-4577-9ec7-908f2d48622d</userinput>
|
||||
<prompt>$</prompt> <userinput>neutron agent-list</userinput>
|
||||
<computeroutput>+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
| id | agent_type | host | alive | admin_state_up |
|
||||
+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
| 1b69828d-6a9b-4826-87cd-1757f0e27f31 | Linux bridge agent | HostA | :-) | True |
|
||||
| a0c1c21c-d4f4-4577-9ec7-908f2d48622d | DHCP agent | HostA | :-) | False |
|
||||
| ed96b856-ae0f-4d75-bb28-40a47ffd7695 | Linux bridge agent | HostB | :-) | True |
|
||||
| f28aa126-6edb-4ea5-a81e-8850876bc0a8 | DHCP agent | HostB | :-) | True |
|
||||
+--------------------------------------+--------------------+-------+-------+----------------+</computeroutput>
|
||||
<prompt>$</prompt> <userinput>neutron agent-delete a0c1c21c-d4f4-4577-9ec7-908f2d48622d</userinput>
|
||||
<computeroutput>Deleted agent: a0c1c21c-d4f4-4577-9ec7-908f2d48622d</computeroutput>
|
||||
<prompt>$</prompt> <userinput>neutron agent-list</userinput>
|
||||
<computeroutput>+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
| id | agent_type | host | alive | admin_state_up |
|
||||
+--------------------------------------+--------------------+-------+-------+----------------+
|
||||
| 1b69828d-6a9b-4826-87cd-1757f0e27f31 | Linux bridge agent | HostA | :-) | True |
|
||||
| ed96b856-ae0f-4d75-bb28-40a47ffd7695 | Linux bridge agent | HostB | :-) | True |
|
||||
| f28aa126-6edb-4ea5-a81e-8850876bc0a8 | DHCP agent | HostB | :-) | True |
|
||||
+--------------------------------------+--------------------+-------+-------+----------------+</computeroutput></screen>
|
||||
<para>After deletion, if you restart the DHCP agent,
|
||||
it appears on the agent list again.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
</section>
|
@ -1,293 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_networking-use">
|
||||
<title>Use Networking</title>
|
||||
<para>You can manage OpenStack Networking services by using the <systemitem>service</systemitem>
|
||||
command. For example:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service neutron-server stop</userinput>
|
||||
<prompt>#</prompt> <userinput>service neutron-server status</userinput>
|
||||
<prompt>#</prompt> <userinput>service neutron-server start</userinput>
|
||||
<prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
|
||||
<para>Log files are in the <systemitem>/var/log/neutron</systemitem> directory.</para>
|
||||
<para>Configuration files are in the <systemitem>/etc/neutron</systemitem> directory.</para>
|
||||
<para>Cloud administrators and tenants can use OpenStack Networking to build rich network
|
||||
topologies. Cloud administrators can create network connectivity on behalf of
|
||||
tenants.</para>
|
||||
<!-- removed this line because there are no 'following procedures' -->
|
||||
<!--<para>A tenant or cloud administrator can both perform the
|
||||
following procedures.</para>-->
|
||||
<section xml:id="api_features">
|
||||
<title>Core Networking API features</title>
|
||||
<para>After you install and configure Networking, tenants and administrators can perform
|
||||
create-read-update-delete (CRUD) API networking operations by using the Networking API
|
||||
directly or neutron command-line interface (CLI). The neutron CLI is a wrapper around
|
||||
the Networking API. Every Networking API call has a corresponding neutron
|
||||
command.</para>
|
||||
<para>The CLI includes a number of options. For details, see the <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/index.html"><citetitle>OpenStack End
|
||||
User Guide</citetitle></link>.</para>
|
||||
<section xml:id="basic_operations">
|
||||
<title>Basic Networking operations</title>
|
||||
<para>To learn about advanced capabilities available through the neutron command-line
|
||||
interface (CLI), read the networking section in the <link
|
||||
xlink:href="http://docs.openstack.org/user-guide/index.html"> OpenStack
|
||||
End User Guide</link>.</para>
|
||||
<para>This table shows example neutron commands that enable you to complete basic
|
||||
network operations:</para>
|
||||
<table rules="all">
|
||||
<caption>Basic Networking operations</caption>
|
||||
<col width="40%"/>
|
||||
<col width="60%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Operation</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Creates a network.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron net-create net1</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Creates a subnet that is associated with net1.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lists ports for a specified tenant.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-list</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lists ports for a specified tenant and displays the <option>id</option>,
|
||||
<option>fixed_ips</option>, and <option>device_owner</option>
|
||||
columns.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-list -c id -c fixed_ips -c device_owner</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Shows information for a specified port.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-show <replaceable>PORT_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<note>
|
||||
<para>The <option>device_owner</option> field describes who owns the port. A port
|
||||
whose <option>device_owner</option> begins with:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><literal>network</literal> is created by Networking.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><literal>compute</literal> is created by Compute.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
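<para>For example, listing only the <option>device_owner</option>
column typically shows values with these prefixes; the output
below is illustrative only:</para>
<screen><prompt>$</prompt> <userinput>neutron port-list -c device_owner</userinput>
<computeroutput>+--------------------------+
| device_owner             |
+--------------------------+
| network:dhcp             |
| network:router_interface |
| compute:nova             |
+--------------------------+</computeroutput></screen>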
|
||||
</section>
|
||||
<section xml:id="admin_api_config">
|
||||
<title>Administrative operations</title>
|
||||
<para>The cloud administrator can run any <command>neutron</command> command on behalf
|
||||
of tenants by specifying an Identity <option>tenant_id</option> in the command, as
|
||||
follows:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron net-create --tenant-id <replaceable>TENANT_ID</replaceable> <replaceable>NETWORK_NAME</replaceable></userinput></screen>
|
||||
<para>For example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron net-create --tenant-id 5e4bbe24b67a4410bc4d9fae29ec394e net1</userinput></screen>
|
||||
<note>
|
||||
<para>To view all tenant IDs in Identity, run the following command as an Identity
|
||||
Service admin user:</para>
|
||||
<screen><prompt>$</prompt> <userinput>keystone tenant-list</userinput></screen>
|
||||
</note>
|
||||
</section>
|
||||
<?hard-pagebreak?>
|
||||
<section xml:id="advanced_networking">
|
||||
<title>Advanced Networking operations</title>
|
||||
<para>This table shows example Networking commands that enable you to complete advanced
|
||||
network operations:</para>
|
||||
<table rules="all">
|
||||
<caption>Advanced Networking operations</caption>
|
||||
<col width="40%"/>
|
||||
<col width="60%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Operation</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Creates a network that all tenants can use.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron net-create --shared public-net</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Creates a subnet with a specified gateway IP address.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create --gateway 10.0.0.254 net1 10.0.0.0/24</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Creates a subnet that has no gateway IP address.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create --no-gateway net1 10.0.0.0/24</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Creates a subnet with DHCP disabled.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create net1 10.0.0.0/24 --enable-dhcp False</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Creates a subnet with a specified set of host routes.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create test-net1 40.0.0.0/24 --host-routes type=dict list=true destination=40.0.1.0/24,nexthop=40.0.0.2</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Creates a subnet with a specified set of dns name servers.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron subnet-create test-net1 40.0.0.0/24 --dns-nameservers list=true 8.8.4.4 8.8.8.8</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Displays all ports and IPs allocated on a network.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-list --network_id <replaceable>NET_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</section>
|
||||
</section>
|
||||
<?hard-pagebreak?>
|
||||
<section xml:id="using_nova_with_neutron">
|
||||
<title>Use Compute with Networking</title>
|
||||
<section xml:id="basic_workflow_with_nova">
|
||||
<title>Basic Compute and Networking operations</title>
|
||||
<para>This table shows example neutron and nova commands that enable you to complete
|
||||
basic VM networking operations:</para>
|
||||
<table rules="all">
|
||||
<caption>Basic Compute and Networking operations</caption>
|
||||
<col width="40%"/>
|
||||
<col width="60%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Action</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Checks available networks.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron net-list</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Boots a VM with a single NIC on a selected Networking network.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> --nic net-id=<replaceable>NET_ID</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><para>Searches for ports with a <option>device_id</option> that matches
|
||||
the Compute instance UUID. See <xref linkend="network_compute_note"
|
||||
/>.</para></td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-list --device_id <replaceable>VM_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Searches for ports, but shows only the <option>mac_address</option> of
|
||||
the port.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-list --field mac_address --device_id <replaceable>VM_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Temporarily disables a port from sending traffic.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron port-update <replaceable>PORT_ID</replaceable> --admin_state_up False</userinput></screen></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<note>
|
||||
<para>The <option>device_id</option> can also be a logical router ID.</para>
|
||||
</note>
|
||||
<note xml:id="network_compute_note">
|
||||
<title>Create and delete VMs</title>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>When you boot a Compute VM, a port on the network that corresponds to
|
||||
the VM NIC is automatically created and associated with the default
|
||||
security group. You can configure <link linkend="enabling_ping_and_ssh"
|
||||
>security group rules</link> to enable users to access the
|
||||
VM.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>When you delete a Compute VM, the underlying Networking port is
|
||||
automatically deleted.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
</section>
|
||||
<section xml:id="advanced_vm_creation">
|
||||
<title>Advanced VM creation operations</title>
|
||||
<para>This table shows example nova and neutron commands that enable you to complete
|
||||
advanced VM creation operations:</para>
|
||||
<table rules="all">
|
||||
<caption>Advanced VM creation operations</caption>
|
||||
<col width="40%"/>
|
||||
<col width="60%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Operation</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Boots a VM with multiple NICs.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> --nic net-id=<replaceable>NET1-ID</replaceable> --nic net-id=<replaceable>NET2-ID</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Boots a VM with a specific IP address. Note that you cannot use the <parameter>--num-instances</parameter> parameter in this case.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> --nic net-id=<replaceable>NET-ID</replaceable>,v4-fixed-ip=<replaceable>IP-ADDR</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Boots a VM that connects to all networks that are accessible to the
|
||||
tenant who submits the request (without the <parameter>--nic</parameter>
|
||||
option).</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>nova boot --image <replaceable>IMAGE</replaceable> --flavor <replaceable>FLAVOR</replaceable> <replaceable>VM_NAME</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<note>
|
||||
<para>Cloud images that distribution vendors offer usually have only one active NIC
configured. When you boot with multiple NICs, you must configure additional
interfaces on the image or the NICs are not reachable.</para>
|
||||
<para>The following Debian/Ubuntu-based example shows how to set up the interfaces
|
||||
within the instance in the <filename>/etc/network/interfaces</filename> file.
|
||||
You must apply this configuration to the image.</para>
|
||||
<programlisting language="bash"># The loopback network interface
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
|
||||
auto eth1
|
||||
iface eth1 inet dhcp</programlisting>
|
||||
</note>
|
||||
</section>
|
||||
<section xml:id="enabling_ping_and_ssh">
|
||||
<title>Enable ping and SSH on VMs (security groups)</title>
|
||||
<para>You must configure security group rules depending on the type of plug-in you are
|
||||
using. If you are using a plug-in that:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Implements Networking security groups, you can configure security group
|
||||
rules directly by using the <command>neutron
|
||||
security-group-rule-create</command> command. This example enables
|
||||
<command>ping</command> and <command>ssh</command> access to your
|
||||
VMs.</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron security-group-rule-create --protocol icmp \
|
||||
--direction ingress default</userinput></screen>
|
||||
<screen><prompt>$</prompt> <userinput>neutron security-group-rule-create --protocol tcp --port-range-min 22 \
|
||||
--port-range-max 22 --direction ingress default</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Does not implement Networking security groups, you can configure security
|
||||
group rules by using the <command>nova secgroup-add-rule</command> or
|
||||
<command>euca-authorize</command> command. These <command>nova</command>
|
||||
commands enable <command>ping</command> and <command>ssh</command> access to
|
||||
your VMs.</para>
|
||||
<screen><prompt>$</prompt> <userinput>nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0</userinput>
|
||||
<prompt>$</prompt> <userinput>nova secgroup-add-rule default tcp 22 22 0.0.0.0/0</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<note>
|
||||
<para>If your plug-in implements Networking security groups, you can also leverage
|
||||
Compute security groups by setting <code>security_group_api = neutron</code> in
|
||||
the <filename>nova.conf</filename> file. After you set this option, all Compute
|
||||
security group commands are proxied to Networking.</para>
|
||||
</note>
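<para>As a minimal sketch, the relevant <filename>nova.conf</filename>
fragment looks like this; adapt it to your deployment:</para>
<programlisting language="ini">[DEFAULT]
# Proxy all Compute security group API calls to Networking (neutron)
security_group_api = neutron</programlisting>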
|
||||
</section>
|
||||
</section>
|
||||
</section>
|
File diff suppressed because it is too large
@ -1,152 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_networking-adv-operational_features">
|
||||
<title>Advanced operational features</title>
|
||||
<section xml:id="section_adv_logging">
|
||||
<title>Logging settings</title>
|
||||
<para>Networking components use the Python logging module for
logging. You can provide the logging configuration in
<filename>neutron.conf</filename> or as command-line
options. Command-line options override the values in
<filename>neutron.conf</filename>.</para>
|
||||
<para>To configure logging for Networking components, use one
|
||||
of these methods:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Provide logging settings in a logging
|
||||
configuration file.</para>
|
||||
<para>See <link xlink:href="http://docs.python.org/howto/logging.html">Python
|
||||
logging how-to</link> to learn more about logging.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Provide logging setting in
|
||||
<filename>neutron.conf</filename></para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
# Default log level is WARNING
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
# debug = False
|
||||
|
||||
# Show more verbose log output (sets INFO log level output) if debug is False
|
||||
# verbose = False
|
||||
|
||||
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
|
||||
# log_date_format = %Y-%m-%d %H:%M:%S
|
||||
|
||||
# use_syslog = False
|
||||
# syslog_log_facility = LOG_USER
|
||||
|
||||
# if use_syslog is False, we can set log_file and log_dir.
|
||||
# if use_syslog is False and we do not set log_file,
|
||||
# the log will be printed to stdout.
|
||||
# log_file =
|
||||
# log_dir =</programlisting>
|
||||
</listitem>
|
||||
</itemizedlist>
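<para>As an illustration of the first method, a minimal logging
configuration file in the standard Python
<literal>fileConfig</literal> format might look like this; it is
a sketch only, so tune the handler class, log path, and levels
for your deployment:</para>
<programlisting language="ini">[loggers]
keys = root

[handlers]
keys = file

[formatters]
keys = default

[logger_root]
level = INFO
handlers = file

[handler_file]
class = FileHandler
args = ('/var/log/neutron/neutron.log',)
formatter = default

[formatter_default]
format = %(asctime)s %(levelname)8s [%(name)s] %(message)s</programlisting>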
|
||||
</section>
|
||||
<section xml:id="section_adv_notification">
|
||||
<title>Notifications</title>
|
||||
<para>Notifications can be sent when Networking resources such
|
||||
as network, subnet and port are created, updated or
|
||||
deleted.</para>
|
||||
<section xml:id="section_adv_notification_overview">
|
||||
<title>Notification options</title>
|
||||
<para>To support the DHCP agent, the <literal>rpc_notifier</literal>
driver must be set. To set up notifications, edit the notification
options in <filename>neutron.conf</filename>:</para>
|
||||
<programlisting language="ini"># Driver or drivers to handle sending notifications. (multi
|
||||
# valued)
|
||||
#notification_driver=
|
||||
|
||||
# AMQP topic used for OpenStack notifications. (list value)
|
||||
# Deprecated group/name - [rpc_notifier2]/topics
|
||||
notification_topics = notifications</programlisting>
|
||||
</section>
|
||||
<section xml:id="section_adv_notification_cases">
|
||||
<title>Setting cases</title>
|
||||
<section xml:id="section_adv_notification_cases_log_rpc">
|
||||
<title>Logging and RPC</title>
|
||||
<para>These options configure the Networking
server to send notifications through logging and
RPC. The logging options are described in the
<citetitle>OpenStack Configuration Reference</citetitle>.
RPC notifications go to the <literal>notifications.info</literal>
queue, which is bound to a topic exchange defined by the
<literal>control_exchange</literal> option in
<filename>neutron.conf</filename>.</para>
|
||||
<programlisting language="ini"># ============ Notification System Options =====================
|
||||
|
||||
# Notifications can be sent when network/subnet/port are create, updated or deleted.
|
||||
# There are three methods of sending notifications: logging (via the
|
||||
# log_file directive), rpc (via a message queue) and
|
||||
# noop (no notifications sent, the default)
|
||||
|
||||
# Notification_driver can be defined multiple times
|
||||
# Do nothing driver
|
||||
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
|
||||
# Logging driver
|
||||
notification_driver = neutron.openstack.common.notifier.log_notifier
|
||||
# RPC driver
|
||||
notification_driver = neutron.openstack.common.notifier.rpc_notifier
|
||||
|
||||
# default_notification_level is used to form actual topic names or to set logging level
|
||||
default_notification_level = INFO
|
||||
|
||||
# default_publisher_id is a part of the notification payload
|
||||
# host = myhost.com
|
||||
# default_publisher_id = $host
|
||||
|
||||
# Defined in rpc_notifier for rpc way, can be comma-separated values.
|
||||
# The actual topic names will be %s.%(default_notification_level)s
|
||||
notification_topics = notifications
|
||||
|
||||
# Options defined in oslo.messaging
|
||||
#
|
||||
|
||||
# The default exchange under which topics are scoped. May be
|
||||
# overridden by an exchange name specified in the
|
||||
# transport_url option. (string value)
|
||||
#control_exchange=openstack</programlisting>
|
||||
</section>
|
||||
<section
|
||||
xml:id="ch_adv_notification_cases_multi_rpc_topics">
|
||||
<title>Multiple RPC topics</title>
|
||||
<para>These options configure the Networking
server to send notifications to multiple RPC
topics. RPC notifications go to the
<literal>notifications_one.info</literal> and
<literal>notifications_two.info</literal> queues, which are
bound to a topic exchange defined by the
<literal>control_exchange</literal> option in
<filename>neutron.conf</filename>.</para>
|
||||
<programlisting language="ini"># ============ Notification System Options =====================
|
||||
|
||||
# Notifications can be sent when network/subnet/port are create, updated or deleted.
|
||||
# There are three methods of sending notifications: logging (via the
|
||||
# log_file directive), rpc (via a message queue) and
|
||||
# noop (no notifications sent, the default)
|
||||
|
||||
# Notification_driver can be defined multiple times
|
||||
# Do nothing driver
|
||||
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
|
||||
# Logging driver
|
||||
# notification_driver = neutron.openstack.common.notifier.log_notifier
|
||||
# RPC driver
|
||||
notification_driver = neutron.openstack.common.notifier.rpc_notifier
|
||||
|
||||
# default_notification_level is used to form actual topic names or to set logging level
|
||||
default_notification_level = INFO
|
||||
|
||||
# default_publisher_id is a part of the notification payload
|
||||
# host = myhost.com
|
||||
# default_publisher_id = $host
|
||||
|
||||
# Defined in rpc_notifier for rpc way, can be comma-separated values.
|
||||
# The actual topic names will be %s.%(default_notification_level)s
|
||||
notification_topics = notifications_one,notifications_two</programlisting>
|
||||
</section>
|
||||
</section>
|
||||
</section>
|
||||
</section>
|
@ -1,135 +0,0 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_networking-arch">
|
||||
<title>Networking architecture</title>
|
||||
<para>Before you deploy Networking, it's useful to understand the
|
||||
Networking services and how they interact with the OpenStack
|
||||
components.</para>
|
||||
<section xml:id="arch_overview">
|
||||
<title>Overview</title>
|
||||
<para>Networking is a standalone component in the OpenStack modular
|
||||
architecture. It's positioned alongside OpenStack components such
|
||||
as Compute, Image service, Identity, or the Dashboard. Like
|
||||
those components, a deployment of Networking often involves
|
||||
deploying several services to a variety of hosts.</para>
|
||||
<para>The Networking server uses the <systemitem class="service"
|
||||
>neutron-server</systemitem> daemon to expose the Networking
|
||||
API and enable administration of the configured Networking
|
||||
plug-in. Typically, the plug-in requires access to a database
|
||||
for persistent storage (also similar to other OpenStack
|
||||
services).</para>
|
||||
<para>If your deployment uses a controller host to run centralized
|
||||
Compute components, you can deploy the Networking server to that
|
||||
same host. However, Networking is entirely standalone and can be
|
||||
deployed to a dedicated host. Depending on your configuration,
|
||||
Networking can also include the following agents:</para>
|
||||
<para>
|
||||
<table rules="all">
|
||||
<caption>Networking agents</caption>
|
||||
<col width="30%"/>
|
||||
<col width="70%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Agent</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><emphasis role="bold">plug-in
|
||||
agent</emphasis>
|
||||
(<literal>neutron-*-agent</literal>)</td>
|
||||
<td>Runs on each hypervisor to perform local
|
||||
vSwitch configuration. The agent that runs depends
|
||||
on the plug-in that you use. Certain plug-ins do not
|
||||
require an agent.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">dhcp
|
||||
agent</emphasis>
|
||||
(<literal>neutron-dhcp-agent</literal>)</td>
|
||||
<td>Provides DHCP services to tenant networks.
|
||||
Required by certain plug-ins.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">l3
|
||||
agent</emphasis>
|
||||
(<literal>neutron-l3-agent</literal>)</td>
|
||||
<td>Provides L3/NAT forwarding to provide external
|
||||
network access for VMs on tenant networks. Required
|
||||
by certain plug-ins.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">metering agent</emphasis>
|
||||
(<literal>neutron-metering-agent</literal>)</td>
|
||||
<td>Provides L3 traffic metering for tenant networks.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</para>
|
||||
<para>These agents interact with the main neutron process through
|
||||
RPC (for example, RabbitMQ or Qpid) or through the standard
|
||||
Networking API. In addition, Networking integrates with OpenStack
|
||||
components in a number of ways:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Networking relies on the Identity service
|
||||
(keystone) for the authentication and
|
||||
authorization of all API requests.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Compute (nova) interacts with Networking
|
||||
through calls to its standard API. As part of
|
||||
creating a VM, the <systemitem class="service"
|
||||
>nova-compute</systemitem> service
|
||||
communicates with the Networking API to plug
|
||||
each virtual NIC on the VM into a particular
|
||||
network.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The dashboard (horizon) integrates with the
|
||||
Networking API, enabling administrators and
|
||||
tenant users to create and manage network
|
||||
services through a web-based GUI.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
<section xml:id="NSX_overview">
|
||||
<title>VMware NSX integration</title>
|
||||
<para>OpenStack Networking uses the NSX plug-in to
|
||||
integrate with an existing VMware vCenter deployment. When installed on
|
||||
the network nodes, the NSX plug-in enables a NSX controller to centrally
|
||||
manage configuration settings and push them to managed network nodes.
|
||||
Network nodes are considered managed when they're added as hypervisors
|
||||
to the NSX controller.</para>
|
||||
<para>The diagrams below depict some VMware NSX deployment examples.
|
||||
The first diagram illustrates the traffic flow between VMs on separate
|
||||
Compute nodes, and the second diagram between two VMs on a single
|
||||
Compute node. Note the placement of the VMware NSX plug-in and the
|
||||
<systemitem class="service">neutron-server</systemitem> service on the
|
||||
network node. The green arrow indicates the management relationship
|
||||
between the NSX controller and the network node.</para>
|
||||
<figure>
|
||||
<title>VMware NSX deployment example - two Compute nodes</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/vmware_nsx_ex1.png"
|
||||
format="PNG" contentwidth="6in"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<figure>
|
||||
<title>VMware NSX deployment example - single Compute node</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/vmware_nsx_ex2.png"
|
||||
format="PNG" contentwidth="6in"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
</section>
|
||||
</section>
|
@ -1,192 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_networking_auth">
|
||||
<title>Authentication and authorization</title>
|
||||
<para>Networking uses the Identity Service as the default
|
||||
authentication service. When the Identity Service is enabled,
|
||||
users who submit requests to the Networking service must
|
||||
provide an authentication token in
|
||||
<literal>X-Auth-Token</literal> request header. Users
|
||||
obtain this token by authenticating with the Identity Service
|
||||
endpoint. For more information about authentication with the
|
||||
Identity Service, see <link
|
||||
xlink:href="http://developer.openstack.org/api-ref-identity-v2.html"
|
||||
><citetitle>OpenStack Identity Service API v2.0
|
||||
Reference</citetitle></link>. When the Identity
|
||||
Service is enabled, it is not mandatory to specify the tenant
|
||||
ID for resources in create requests because the tenant ID is
|
||||
derived from the authentication token.</para>
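<para>For example, a token obtained from the Identity Service can
be passed to the Networking API as follows; this is a sketch in
which the endpoint host, port, and token are placeholders:</para>
<screen><prompt>$</prompt> <userinput>curl -s -H "X-Auth-Token: <replaceable>TOKEN</replaceable>" \
  http://<replaceable>CONTROLLER</replaceable>:9696/v2.0/networks</userinput></screen>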
|
||||
<note>
|
||||
<para>The default authorization settings only allow
|
||||
administrative users to create resources on behalf of a
|
||||
different tenant. Networking uses information received
|
||||
from Identity to authorize user requests. Networking
|
||||
handles two kinds of authorization policies:</para>
|
||||
</note>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Operation-based</emphasis>
|
||||
policies specify access criteria for specific
|
||||
operations, possibly with fine-grained control over
|
||||
specific attributes;</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Resource-based</emphasis>
|
||||
policies specify whether access to specific resource
|
||||
is granted or not according to the permissions
|
||||
configured for the resource (currently available only
|
||||
for the network resource). The actual authorization
|
||||
policies enforced in Networking might vary from
|
||||
deployment to deployment.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>The policy engine reads entries from the
|
||||
<filename>policy.json</filename> file. The actual location
|
||||
of this file might vary from distribution to distribution.
|
||||
Entries can be updated while the system is running, and no
|
||||
service restart is required. Every time the policy file is
|
||||
updated, the policies are automatically reloaded. Currently
|
||||
the only way of updating such policies is to edit the policy
|
||||
file. In this section, the terms <emphasis role="italic"
|
||||
>policy</emphasis> and <emphasis role="italic"
|
||||
>rule</emphasis> refer to objects that are specified in
|
||||
the same way in the policy file. There are no syntax
|
||||
differences between a rule and a policy. A policy is something
|
||||
that is matched directly from the Networking policy engine. A
|
||||
rule is an element in a policy, which is evaluated. For
|
||||
instance in <code>create_subnet:
|
||||
[["admin_or_network_owner"]]</code>, <emphasis
|
||||
role="italic">create_subnet</emphasis> is a policy, and
|
||||
<emphasis role="italic">admin_or_network_owner</emphasis>
|
||||
is a rule.</para>
|
||||
<para>Policies are triggered by the Networking policy engine
|
||||
whenever one of them matches a Networking API operation or a
|
||||
specific attribute being used in a given operation. For
|
||||
instance the <code>create_subnet</code> policy is triggered
|
||||
every time a <code>POST /v2.0/subnets</code> request is sent
|
||||
to the Networking server; on the other hand
|
||||
<code>create_network:shared</code> is triggered every time
|
||||
the <emphasis role="italic">shared</emphasis> attribute is
|
||||
explicitly specified (and set to a value different from its
|
||||
default) in a <code>POST /v2.0/networks</code> request. It is
|
||||
also worth mentioning that policies can also be related to
|
||||
specific API extensions; for instance
|
||||
<code>extension:provider_network:set</code> is
|
||||
triggered if the attributes defined by the Provider Network
|
||||
extensions are specified in an API request.</para>
|
||||
<para>An authorization policy can be composed of one or more
rules. If more than one rule is specified, the policy succeeds
if any of its rules evaluates successfully; if an API
operation matches multiple policies, then all of those policies
must evaluate successfully. Authorization rules are also
recursive: after a rule is matched, it can resolve
to another rule, until a terminal rule is reached.</para>
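<para>For example, a minimal illustration of rule recursion, not
taken from the shipped policy file, could look like this:</para>
<programlisting language="json">{
    "owner": [["tenant_id:%(tenant_id)s"]],
    "admin_or_owner": [["role:admin"], ["rule:owner"]],
    "update_subnet": [["rule:admin_or_owner"]]
}</programlisting>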
|
||||
<para>The Networking policy engine currently defines the following
|
||||
kinds of terminal rules:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Role-based rules</emphasis>
|
||||
evaluate successfully if the user who submits the
|
||||
request has the specified role. For instance
|
||||
<code>"role:admin"</code> is successful if the
|
||||
user who submits the request is an
|
||||
administrator.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Field-based rules
|
||||
</emphasis>evaluate successfully if a field of the
|
||||
resource specified in the current request matches a
|
||||
specific value. For instance
|
||||
<code>"field:networks:shared=True"</code> is
|
||||
successful if the <literal>shared</literal> attribute
|
||||
of the <literal>network</literal> resource is set to
|
||||
true.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para><emphasis role="bold">Generic rules</emphasis>
|
||||
compare an attribute in the resource with an attribute
|
||||
extracted from the user's security credentials and
|
||||
evaluates successfully if the comparison is
|
||||
successful. For instance
|
||||
<code>"tenant_id:%(tenant_id)s"</code> is
|
||||
successful if the tenant identifier in the resource is
|
||||
equal to the tenant identifier of the user submitting
|
||||
the request.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>This extract is from the default
|
||||
<filename>policy.json</filename> file:</para>
|
||||
|
||||
<programlistingco>
|
||||
<areaspec>
|
||||
<area xml:id="networking_auth.json.rule"
|
||||
units="linecolumn" coords="2 23"/>
|
||||
<area xml:id="networking_auth.json.policy1"
|
||||
units="linecolumn" coords="31 16"/>
|
||||
<area xml:id="networking_auth.json.policy2"
|
||||
units="linecolumn" coords="62 20"/>
|
||||
<area xml:id="networking_auth.json.policy3"
|
||||
units="linecolumn" coords="70 30"/>
|
||||
<area xml:id="networking_auth.json.policy4"
|
||||
units="linecolumn" coords="88 32"/>
|
||||
</areaspec>
|
||||
<programlisting language="json"><xi:include href="../../common/samples/networking_auth.json" parse="text"/></programlisting>
|
||||
</programlistingco>
|
||||
<calloutlist>
|
||||
<callout arearefs="networking_auth.json.rule">
|
||||
<para>A rule that evaluates successfully if the current
|
||||
user is an administrator or the owner of the resource
|
||||
specified in the request (tenant identifier is
|
||||
equal).</para>
|
||||
</callout>
|
||||
<callout arearefs="networking_auth.json.policy1">
|
||||
<para>The default policy that is always evaluated if an
|
||||
API operation does not match any of the policies in
|
||||
<filename>policy.json</filename>.</para>
|
||||
</callout>
|
||||
<callout arearefs="networking_auth.json.policy2">
|
||||
<para>This policy evaluates successfully if either
|
||||
<emphasis role="italic">admin_or_owner</emphasis>,
|
||||
or <emphasis role="italic">shared</emphasis> evaluates
|
||||
successfully.</para>
|
||||
</callout>
|
||||
<callout arearefs="networking_auth.json.policy3">
|
||||
<para>This policy restricts the ability to manipulate the
|
||||
<emphasis role="italic">shared</emphasis>
|
||||
attribute for a network to administrators only.</para>
|
||||
</callout>
|
||||
<callout arearefs="networking_auth.json.policy4">
|
||||
<para>This policy restricts the ability to manipulate the
|
||||
<emphasis role="italic">mac_address</emphasis>
|
||||
attribute for a port only to administrators and the
|
||||
owner of the network where the port is
|
||||
attached.</para>
|
||||
</callout>
|
||||
</calloutlist>
|
||||
<para>In some cases, some operations are restricted to
|
||||
administrators only. This example shows you how to modify a
|
||||
policy file to permit tenants to define networks, see their
|
||||
resources, and permit administrative users to perform all other
|
||||
operations:</para>
|
||||
<programlisting language="bash">{
|
||||
"admin_or_owner": [["role:admin"], ["tenant_id:%(tenant_id)s"]],
|
||||
"admin_only": [["role:admin"]], "regular_user": [],
|
||||
"default": [["rule:admin_only"]],
|
||||
"create_subnet": [["rule:admin_only"]],
|
||||
"get_subnet": [["rule:admin_or_owner"]],
|
||||
"update_subnet": [["rule:admin_only"]],
|
||||
"delete_subnet": [["rule:admin_only"]],
|
||||
"create_network": [],
|
||||
"get_network": [["rule:admin_or_owner"]],
|
||||
"create_network:shared": [["rule:admin_only"]],
|
||||
"update_network": [["rule:admin_or_owner"]],
|
||||
"delete_network": [["rule:admin_or_owner"]],
|
||||
"create_port": [["rule:admin_only"]],
|
||||
"get_port": [["rule:admin_or_owner"]],
|
||||
"update_port": [["rule:admin_only"]],
|
||||
"delete_port": [["rule:admin_only"]]
|
||||
}</programlisting>
|
||||
</section>
|
@ -1,530 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="install_neutron_agent">
|
||||
<title>Configure neutron agents</title>
|
||||
<para>Plug-ins typically have requirements for particular
|
||||
software that must be run on each node that handles data
|
||||
packets. This includes any node that runs <systemitem
|
||||
class="service">nova-compute</systemitem> and nodes
|
||||
that run dedicated OpenStack Networking service agents
|
||||
such as <systemitem class="service">neutron-dhcp-agent</systemitem>,
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>,
|
||||
<systemitem class="service">neutron-metering-agent</systemitem> or
|
||||
<systemitem class="service">neutron-lbaas-agent</systemitem>.</para>
|
||||
<para>A data-forwarding node typically has a network interface
|
||||
with an IP address on the management network and another
|
||||
interface on the data network.</para>
|
||||
<para>This section shows you how to install and configure a
subset of the available plug-ins, which might include the
installation of switching software (for example, Open
vSwitch) as well as the agents used to communicate with the
<systemitem class="service"
>neutron-server</systemitem> process running elsewhere
in the data center.</para>
|
||||
<section xml:id="config_neutron_data_fwd_node">
|
||||
<title>Configure data-forwarding nodes</title>
|
||||
<section xml:id="install_neutron_agent_nsx">
|
||||
<title>Node set up: NSX plug-in</title>
|
||||
<para>If you use the NSX plug-in, you must also
|
||||
install Open vSwitch on each data-forwarding node.
|
||||
However, you do not need to install an additional
|
||||
agent on each node.</para>
|
||||
<warning>
|
||||
<para>It is critical that you run an Open vSwitch
|
||||
version that is compatible with the current
|
||||
version of the NSX Controller software. Do not
|
||||
use the Open vSwitch version that is installed
|
||||
by default on Ubuntu. Instead, use the Open
|
||||
vSwitch version that is provided on the VMware
|
||||
support portal for your NSX Controller
|
||||
version.</para>
|
||||
</warning>
|
||||
<procedure>
|
||||
<title>To set up each node for the NSX
|
||||
plug-in</title>
|
||||
<step>
|
||||
<para>Ensure that each data-forwarding node
|
||||
has an IP address on the management
|
||||
network, and an IP address on the "data
|
||||
network" that is used for tunneling data
|
||||
traffic. For full details on configuring
|
||||
your forwarding node, see the
|
||||
<citetitle>NSX Administrator
|
||||
Guide</citetitle>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Use the <citetitle>NSX Administrator
|
||||
Guide</citetitle> to add the node as a
|
||||
Hypervisor by using the NSX Manager GUI.
|
||||
Even if your forwarding node has no VMs
|
||||
and is only used for services agents like
|
||||
<systemitem class="service">neutron-dhcp-agent</systemitem>
|
||||
or
|
||||
<systemitem class="service">neutron-lbaas-agent</systemitem>,
|
||||
it should still be added to NSX as a
|
||||
Hypervisor.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>After following the <citetitle>NSX
|
||||
Administrator Guide</citetitle>, use
|
||||
the page for this Hypervisor in the NSX
|
||||
Manager GUI to confirm that the node is
|
||||
properly connected to the NSX Controller
|
||||
Cluster and that the NSX Controller
|
||||
Cluster can see the
|
||||
<literal>br-int</literal> integration
|
||||
bridge.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
</section>
|
||||
<section xml:id="install_neutron_dhcp">
|
||||
<title>Configure DHCP agent</title>
|
||||
<para>The DHCP service agent is compatible with all
|
||||
existing plug-ins and is required for all deployments
|
||||
where VMs should automatically receive IP addresses
|
||||
through DHCP.</para>
|
||||
<procedure>
|
||||
<title>To install and configure the DHCP agent</title>
|
||||
<step>
|
||||
<para>You must configure the host running the
|
||||
<systemitem class="service">neutron-dhcp-agent</systemitem>
|
||||
as a data forwarding node according to the
|
||||
requirements for your plug-in. See <xref
|
||||
linkend="install_neutron_agent"/>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Install the DHCP agent:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install neutron-dhcp-agent</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Finally, update any options in the
|
||||
<filename>/etc/neutron/dhcp_agent.ini</filename>
|
||||
file that depend on the plug-in in use. See
|
||||
the sub-sections.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<important>
|
||||
<para>If you reboot a node that runs the DHCP agent,
|
||||
you must run the
|
||||
<command>neutron-ovs-cleanup</command> command
|
||||
before the <systemitem class="service"
|
||||
>neutron-dhcp-agent</systemitem> service
|
||||
starts.</para>
|
||||
<para>On Red Hat, SUSE, and Ubuntu based systems, the
|
||||
<systemitem class="service"
|
||||
>neutron-ovs-cleanup</systemitem> service runs
|
||||
the <command>neutron-ovs-cleanup</command> command
|
||||
automatically. However, on Debian-based systems,
|
||||
you must manually run this command or
|
||||
write your own system script that runs on boot
|
||||
before the <systemitem class="service"
|
||||
>neutron-dhcp-agent</systemitem> service
|
||||
starts.</para>
|
||||
</important>
|
||||
<para>Networking dhcp-agent can use
|
||||
<link xlink:href="http://www.thekelleys.org.uk/dnsmasq/doc.html">dnsmasq</link>
|
||||
driver which supports stateful and stateless DHCPv6 for
|
||||
subnets created with <parameter>--ipv6_address_mode</parameter>
|
||||
set to <option>dhcpv6-stateful</option> or
|
||||
<option>dhcpv6-stateless</option>.
|
||||
</para>
|
||||
<para>For example:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron subnet-create --ip-version 6 --ipv6_ra_mode dhcpv6-stateful --ipv6_address_mode dhcpv6-stateful NETWORK CIDR</userinput></screen>
|
||||
<screen><prompt>$</prompt> <userinput>neutron subnet-create --ip-version 6 --ipv6_ra_mode dhcpv6-stateless --ipv6_address_mode dhcpv6-stateless NETWORK CIDR</userinput></screen>
|
||||
<para>If no dnsmasq process is running for a subnet's network,
Networking launches a new one on the
subnet's DHCP port in the <literal>qdhcp-XXX</literal>
namespace. If a dnsmasq process is already
running, Networking restarts it with a new configuration.
</para>
<para>Networking also updates the dnsmasq process and
restarts it when the subnet is updated.</para>
|
||||
<note>
|
||||
<para>For dhcp-agent to operate in IPv6 mode use at least dnsmasq v2.63.</para>
|
||||
</note>
|
||||
<para>After a configurable timeframe, networks uncouple from
DHCP agents that are no longer in use. You can
configure the DHCP agent to automatically detach from a network
when the agent is out of service or no longer needed.</para>
|
||||
<para>This feature applies to all plug-ins that support DHCP scaling. For more information,
|
||||
see the <link xlink:href="http://docs.openstack.org/kilo/config-reference/content/networking-options-dhcp.html">
|
||||
DHCP agent configuration options</link> listed in the OpenStack Configuration Reference.</para>
|
||||
<section xml:id="dhcp_agent_ovs">
|
||||
<title>DHCP agent setup: OVS plug-in</title>
|
||||
<para>These DHCP agent options are required in the
|
||||
<filename>/etc/neutron/dhcp_agent.ini</filename>
|
||||
file for the OVS plug-in:</para>
|
||||
<programlisting language="bash">[DEFAULT]
|
||||
enable_isolated_metadata = True
|
||||
use_namespaces = True
|
||||
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>
|
||||
</section>
|
||||
<section xml:id="dhcp_agent_nsx">
|
||||
<title>DHCP agent setup: NSX plug-in</title>
|
||||
<para>These DHCP agent options are required in the
|
||||
<filename>/etc/neutron/dhcp_agent.ini</filename>
|
||||
file for the NSX plug-in:</para>
|
||||
<programlisting language="bash">[DEFAULT]
|
||||
enable_metadata_network = True
|
||||
enable_isolated_metadata = True
|
||||
use_namespaces = True
|
||||
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>
|
||||
</section>
|
||||
</section>
|
||||
<section xml:id="install_neutron-l3">
|
||||
<title>Configure L3 agent</title>
|
||||
<para>The OpenStack Networking Service has a widely used
|
||||
API extension to allow administrators and tenants to
|
||||
create routers to interconnect L2 networks, and
|
||||
floating IPs to make ports on private networks
|
||||
publicly accessible.</para>
|
||||
<para>Many plug-ins rely on the L3 service agent to
|
||||
implement the L3 functionality. However, the following
|
||||
plug-ins already have built-in L3 capabilities:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Big Switch/Floodlight plug-in, which
|
||||
supports both the open source <link
|
||||
xlink:href="http://www.projectfloodlight.org/floodlight/"
|
||||
>Floodlight</link> controller and the
|
||||
proprietary Big Switch controller.</para>
|
||||
<note>
|
||||
<para>Only the proprietary BigSwitch
|
||||
controller implements L3 functionality.
|
||||
When using Floodlight as your OpenFlow
|
||||
controller, L3 functionality is not
|
||||
available.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>IBM SDN-VE plug-in</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>MidoNet plug-in</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>NSX plug-in</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>PLUMgrid plug-in</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<warning>
|
||||
<para>Do not configure or use
|
||||
<systemitem class="service">neutron-l3-agent</systemitem> if you
|
||||
use one of these plug-ins.</para>
|
||||
</warning>
|
||||
<procedure>
|
||||
<title>To install the L3 agent for all other
|
||||
plug-ins</title>
|
||||
<step>
|
||||
<para>Install the
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>
|
||||
binary on the network node:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install neutron-l3-agent</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>To uplink the node that runs
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>
|
||||
to the external network, create a bridge named
|
||||
"br-ex" and attach the NIC for the external
|
||||
network to this bridge.</para>
|
||||
<para>For example, with Open vSwitch and NIC eth1
|
||||
connected to the external network, run:</para>
|
||||
<screen><prompt>#</prompt> <userinput>ovs-vsctl add-br br-ex</userinput>
|
||||
<prompt>#</prompt> <userinput>ovs-vsctl add-port br-ex eth1</userinput></screen>
|
||||
<para>Do not manually configure an IP address on
|
||||
the NIC connected to the external network for
|
||||
the node running
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>.
|
||||
Rather, you must have a range of IP addresses
|
||||
from the external network that can be used by
|
||||
OpenStack Networking for routers that uplink
|
||||
to the external network. This range must be
|
||||
large enough to have an IP address for each
|
||||
router in the deployment, as well as each
|
||||
floating IP.</para>
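<para>As a sketch of this approach, you might create the external
network and reserve such a range with an allocation pool; the
names and addresses below are placeholders:</para>
<screen><prompt>$</prompt> <userinput>neutron net-create ext-net --router:external True</userinput>
<prompt>$</prompt> <userinput>neutron subnet-create ext-net 203.0.113.0/24 --disable-dhcp \
  --gateway 203.0.113.1 --allocation-pool start=203.0.113.101,end=203.0.113.200</userinput></screen>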
|
||||
</step>
|
||||
<step>
|
||||
<para>The
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>
|
||||
uses the Linux IP stack and iptables to
|
||||
perform L3 forwarding and NAT. In order to
|
||||
support multiple routers with potentially
|
||||
overlapping IP addresses,
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>
|
||||
defaults to using Linux network namespaces to
|
||||
provide isolated forwarding contexts. As a
|
||||
result, the IP addresses of routers are not
|
||||
visible simply by running the <command>ip addr
|
||||
list</command> or
|
||||
<command>ifconfig</command> command on the
|
||||
node. Similarly, you cannot directly
|
||||
<command>ping</command> fixed IPs.</para>
|
||||
<para>To do either of these things, you must run
|
||||
the command within a particular network
|
||||
namespace for the router. The namespace has
|
||||
the name "qrouter-<replaceable>ROUTER_UUID</replaceable>.
|
||||
These example commands run in the router
|
||||
namespace with UUID
|
||||
47af3868-0fa8-4447-85f6-1304de32153b:</para>
|
||||
<screen><prompt>#</prompt> <userinput>ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ip addr list</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ping <replaceable>FIXED_IP</replaceable></userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
<note>
|
||||
<para>For iproute version 3.12.0 and above, network
|
||||
namespaces are configured to be deleted by default.
|
||||
This behavior can be changed for both DHCP and L3 agents.
|
||||
The configuration files are <filename>/etc/neutron/dhcp_agent.ini
|
||||
</filename> and <filename>/etc/neutron/l3_agent.ini</filename>
|
||||
respectively.</para>
|
||||
<para>For DHCP namespaces, the configuration key is
<literal>dhcp_delete_namespaces = True</literal>. You
can set it to <replaceable>False</replaceable> in case
namespaces cannot be deleted cleanly on the host
running the DHCP agent.</para>
<para>For L3 namespaces, the configuration key is
<literal>router_delete_namespaces = True</literal>. You
can set it to <replaceable>False</replaceable> in case
namespaces cannot be deleted cleanly on the host
running the L3 agent.</para>
|
||||
</note>
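<para>A minimal sketch of the two settings described above, each
set in its respective agent configuration file:</para>
<programlisting language="ini"># /etc/neutron/dhcp_agent.ini
[DEFAULT]
dhcp_delete_namespaces = True

# /etc/neutron/l3_agent.ini
[DEFAULT]
router_delete_namespaces = True</programlisting>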
|
||||
<important>
|
||||
<para>If you reboot a node that runs the L3 agent, you
|
||||
must run the
|
||||
<command>neutron-ovs-cleanup</command> command
|
||||
before the <systemitem class="service"
|
||||
>neutron-l3-agent</systemitem> service
|
||||
starts.</para>
|
||||
<para>On Red Hat, SUSE and Ubuntu based systems, the
|
||||
<systemitem class="service"
|
||||
>neutron-ovs-cleanup</systemitem> service runs
|
||||
the <command>neutron-ovs-cleanup</command> command
|
||||
automatically. However, on Debian-based systems,
|
||||
you must
|
||||
manually run this command or write your own system
|
||||
script that runs on boot before the <systemitem
|
||||
class="service">neutron-l3-agent</systemitem>
|
||||
service starts.</para>
|
||||
</important>
|
||||
</section>
|
||||
<section xml:id="install_neutron-metering-agent">
|
||||
<title>Configure metering agent</title>
|
||||
<para>The Neutron Metering agent resides beside
|
||||
<systemitem class="service">neutron-l3-agent</systemitem>.</para>
|
||||
<procedure>
|
||||
<title>To install the metering agent and configure the
|
||||
node</title>
|
||||
<step>
|
||||
<para>Install the agent by running:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install neutron-metering-agent</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>If you use one of the following plug-ins, you
|
||||
need to configure the metering agent with
|
||||
these lines as well:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>An OVS-based plug-in such as OVS,
|
||||
NSX, NEC,
|
||||
BigSwitch/Floodlight:</para>
|
||||
<programlisting language="ini">interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A plug-in that uses
|
||||
LinuxBridge:</para>
|
||||
<programlisting language="ini">interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver</programlisting>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</step>
|
||||
<step>
|
||||
<para>To use the reference implementation, you
|
||||
must set:</para>
|
||||
<programlisting language="ini">driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Set the <option>service_plugins</option> option in the
|
||||
<filename>/etc/neutron/neutron.conf</filename> file on
|
||||
the host that runs <systemitem class="service"
|
||||
>neutron-server</systemitem>:</para>
|
||||
<programlisting language="ini">service_plugins = metering</programlisting>
|
||||
<para>If this option is already defined, add
|
||||
<literal>metering</literal> to the list, using a comma as
|
||||
separator. For example:</para>
|
||||
<programlisting language="ini">service_plugins = router,metering</programlisting>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
<section xml:id="install_neutron-lbaas-agent">
|
||||
<title>Configure Load-Balancer-as-a-Service
|
||||
(LBaaS)</title>
|
||||
<para>Configure Load-Balancer-as-a-Service (LBaaS) with
|
||||
the Open vSwitch or Linux Bridge plug-in. The Open
|
||||
vSwitch LBaaS driver is required when enabling LBaaS
|
||||
for OVS-based plug-ins, including BigSwitch,
|
||||
Floodlight, NEC, and NSX.</para>
|
||||
<procedure>
|
||||
<title>To configure LBaaS with the Open vSwitch or Linux
Bridge plug-in</title>
|
||||
<step>
|
||||
<para>Install the agent:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install neutron-lbaas-agent haproxy</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Enable the
|
||||
<productname>HAProxy</productname> plug-in
|
||||
by using the <option>service_provider</option>
|
||||
option in the
|
||||
<filename>/etc/neutron/neutron.conf</filename>
|
||||
file:</para>
|
||||
<programlisting language="ini">service_provider = LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default</programlisting>
|
||||
<warning>
|
||||
<para>The <option>service_provider</option> option is
|
||||
already defined in the
|
||||
<filename>/usr/share/neutron/neutron-dist.conf</filename>
|
||||
file on Red Hat based systems. Do not define it in
|
||||
<filename>neutron.conf</filename> otherwise the
|
||||
Networking services will fail to restart.</para>
|
||||
</warning>
|
||||
</step>
|
||||
<step>
|
||||
<para>Enable the load-balancing plug-in by using the
|
||||
<option>service_plugins</option> option in the
|
||||
<filename>/etc/neutron/neutron.conf</filename>
|
||||
file:</para>
|
||||
<programlisting language="ini">service_plugins = lbaas</programlisting>
|
||||
<para>If this option is already defined, add
|
||||
<literal>lbaas</literal> to the list, using a comma as
|
||||
separator. For example:</para>
|
||||
<programlisting language="ini">service_plugins = router,lbaas</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Enable the
|
||||
<productname>HAProxy</productname> load
|
||||
balancer in the
|
||||
<filename>/etc/neutron/lbaas_agent.ini</filename>
|
||||
file:</para>
|
||||
<programlisting language="ini">device_driver = neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Select the required driver in the
|
||||
<filename>/etc/neutron/lbaas_agent.ini</filename>
|
||||
file:</para>
|
||||
<para>Enable the Open vSwitch LBaaS driver:</para>
|
||||
<programlisting language="ini">interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>
|
||||
<para>Or, enable the Linux Bridge LBaaS
|
||||
driver:</para>
|
||||
<programlisting language="ini">interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create the required tables in the database:</para>
|
||||
<screen><prompt>#</prompt> <userinput>neutron-db-manage --service lbaas upgrade head</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Apply the settings by restarting the
|
||||
<systemitem class="service">neutron-server</systemitem>
|
||||
and
|
||||
<systemitem class="service">neutron-lbaas-agent</systemitem>
|
||||
services.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Enable load balancing in the
|
||||
<guimenu>Project</guimenu> section of the
|
||||
dashboard.</para>
|
||||
<para>Change the <option>enable_lb</option> option
|
||||
to <literal>True</literal> in the
|
||||
<filename>local_settings</filename> file
|
||||
(on Fedora, RHEL, and CentOS:
|
||||
<filename>/etc/openstack-dashboard/local_settings</filename>,
|
||||
on Ubuntu and Debian:
|
||||
<filename>/etc/openstack-dashboard/local_settings.py</filename>,
|
||||
and on openSUSE and SLES:
|
||||
<filename>/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py</filename>):</para>
|
||||
<programlisting language="python">OPENSTACK_NEUTRON_NETWORK = {
|
||||
'enable_lb': True,
|
||||
...
|
||||
}</programlisting>
|
||||
<para>Apply the settings by restarting the web server.
|
||||
You can now view the Load Balancer management
|
||||
options in the <guimenu>Project</guimenu> view
|
||||
in the dashboard.</para>
|
||||
</step>
|
||||
</procedure>
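            <para>As a quick reference, the LBaaS-related settings from the preceding
                procedure can be summarized in one place. This is an illustrative
                sketch only, assuming the Open vSwitch driver; pick the interface
                driver that matches your plug-in, and remember the note about
                <option>service_provider</option> on Red Hat based systems.</para>
            <programlisting language="ini"># /etc/neutron/neutron.conf
service_plugins = router,lbaas
service_provider = LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default

# /etc/neutron/lbaas_agent.ini
device_driver = neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver</programlisting>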
|
||||
</section>
|
||||
<section xml:id="install_neutron-hyperv-agent">
|
||||
<title>Configure Hyper-V L2 agent</title>
|
||||
<para>Before you install the OpenStack Networking Hyper-V L2 agent on a
|
||||
Hyper-V compute node, ensure the compute node has been configured
|
||||
correctly using these <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/hyper-v-virtualization-platform.html"
|
||||
>instructions</link>.</para>
|
||||
<procedure>
|
||||
<title>To install the OpenStack Networking Hyper-V agent and configure the node</title>
|
||||
<step>
|
||||
<para>Download the OpenStack Networking code from the repository:</para>
|
||||
<screen><prompt>></prompt> <userinput>cd C:\OpenStack\</userinput>
|
||||
<prompt>></prompt> <userinput>git clone https://git.openstack.org/cgit/openstack/neutron</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Install the OpenStack Networking Hyper-V Agent:</para>
|
||||
                    <screen><prompt>></prompt> <userinput>cd C:\OpenStack\neutron\</userinput>
<prompt>></prompt> <userinput>python setup.py install</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Copy the <filename>policy.json</filename> file:</para>
|
||||
<screen><prompt>></prompt> <userinput>xcopy C:\OpenStack\neutron\etc\policy.json C:\etc\</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create the <filename>C:\etc\neutron-hyperv-agent.conf</filename> file and add the
|
||||
proper configuration options and the <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/networking-plugin-hyperv_agent.html">Hyper-V
|
||||
related options</link>. Here is a sample config file:</para>
|
||||
<programlisting><xi:include parse="text" href="../../common/samples/neutron-hyperv-agent.conf"/></programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Start the OpenStack Networking Hyper-V agent:</para>
|
||||
<screen><prompt>></prompt> <userinput>C:\Python27\Scripts\neutron-hyperv-agent.exe --config-file C:\etc\neutron-hyperv-agent.conf</userinput></screen>
|
||||
</step>
|
||||
</procedure>
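            <para>The contents of the included sample file are not reproduced here.
                As a rough, hypothetical sketch of what
                <filename>C:\etc\neutron-hyperv-agent.conf</filename> can contain
                (verify option names and values against the Hyper-V agent
                configuration reference for your release):</para>
            <programlisting language="ini"># Illustrative sketch only; check against the configuration reference
[DEFAULT]
control_exchange = neutron
rabbit_host = IP_ADDRESS_OF_RABBITMQ_HOST

[AGENT]
polling_interval = 2
physical_network_vswitch_mappings = *:YOUR_EXTERNAL_VSWITCH_NAME</programlisting>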
|
||||
</section>
|
||||
<section xml:id="basic_operations_on_agents">
|
||||
<title>Basic operations on agents</title>
|
||||
<para>This table shows examples of Networking commands that enable you to complete basic operations on agents:</para>
|
||||
<table rules="all">
|
||||
<caption>Basic operations on Networking agents</caption>
|
||||
<col width="40%"/>
|
||||
<col width="60%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Operation</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>List all available agents.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron agent-list</userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Show information of a given agent.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron agent-show <replaceable>AGENT_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
                        <td>Update the admin status and description for a specified agent. The command can be used to enable and disable agents by setting the <parameter>--admin-state-up</parameter> parameter to <literal>False</literal> or <literal>True</literal>.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron agent-update --admin-state-up False <replaceable>AGENT_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Delete a given agent. Consider disabling the agent before deletion.</td>
|
||||
<td><screen><prompt>$</prompt> <userinput>neutron agent-delete <replaceable>AGENT_ID</replaceable></userinput></screen></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
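            <para>For example, to take an agent out of service before removing it, you
                might combine the last two operations from the table above (the
                identifier shown is a placeholder):</para>
            <screen><prompt>$</prompt> <userinput>neutron agent-update --admin-state-up False <replaceable>AGENT_ID</replaceable></userinput>
<prompt>$</prompt> <userinput>neutron agent-delete <replaceable>AGENT_ID</replaceable></userinput></screen>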
|
||||
<para>See the <link xlink:href="http://docs.openstack.org/cli-reference/content/index.html"><citetitle>OpenStack Command-Line Interface Reference</citetitle></link>
|
||||
for more information on Networking commands.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,210 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_plugin-config">
|
||||
<title>Plug-in configurations</title>
|
||||
<para>For configurations options, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/config-reference/content/section_networking-options-reference.html"
|
||||
>Networking configuration options</link> in <citetitle>Configuration
|
||||
Reference</citetitle>. These sections explain how to configure specific plug-ins.</para>
|
||||
<section xml:id="bigswitch_floodlight_plugin">
|
||||
<title>Configure Big Switch (Floodlight REST Proxy) plug-in</title>
|
||||
<procedure>
|
||||
<title>To use the REST proxy plug-in with OpenStack Networking</title>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and add this
|
||||
line:</para>
|
||||
<programlisting language="ini">core_plugin = bigswitch</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>In the <filename>/etc/neutron/neutron.conf</filename>
|
||||
file, set the <literal>service_plugins</literal> option:</para>
|
||||
<programlisting>service_plugins = <replaceable>neutron.plugins.bigswitch.l3_router_plugin.L3RestProxy</replaceable></programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/neutron/plugins/bigswitch/restproxy.ini</filename>
|
||||
file for the plug-in and specify a comma-separated list of
|
||||
<systemitem>controller_ip:port</systemitem> pairs:</para>
|
||||
<programlisting language="ini">server = <replaceable>CONTROLLER_IP</replaceable>:<replaceable>PORT</replaceable></programlisting>
|
||||
<para>For database configuration, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/install-guide/install/apt/content/neutron-controller-node.html"
|
||||
>Install Networking Services</link> in the <citetitle>Installation
|
||||
Guide</citetitle> in the <link xlink:href="http://docs.openstack.org"
|
||||
>OpenStack Documentation index</link>. (The link defaults to the Ubuntu
|
||||
version.)</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart <systemitem class="service">neutron-server</systemitem> to apply the
|
||||
settings:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
<section xml:id="brocade_plugin">
|
||||
<title>Configure Brocade plug-in</title>
|
||||
<procedure>
|
||||
<title>To use the Brocade plug-in with OpenStack Networking</title>
|
||||
<step>
|
||||
<para>Install the Brocade-modified Python netconf client (ncclient) library, which
|
||||
is available at <link xlink:href="https://github.com/brocade/ncclient"
|
||||
>https://github.com/brocade/ncclient</link>:</para>
|
||||
<screen><prompt>$</prompt> <userinput>git clone https://github.com/brocade/ncclient</userinput></screen>
|
||||
<para>As <systemitem class="username">root</systemitem>, run this command:</para>
|
||||
<screen><prompt>#</prompt> <userinput>cd ncclient;python setup.py install</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set the
|
||||
following option:</para>
|
||||
<programlisting language="ini">core_plugin = brocade</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/neutron/plugins/brocade/brocade.ini</filename> file
|
||||
for the Brocade plug-in and specify the admin user name, password, and IP
|
||||
address of the Brocade switch:</para>
|
||||
<programlisting language="ini">[SWITCH]
|
||||
username = <replaceable>ADMIN</replaceable>
|
||||
password = <replaceable>PASSWORD</replaceable>
|
||||
address = <replaceable>SWITCH_MGMT_IP_ADDRESS</replaceable>
|
||||
ostype = NOS</programlisting>
|
||||
<para>For database configuration, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/install-guide/install/apt/content/neutron-controller-node.html"
|
||||
>Install Networking Services</link> in any of the <citetitle>Installation
|
||||
Guides</citetitle> in the <link xlink:href="http://docs.openstack.org"
|
||||
>OpenStack Documentation index</link>. (The link defaults to the Ubuntu
|
||||
version.)</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart the <systemitem class="service">neutron-server</systemitem> service to
|
||||
apply the settings:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
<section xml:id="nsx_plugin">
|
||||
<title>Configure NSX-mh plug-in</title>
|
||||
<procedure>
|
||||
<title>Configuring OpenStack Networking to use the NSX multi hypervisor plug-in</title>
|
||||
<para>The instructions in this section refer to the VMware NSX-mh platform, formerly known as Nicira NVP.</para>
|
||||
<step>
|
||||
<para>Install the NSX plug-in:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install neutron-plugin-vmware</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set this
|
||||
line:</para>
|
||||
<programlisting language="ini">core_plugin = vmware</programlisting>
|
||||
<para>Example <filename>neutron.conf</filename> file for NSX-mh integration:</para>
|
||||
<programlisting language="ini">core_plugin = vmware
|
||||
rabbit_host = 192.168.203.10
|
||||
allow_overlapping_ips = True</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>To configure the NSX-mh controller cluster for OpenStack Networking, locate the
|
||||
<literal>[default]</literal> section in the
|
||||
<filename>/etc/neutron/plugins/vmware/nsx.ini</filename> file and add the
|
||||
following entries:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>To establish and configure the connection with the controller cluster
|
||||
you must set some parameters, including NSX-mh API endpoints, access
|
||||
credentials, and optionally specify settings for HTTP timeouts, redirects
|
||||
and retries in case of connection failures:</para>
|
||||
<programlisting language="ini">nsx_user = <replaceable>ADMIN_USER_NAME</replaceable>
|
||||
nsx_password = <replaceable>NSX_USER_PASSWORD</replaceable>
|
||||
http_timeout = <replaceable>HTTP_REQUEST_TIMEOUT</replaceable> # (seconds) default 75 seconds
|
||||
retries = <replaceable>HTTP_REQUEST_RETRIES</replaceable> # default 2
|
||||
redirects = <replaceable>HTTP_REQUEST_MAX_REDIRECTS</replaceable> # default 2
|
||||
nsx_controllers = <replaceable>API_ENDPOINT_LIST</replaceable> # comma-separated list</programlisting>
|
||||
<para>To ensure correct operations, the <literal>nsx_user</literal> user
|
||||
must have administrator credentials on the NSX-mh platform.</para>
|
||||
<para>A controller API endpoint consists of the IP address and port for the
|
||||
controller; if you omit the port, port 443 is used. If multiple API
|
||||
endpoints are specified, it is up to the user to ensure that all these
|
||||
endpoints belong to the same controller cluster. The OpenStack
|
||||
Networking VMware NSX-mh plug-in does not perform this check, and results
|
||||
might be unpredictable.</para>
|
||||
<para>When you specify multiple API endpoints, the plug-in takes care of load balancing
|
||||
requests on the various API endpoints.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The UUID of the NSX-mh transport zone that should be used by default when
|
||||
a tenant creates a network. You can get this value from the
|
||||
<guilabel>Transport Zones</guilabel> page for the NSX-mh manager:</para>
|
||||
                        <para>Alternatively, the transport zone identifier can be retrieved by querying the NSX-mh
                            API: <literal>/ws.v1/transport-zone</literal></para>
|
||||
<programlisting language="ini">default_tz_uuid = <replaceable>TRANSPORT_ZONE_UUID</replaceable></programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<programlisting language="ini">default_l3_gw_service_uuid = <replaceable>GATEWAY_SERVICE_UUID</replaceable></programlisting>
|
||||
<warning>
|
||||
<para>Ubuntu packaging currently does not update the neutron init script
|
||||
to point to the NSX-mh configuration file. Instead, you must manually
|
||||
update <filename>/etc/default/neutron-server</filename> to add this
|
||||
line:</para>
|
||||
<programlisting language="ini">NEUTRON_PLUGIN_CONFIG = /etc/neutron/plugins/vmware/nsx.ini</programlisting>
|
||||
</warning>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>For database configuration, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/install-guide/install/apt/content/neutron-controller-node.html"
|
||||
>Install Networking Services</link> in the <citetitle>Installation
|
||||
Guide</citetitle>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart <systemitem class="service">neutron-server</systemitem> to apply
|
||||
settings:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
|
||||
<warning>
|
||||
<para>The neutron NSX-mh plug-in does not implement initial re-synchronization of Neutron resources.
|
||||
Therefore resources that might already exist in the database when Neutron is switched to the
|
||||
NSX-mh plug-in will not be created on the NSX-mh backend upon restart.</para>
|
||||
</warning>
|
||||
</step>
|
||||
</procedure>
|
||||
<para>Example <filename>nsx.ini</filename> file:</para>
|
||||
<programlisting language="ini">[DEFAULT]
|
||||
default_tz_uuid = d3afb164-b263-4aaa-a3e4-48e0e09bb33c
|
||||
default_l3_gw_service_uuid=5c8622cc-240a-40a1-9693-e6a5fca4e3cf
|
||||
nsx_user=admin
|
||||
nsx_password=changeme
|
||||
nsx_controllers=10.127.0.100,10.127.0.200:8888</programlisting>
|
||||
<note>
|
||||
<para>To debug <filename>nsx.ini</filename> configuration issues, run this command from
|
||||
the host that runs <systemitem class="service">neutron-server</systemitem>:</para>
|
||||
<screen><prompt>#</prompt> <userinput>neutron-check-nsx-config <replaceable>PATH_TO_NSX.INI</replaceable></userinput></screen>
|
||||
<para>This command tests whether <systemitem class="service">neutron-server</systemitem>
|
||||
can log into all of the NSX-mh controllers and the SQL server, and whether all UUID
|
||||
values are correct.</para>
|
||||
</note>
|
||||
</section>
|
||||
<section xml:id="PLUMgridplugin">
|
||||
<title>Configure PLUMgrid plug-in</title>
|
||||
<procedure>
|
||||
<title>To use the PLUMgrid plug-in with OpenStack Networking</title>
|
||||
<step>
|
||||
<para>Edit the <filename>/etc/neutron/neutron.conf</filename> file and set this
|
||||
line:</para>
|
||||
<programlisting language="ini">core_plugin = plumgrid</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Edit the <systemitem>[PLUMgridDirector]</systemitem> section in the
|
||||
<filename>/etc/neutron/plugins/plumgrid/plumgrid.ini</filename> file and
|
||||
specify the IP address, port, admin user name, and password of the PLUMgrid
|
||||
Director:</para>
|
||||
<programlisting language="ini">[PLUMgridDirector]
|
||||
director_server = "PLUMgrid-director-ip-address"
|
||||
director_server_port = "PLUMgrid-director-port"
|
||||
username = "PLUMgrid-director-admin-username"
|
||||
password = "PLUMgrid-director-admin-password"</programlisting>
|
||||
<para>For database configuration, see <link
|
||||
xlink:href="http://docs.openstack.org/kilo/install-guide/install/apt/content/neutron-controller-node.html"
|
||||
>Install Networking Services</link> in the <citetitle>Installation
|
||||
Guide</citetitle>.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart the <systemitem class="service">neutron-server</systemitem> service to
|
||||
apply the settings:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
|
||||
</step>
|
||||
</procedure>
|
||||
</section>
|
||||
</section>
|
@ -1,405 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_networking-intro">
|
||||
<title>Introduction to Networking</title>
|
||||
<para>The Networking service, code-named neutron, provides an API
|
||||
that lets you define network connectivity and addressing in
|
||||
the cloud. The Networking service enables operators to
|
||||
leverage different networking technologies to power their
|
||||
cloud networking. The Networking service also provides an API
|
||||
to configure and manage a variety of network services ranging
|
||||
from L3 forwarding and NAT to load balancing, edge firewalls,
|
||||
and IPsec VPN.</para>
|
||||
<para>For a detailed description of the Networking API
|
||||
abstractions and their attributes, see the <link
|
||||
xlink:href="http://developer.openstack.org/api-ref-networking-v2.html"
|
||||
><citetitle>OpenStack Networking API v2.0
|
||||
Reference</citetitle></link>.</para>
|
||||
<section xml:id="section_networking-api">
|
||||
<title>Networking API</title>
|
||||
<para>Networking is a virtual network service that provides a
|
||||
powerful API to define the network connectivity and IP
|
||||
addressing that devices from other services, such as
|
||||
Compute, use.</para>
|
||||
<para>The Compute API has a virtual server abstraction to
|
||||
describe computing resources. Similarly, the Networking
|
||||
API has virtual network, subnet, and port abstractions to
|
||||
describe networking resources.</para>
|
||||
<table rules="all">
|
||||
<caption>Networking resources</caption>
|
||||
<col width="10%"/>
|
||||
<col width="90%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Resource</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Network</emphasis></td>
|
||||
<td>An isolated L2 segment, analogous to VLAN in
|
||||
the physical networking world.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Subnet</emphasis></td>
|
||||
<td>A block of v4 or v6 IP addresses and
|
||||
associated configuration state.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Port</emphasis></td>
|
||||
<td>A connection point for attaching a single
|
||||
device, such as the NIC of a virtual server,
|
||||
to a virtual network. Also describes the
|
||||
associated network configuration, such as the
|
||||
MAC and IP addresses to be used on that
|
||||
port.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<para>To configure rich network topologies, you can create and
|
||||
configure networks and subnets and instruct other
|
||||
OpenStack services like Compute to attach virtual devices
|
||||
to ports on these networks.</para>
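        <para>For example, a tenant can build a simple topology with the
            <command>neutron</command> client; the names and CIDR shown below are
            illustrative only:</para>
        <screen><prompt>$</prompt> <userinput>neutron net-create net1</userinput>
<prompt>$</prompt> <userinput>neutron subnet-create --name subnet1 net1 192.168.2.0/24</userinput>
<prompt>$</prompt> <userinput>neutron port-create net1</userinput></screen>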
|
||||
<para>In particular, Networking supports each tenant having
|
||||
multiple private networks and enables tenants to choose
|
||||
their own IP addressing scheme, even if those IP addresses
|
||||
overlap with those that other tenants use.</para>
|
||||
<para>The Networking service:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Enables advanced cloud networking use cases,
|
||||
such as building multi-tiered web applications and
|
||||
enabling migration of applications to the cloud
|
||||
without changing IP addresses.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Offers flexibility for the cloud administrator
|
||||
to customize network offerings.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Enables developers to extend the Networking API.
|
||||
Over time, the extended functionality becomes part
|
||||
of the core Networking API.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
<section xml:id="section_networking-api-ssl">
|
||||
<title>Configure SSL support for networking API</title>
|
||||
<para>OpenStack Networking supports SSL for the Networking API
|
||||
server. By default, SSL is disabled but you can enable it
|
||||
in the <filename>neutron.conf</filename> file.</para>
|
||||
<para>Set these options to configure SSL:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term><code>use_ssl = True</code></term>
|
||||
<listitem>
|
||||
<para>Enables SSL on the networking API
|
||||
server.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><code>ssl_cert_file =
|
||||
<replaceable>PATH_TO_CERTFILE</replaceable></code></term>
|
||||
<listitem>
|
||||
<para>Certificate file that is used when you
|
||||
securely start the Networking API
|
||||
server.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><code>ssl_key_file =
|
||||
<replaceable>PATH_TO_KEYFILE</replaceable></code></term>
|
||||
<listitem>
|
||||
<para>Private key file that is used when you
|
||||
securely start the Networking API
|
||||
server.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><code>ssl_ca_file =
|
||||
<replaceable>PATH_TO_CAFILE</replaceable></code></term>
|
||||
<listitem>
|
||||
<para>Optional. CA certificate file that is used
|
||||
when you securely start the Networking API
|
||||
server. This file verifies connecting clients.
|
||||
Set this option when API clients must
|
||||
authenticate to the API server by using SSL
|
||||
certificates that are signed by a trusted
|
||||
CA.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><code>tcp_keepidle = 600</code></term>
|
||||
<listitem>
|
||||
<para>The value of TCP_KEEPIDLE, in seconds, for
|
||||
each server socket when starting the API
|
||||
server. Not supported on OS X.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><code>retry_until_window = 30</code></term>
|
||||
<listitem>
|
||||
<para>Number of seconds to keep retrying to
|
||||
listen.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term><code>backlog = 4096</code></term>
|
||||
<listitem>
|
||||
<para>Number of backlog requests with which to
|
||||
configure the socket.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
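        <para>Put together, a <filename>neutron.conf</filename> fragment that
            enables SSL with client-certificate verification might look like the
            following sketch (the file paths are placeholders):</para>
        <programlisting language="ini">[DEFAULT]
use_ssl = True
ssl_cert_file = /etc/neutron/ssl/server.crt
ssl_key_file = /etc/neutron/ssl/server.key
ssl_ca_file = /etc/neutron/ssl/ca.crt
tcp_keepidle = 600
retry_until_window = 30
backlog = 4096</programlisting>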
|
||||
</section>
|
||||
<section xml:id="section_lbaas-overview">
|
||||
<title>Load-Balancer-as-a-Service (LBaaS) overview</title>
|
||||
<para><glossterm>Load-Balancer-as-a-Service
|
||||
(LBaaS)</glossterm> enables Networking to distribute
|
||||
incoming requests evenly among designated instances. This
|
||||
distribution ensures that the workload is shared
|
||||
predictably among instances and enables more effective use
|
||||
of system resources. Use one of these load balancing
|
||||
methods to distribute incoming requests:</para>
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>Round robin</term>
|
||||
<listitem>
|
||||
<para>Rotates requests evenly between multiple
|
||||
instances.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Source IP</term>
|
||||
<listitem>
|
||||
<para>Requests from a unique source IP address are
|
||||
consistently directed to the same
|
||||
instance.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>Least connections</term>
|
||||
<listitem>
|
||||
<para>Allocates requests to the instance with the
|
||||
least number of active connections.</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
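        <para>For example, with the LBaaS agent configured, you might create a
            pool that uses the round-robin method; the pool name and subnet shown
            are illustrative only:</para>
        <screen><prompt>$</prompt> <userinput>neutron lb-pool-create --lb-method ROUND_ROBIN --name mypool --protocol HTTP --subnet-id <replaceable>SUBNET_UUID</replaceable></userinput></screen>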
|
||||
<table rules="all">
|
||||
<caption>LBaaS features</caption>
|
||||
<col width="25%"/>
|
||||
<col width="75%"/>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Feature</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
<emphasis role="bold">Monitors</emphasis></td>
|
||||
<td>LBaaS provides availability monitoring with
|
||||
the <command>ping</command>, TCP, HTTP and
|
||||
HTTPS GET methods. <glossterm
|
||||
baseform="Monitor
|
||||
(LBaaS)"
|
||||
>Monitors</glossterm> are implemented to
|
||||
determine whether pool members are available
|
||||
to handle requests.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold"
|
||||
>Management</emphasis></td>
|
||||
<td>LBaaS is managed using a variety of tool sets.
|
||||
The <systemitem>REST API</systemitem> is
|
||||
available for programmatic administration and
|
||||
scripting. Users perform administrative
|
||||
management of load balancers through either
|
||||
the CLI (<command>neutron</command>) or the
|
||||
OpenStack dashboard.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Connection
|
||||
limits</emphasis></td>
|
||||
<td>Ingress traffic can be shaped with <emphasis>
|
||||
connection limits</emphasis>. This feature
|
||||
allows workload control, and can also assist
|
||||
with mitigating DoS (Denial of Service)
|
||||
attacks.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><emphasis role="bold">Session
|
||||
persistence</emphasis></td>
|
||||
<td>
|
||||
<para>LBaaS supports session persistence by
|
||||
ensuring incoming requests are routed to
|
||||
the same instance within a pool of
|
||||
multiple instances. LBaaS supports routing
|
||||
decisions based on cookies and source IP
|
||||
address.</para></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</section>
|
||||
<section xml:id="install_neutron-fwaas-agent">
|
||||
<title>Firewall-as-a-Service (FWaaS) overview</title>
|
||||
<para>The <glossterm>Firewall-as-a-Service (FWaaS)</glossterm>
|
||||
plug-in adds perimeter firewall management to Networking.
|
||||
FWaaS uses iptables to apply firewall policy to all
|
||||
Networking routers within a project. FWaaS supports one
|
||||
firewall policy and logical firewall instance per
|
||||
project.</para>
|
||||
<para>Whereas security groups operate at the instance-level,
|
||||
FWaaS operates at the perimeter to filter traffic at the
|
||||
neutron router.</para>
|
||||
<note>
|
||||
<para>FWaaS is currently in technical preview; untested
|
||||
operation is not recommended.</para>
|
||||
</note>
|
||||
<para>The example diagram illustrates the flow of ingress and
|
||||
egress traffic for the VM2 instance:</para>
|
||||
<figure>
|
||||
<title>FWaaS architecture</title>
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata
|
||||
fileref="../../common/figures/fwaas.png"
|
||||
format="PNG" contentwidth="7in"/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
</figure>
|
||||
<procedure>
|
||||
<title>To enable FWaaS</title>
|
||||
<para>FWaaS management options are also available in the
|
||||
OpenStack dashboard.</para>
|
||||
<step>
|
||||
<para>Enable the FWaaS plug-in in the
|
||||
<filename>/etc/neutron/neutron.conf</filename> file:</para>
|
||||
<programlisting language="ini">service_plugins = firewall
|
||||
[service_providers]
|
||||
...
|
||||
service_provider = FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
|
||||
|
||||
[fwaas]
|
||||
driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
|
||||
enabled = True</programlisting>
|
||||
<note>
|
||||
<para>On Ubuntu, modify the <literal>[fwaas]</literal> section
|
||||
in the <filename>/etc/neutron/fwaas_driver.ini</filename>
|
||||
file instead of <filename>/etc/neutron/neutron.conf</filename>.</para>
|
||||
</note>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create the required tables in the database:</para>
|
||||
<screen><prompt>#</prompt> <userinput>neutron-db-manage --service fwaas upgrade head</userinput></screen>
|
||||
</step>
|
||||
<step>
|
||||
<para>Enable the option in the
|
||||
<filename>/usr/share/openstack-dashboard/openstack_dashboard/local/local_settings.py</filename>
|
||||
file, which is typically located on the controller
|
||||
node:</para>
|
||||
<programlisting language="ini">OPENSTACK_NEUTRON_NETWORK = {
|
||||
...
|
||||
'enable_firewall' = True,
|
||||
...}</programlisting>
|
||||
</step>
|
||||
<step>
|
||||
<para>Restart the <systemitem class="service">neutron-l3-agent</systemitem> and
|
||||
<systemitem class="service">neutron-server</systemitem> services
|
||||
to apply the settings.</para>
|
||||
</step>
|
||||
</procedure>
|
||||
<procedure>
|
||||
<title>To configure Firewall-as-a-Service</title>
|
||||
<para>Create the firewall rules and create a policy that
|
||||
contains them. Then, create a firewall that applies
|
||||
the policy.</para>
|
||||
<step>
|
||||
<para>Create a firewall rule:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron firewall-rule-create --protocol {tcp|udp|icmp|any} --destination-port <replaceable>PORT_RANGE</replaceable> --action {allow|deny}</userinput></screen>
|
||||
<para>The Networking client requires a protocol value;
|
||||
if the rule is protocol agnostic, you can use the
|
||||
<literal>any</literal> value.</para>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create a firewall policy:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron firewall-policy-create --firewall-rules "<replaceable>FIREWALL_RULE_IDS_OR_NAMES</replaceable>" myfirewallpolicy</userinput></screen>
|
||||
<para>Separate firewall rule IDs or names with spaces. The order in which you
|
||||
specify the rules is important.</para>
|
||||
<para>You can create a firewall policy without any
|
||||
rules and add rules later, as follows:<itemizedlist>
|
||||
<listitem>
|
||||
<para>To add multiple rules, use the
|
||||
update operation.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>To add a single rule, use the
|
||||
insert-rule operation.</para>
|
||||
</listitem>
|
||||
</itemizedlist> For more details, see <link
|
||||
xlink:href="http://docs.openstack.org/cli-reference/content/neutronclient_commands.html#neutronclient_subcommand_firewall-policy-create"
|
||||
>Networking command-line client</link> in the
|
||||
<citetitle>OpenStack Command-Line Interface
|
||||
Reference</citetitle>.</para>
|
||||
<note>
|
||||
<para>FWaaS always adds a default <option>deny
|
||||
all</option> rule at the lowest precedence
|
||||
of each policy. Consequently, a firewall
|
||||
policy with no rules blocks all traffic by
|
||||
default.</para>
|
||||
</note>
|
||||
</step>
|
||||
<step>
|
||||
<para>Create a firewall:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron firewall-create <replaceable>FIREWALL_POLICY_UUID</replaceable></userinput></screen>
|
||||
<note>
|
||||
<para>The firewall remains in
|
||||
<guilabel>PENDING_CREATE</guilabel> state
|
||||
until you create a Networking router and
|
||||
attach an interface to it.</para>
|
||||
</note>
|
||||
</step>
|
||||
</procedure>
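        <para>Putting the preceding steps together, a minimal worked example might
            look as follows; the rule name, port, and policy values are
            illustrative only:</para>
        <screen><prompt>$</prompt> <userinput>neutron firewall-rule-create --name allow-http --protocol tcp --destination-port 80 --action allow</userinput>
<prompt>$</prompt> <userinput>neutron firewall-policy-create --firewall-rules "allow-http" myfirewallpolicy</userinput>
<prompt>$</prompt> <userinput>neutron firewall-create <replaceable>FIREWALL_POLICY_UUID</replaceable></userinput></screen>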
|
||||
<formalpara>
|
||||
<title>Allowed-address-pairs</title>
|
||||
<para><option>Allowed-address-pairs</option> enable you to
|
||||
specify mac_address/ip_address(cidr) pairs that pass
|
||||
through a port regardless of subnet. This enables the
|
||||
use of protocols such as VRRP, which floats an IP
|
||||
address between two instances to enable fast data
|
||||
plane failover.</para>
|
||||
</formalpara>
|
||||
<note>
|
||||
<para>Currently, only the ML2, Open vSwitch, and VMware
|
||||
NSX plug-ins support the allowed-address-pairs
|
||||
extension.</para>
|
||||
</note>
|
||||
<formalpara>
|
||||
<title>Basic allowed-address-pairs operations</title>
|
||||
<para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Create a port with a specified allowed
|
||||
address pairs:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron port-create net1 --allowed-address-pairs type=dict list=true mac_address=<replaceable>MAC_ADDRESS</replaceable>,ip_address=<replaceable>IP_CIDR</replaceable></userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Update a port by adding allowed address
|
||||
pairs:</para>
|
||||
<screen><prompt>$</prompt> <userinput>neutron port-update <replaceable>PORT_UUID</replaceable> --allowed-address-pairs type=dict list=true mac_address=<replaceable>MAC_ADDRESS</replaceable>,ip_address=<replaceable>IP_CIDR</replaceable></userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist></para>
|
||||
</formalpara>
|
||||
<note>
|
||||
<para>In releases earlier than Juno, OpenStack Networking
|
||||
prevents setting an allowed address pair on a port
|
||||
that matches the MAC address and one of the fixed IP
|
||||
addresses of the port.</para>
|
||||
</note>
|
||||
</section>
|
||||
</section>
|
@ -1,153 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_orchestration-auth-model">
|
||||
<title>Orchestration authorization model</title>
|
||||
  <para>The Orchestration authorization model defines the process that the
    Orchestration module uses to authorize requests during
    so-called deferred operations. A typical example of such an operation is
    an autoscaling group update, where heat requests other components
    (nova, neutron, or others) to extend or reduce the capacity of the
    autoscaling group.</para>
|
||||
  <para>Currently, Orchestration provides two kinds of
    authorization models:</para>
|
||||
<para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Password authorization.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Authorization with OpenStack Identity trusts.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<section xml:id="section_orchestration-password-authorization">
|
||||
<title>Password authorization</title>
|
||||
    <para>Password authorization is the initial authorization model
      supported by the Orchestration module. This kind of authorization requires
      the user to pass a password to Orchestration, which
      stores the encrypted password in its database and uses it for deferred
      operations.</para>
|
||||
<para>The following steps are executed for password authorization:</para>
|
||||
<para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>User requests stack creation, providing a token and
|
||||
username/password (python-heatclient or OpenStack dashboard
|
||||
normally requests the token for you).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
          <para>If the stack contains any resources marked as requiring
            deferred operations, the orchestration engine fails validation
            checks if no username/password is provided.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The username/password are encrypted and stored in the
|
||||
orchestration DB.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Stack creation is completed.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
          <para>At some later stage, Orchestration retrieves the
            credentials and requests another token on behalf of the user.
            The token is not limited in scope and provides access to all
            roles of the stack owner.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="section_orchestration-keystone-trusts-authorization">
|
||||
<title>Keystone trusts authorization</title>
|
||||
    <para>OpenStack Identity trusts is the newer authorization method, available
      since the Icehouse release.</para>
|
||||
<para>Trusts are an OpenStack Identity extension, which provide a method to
|
||||
enable delegation, and optionally impersonation via OpenStack
|
||||
Identity. The key terminology is <emphasis>trustor</emphasis>
|
||||
(the user delegating) and <emphasis>trustee</emphasis>
|
||||
(the user being delegated to).</para>
|
||||
    <para>To create a trust, the <emphasis>trustor</emphasis> (in this case,
      the user creating the stack in the Orchestration module) provides
      OpenStack Identity with the following information:</para>
|
||||
<para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
          <para>The ID of the <emphasis>trustee</emphasis> (the user you want to
            delegate to, in this case the Orchestration service user).
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
          <para>The roles to be delegated (configurable via
            <filename>heat.conf</filename>; the list needs to contain whatever
            roles are required to perform the deferred operations on the
            user's behalf, for example, launching an OpenStack Compute instance in
            response to an AutoScaling event).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Whether to enable impersonation.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<para>OpenStack Identity then provides a trust_id, which can be consumed by
|
||||
the trustee (and <emphasis>only</emphasis> the trustee) to obtain a
|
||||
<emphasis>trust scoped token</emphasis>. This token is limited in
|
||||
scope such that the trustee has limited access to those roles
|
||||
delegated, along with effective impersonation of the trustor user, if
|
||||
it was selected when creating the trust. More information is available
|
||||
      in the Identity management section.</para>
|
||||
<para>The following steps are executed for trusts authorization:</para>
|
||||
<para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>User creates a stack via an API request (only the token is
|
||||
required).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Orchestration uses the token to create a trust between
|
||||
the stack owner (trustor) and the heat service user (trustee),
|
||||
delegating a special role (or roles) as defined in the
|
||||
<emphasis>trusts_delegated_roles</emphasis> list in the
|
||||
            heat configuration file. By default, heat makes all of the trustor's
            roles available to the trustee. Deployers may modify this list to
            reflect local RBAC policy, for example, to ensure that the heat process
            can access only the expected services while impersonating
            a stack owner.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Orchestration stores the encrypted
|
||||
<emphasis>trust id</emphasis> in the Orchestration DB.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>When a deferred operation is required, Orchestration
|
||||
retrieves the <emphasis>trust id</emphasis>, and requests a
|
||||
trust scoped token which enables the service user to impersonate
|
||||
            the stack owner for the duration of the deferred operation, for example,
|
||||
to launch some OpenStack Compute instances on behalf of
|
||||
the stack owner in response to an AutoScaling event.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="section_orchestration-authorization-model-configuration">
|
||||
<title>Authorization model configuration</title>
|
||||
    <para>The password authorization model was the default authorization
      model for the Orchestration module before the Kilo release. Since the Kilo
      release, the trusts authorization model is enabled by default.</para>
|
||||
    <para>To enable the password authorization model, make the following change
      in <filename>heat.conf</filename>:</para>
|
||||
<programlisting language="ini">deferred_auth_method=password</programlisting>
|
||||
    <para>To enable the trusts authorization model, make the following change
      in <filename>heat.conf</filename>:</para>
|
||||
<programlisting language="ini">deferred_auth_method=trusts</programlisting>
|
||||
    <para>To specify the trustor roles that are delegated to the trustee during
      authorization, set the <literal>trusts_delegated_roles</literal> parameter
      in <filename>heat.conf</filename>. If
      <literal>trusts_delegated_roles</literal> is not defined, all
      trustor roles are delegated to the trustee. Note that the delegated roles
      must be pre-configured in OpenStack Identity
      before they are used by the Orchestration module.</para>
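    <para>For example, a deployment might delegate only a single dedicated role;
      the role name below is illustrative and must already exist in OpenStack
      Identity:</para>
    <programlisting language="ini">trusts_delegated_roles = heat_stack_owner</programlisting>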
|
||||
</section>
|
||||
</section>
|
||||
|
@ -1,155 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
version="5.0"
|
||||
xml:id="section_orchestration-stack-domain-users">
|
||||
<title>Stack domain users</title>
|
||||
  <para>Orchestration stack domain users allow heat to authorize requests made
    from inside booted VMs and to perform the following operations:</para>
|
||||
<para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Provide metadata to agents inside instances, which poll for
|
||||
changes and apply the configuration expressed in the metadata to
|
||||
the instance.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Detect signal completion of some action, typically configuration
|
||||
of software on a VM after it is booted (because OpenStack Compute
|
||||
moves the state of a VM to "Active" as soon as it spawns it, not
|
||||
when orchestration has fully configured it).</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Provide application level status or meters from inside the
|
||||
instance. For example, allow AutoScaling actions to be performed in
|
||||
response to some measure of performance or quality of service.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
  <para>Orchestration provides APIs that enable all of these things,
    but all of those APIs require some form of authentication,
    for example, credentials to access the instance that the agent
    is running on. The heat-cfntools
    agents use signed requests, which require an ec2 keypair created via
    OpenStack Identity. The keypair is then used to sign requests to
    the Orchestration CloudFormation-compatible and CloudWatch-compatible APIs,
    which Orchestration authenticates through signature validation
    (using the OpenStack Identity ec2tokens extension). Stack domain
    users encapsulate all stack-defined users (users created
    as a result of resources contained in an Orchestration template) in
    a separate domain, which is created specifically to contain objects
    related only to Orchestration stacks. A user is created as
    the <emphasis>domain admin</emphasis>, and Orchestration uses that user
    to manage the lifecycle of the users in the
    <emphasis>stack user domain</emphasis>.</para>
|
||||
<section xml:id="section_orchestration_stack-domain-users-configuration">
|
||||
<title>Stack domain users configuration</title>
|
||||
    <para>To configure stack domain users, the following steps are
      executed:</para>
|
||||
<para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
          <para>A special OpenStack Identity service domain is created,
            for example, one called <literal>heat</literal>, and
            its ID is set in the <literal>stack_user_domain</literal> option
            in <filename>heat.conf</filename>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A user with sufficient permissions to create and delete
|
||||
projects and users in the <literal>heat</literal> domain is
|
||||
created.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The username and password for the domain admin user is set in
|
||||
<filename>heat.conf</filename>
|
||||
(<literal>stack_domain_admin</literal> and
|
||||
<literal>stack_domain_admin_password</literal>). This user
|
||||
administers <emphasis>stack domain users</emphasis> on behalf
|
||||
of stack owners, so they no longer need to be admins themselves,
|
||||
and the risk of this escalation path is limited because the
|
||||
<option>heat_domain_admin</option> is only given administrative
|
||||
permission for the <literal>heat</literal> domain.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
    <para>You must complete the following steps to set up stack domain users:</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>Create the domain:</para>
|
||||
<para><literal>$OS_TOKEN</literal> refers to a token. For example,
|
||||
the service admin token or some other valid token for a user
|
||||
with sufficient roles to create users and domains.
|
||||
<literal>$KS_ENDPOINT_V3</literal> refers to the v3
|
||||
OpenStack Identity endpoint (for example
|
||||
<literal>http://<replaceable>keystone_address:5000/v3</replaceable></literal>
|
||||
where <emphasis>keystone_address</emphasis> is the IP address or
|
||||
resolvable name for the OpenStack Identity service).</para>
|
||||
<screen><prompt>$</prompt> <userinput>openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 --os-identity-api-version=3 domain create heat --description "Owns users and projects created by heat"</userinput></screen>
|
||||
<para>The domain ID is returned by this command, and is referred
|
||||
to as <literal>$HEAT_DOMAIN_ID</literal> below.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Create the user:</para>
|
||||
<screen><prompt>$</prompt> <userinput>openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 --os-identity-api-version=3 user create --password $PASSWORD --domain $HEAT_DOMAIN_ID heat_domain_admin --description "Manages users and projects created by heat" </userinput></screen>
|
||||
<para>The user ID is returned by this command and is referred
|
||||
to as <literal>$DOMAIN_ADMIN_ID</literal> below.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Make the user a domain admin:</para>
|
||||
<screen><prompt>$</prompt> <userinput>openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 --os-identity-api-version=3 role add --user $DOMAIN_ADMIN_ID --domain $HEAT_DOMAIN_ID admin</userinput></screen>
|
||||
<para>Then you need to add the domain ID, username and password
|
||||
from these steps to <filename>heat.conf</filename>:
|
||||
<programlisting language="ini">stack_domain_admin_password = <replaceable>password</replaceable>
|
||||
stack_domain_admin = heat_domain_admin
|
||||
stack_user_domain = <replaceable>domain id returned from domain create above</replaceable></programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</section>
|
||||
<section xml:id="section_orchestration_usage_workflow">
|
||||
<title>Usage workflow</title>
|
||||
<para>The following steps will be executed during stack creation:</para>
|
||||
<para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>Orchestration creates a new "stack domain project"
|
||||
in the "heat" domain, if the stack contains any resources
|
||||
which require creation of a "stack domain user".</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
          <para>For any resources that require a user, Orchestration
            creates the user in the "stack domain project". This project is
            associated with the heat stack in the heat database, but
            is completely separate and unrelated (from an
            authentication perspective) to the stack owner's project.
            (The users created in the stack domain are still assigned
            the <literal>heat_stack_user</literal> role, so the API
            surface they can access is limited via policy.json.
            See the OpenStack Identity documentation for more information.)</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
          <para>When API requests are processed, Orchestration performs
            an internal lookup and allows stack details for a given
            stack to be retrieved from the database for both the
            stack owner's project (the default API path to the stack)
            and the <emphasis>stack domain project</emphasis>,
            subject to the policy.json restrictions.</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
<para>To clarify that last point, that means there are now two paths
|
||||
which can result in retrieval of the same data via the
|
||||
Orchestration API. The example for resource-metadata is below:</para>
|
||||
<programlisting language="ini">GET v1/{stack_owner_project_id}/stacks/{stack_name}/{stack_id}/resources/{resource_name}/metadata</programlisting>
|
||||
<para>or:</para>
|
||||
<programlisting language="ini">GET v1/{stack_domain_project_id}/stacks/{stack_name}/{stack_id}/resources/{resource_name}/metadata</programlisting>
|
||||
<para>The stack owner would use the former
|
||||
(via <literal>heat resource-metadata {stack_name}
|
||||
{resource_name}</literal>), and any agents in the instance will
|
||||
use the latter.</para>
|
||||
</section>
|
||||
</section>
|
@ -1,79 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
<parent>
|
||||
<groupId>org.openstack.docs</groupId>
|
||||
<artifactId>parent-pom</artifactId>
|
||||
<version>1.0.0-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>openstack-admin-guide-cloud</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
<name>OpenStack Cloud Administrator Guide</name>
|
||||
<properties>
|
||||
<!-- This is set by Jenkins according to the branch. -->
|
||||
<release.path.name></release.path.name>
|
||||
<comments.enabled>1</comments.enabled>
|
||||
</properties>
|
||||
<!-- ################################################ -->
|
||||
<!-- USE "mvn clean generate-sources" to run this POM -->
|
||||
<!-- ################################################ -->
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>com.rackspace.cloud.api</groupId>
|
||||
<artifactId>clouddocs-maven-plugin</artifactId>
|
||||
<!-- version set in ../pom.xml -->
|
||||
<executions>
|
||||
<execution>
|
||||
<id>generate-webhelp</id>
|
||||
<goals>
|
||||
<goal>generate-webhelp</goal>
|
||||
</goals>
|
||||
<phase>generate-sources</phase>
|
||||
<configuration>
|
||||
<!-- These parameters only apply to webhelp -->
|
||||
<enableDisqus>${comments.enabled}</enableDisqus>
|
||||
<disqusShortname>os-cloud-admin-guide</disqusShortname>
|
||||
<enableGoogleAnalytics>1</enableGoogleAnalytics>
|
||||
<googleAnalyticsId>UA-17511903-1</googleAnalyticsId>
|
||||
<generateToc>
|
||||
appendix toc,title
|
||||
article/appendix nop
|
||||
article toc,title
|
||||
book toc,title,figure,table,example,equation
|
||||
chapter toc,title
|
||||
section toc
|
||||
part toc,title
|
||||
qandadiv toc
|
||||
qandaset toc
|
||||
reference toc,title
|
||||
set toc,title
|
||||
</generateToc>
|
||||
<!-- The following elements sets the autonumbering of sections in output for chapter numbers but no numbered sections-->
|
||||
<sectionAutolabel>0</sectionAutolabel>
|
||||
<tocSectionDepth>1</tocSectionDepth>
|
||||
<sectionLabelIncludesComponentLabel>0</sectionLabelIncludesComponentLabel>
|
||||
<webhelpDirname>admin-guide-cloud</webhelpDirname>
|
||||
<pdfFilenameBase>admin-guide-cloud</pdfFilenameBase>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
<configuration>
|
||||
<!-- These parameters apply to pdf and webhelp -->
|
||||
<xincludeSupported>true</xincludeSupported>
|
||||
<sourceDirectory>.</sourceDirectory>
|
||||
<includes>
|
||||
bk-admin-guide-cloud.xml
|
||||
</includes>
|
||||
<canonicalUrlBase>http://docs.openstack.org/admin-guide-cloud/content</canonicalUrlBase>
|
||||
<glossaryCollection>${basedir}/../glossary/glossary-terms.xml</glossaryCollection>
|
||||
<branding>openstack</branding>
|
||||
<formalProcedures>0</formalProcedures>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
@ -1,31 +0,0 @@
|
||||
Roadmap for Cloud Admin Guide
|
||||
-----------------------------
|
||||
|
||||
This file is stored with the source to offer ideas for what to work on.
|
||||
Put your name next to a task if you want to work on it and put a WIP
|
||||
review up on review.openstack.org.
|
||||
|
||||
May 20, 2014
|
||||
To do tasks:
|
||||
|
||||
- Add a chapter describing monitoring; deeper dive into Telemetry (ceilometer)
|
||||
- Update networking information with the goal of starting a new
|
||||
Network Admin Guide*
|
||||
- Add more networking diagrams
|
||||
- Add audience information; who is this book intended for
|
||||
|
||||
Ongoing tasks:
|
||||
|
||||
- Ensure it meets conventions and standards
|
||||
- Continually update with latest OpenStack dashboard (horizon)
|
||||
information including great descriptions of fields and why you set a
|
||||
setting
|
||||
- Continually add Python examples to SDK chapter
|
||||
|
||||
Wishlist tasks:
|
||||
|
||||
- Replace all individual client commands (like keystone, nova) with
|
||||
openstack client commands
|
||||
|
||||
* At Pycon Australia in August, a one-day "swarm" will focus on the
|
||||
Network Admin Guide
|
@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
    xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink"
    version="5.0"
    xml:id="ch_running-openstack-object-storage">
    <title>System administration for Object Storage</title>
    <para>By understanding Object Storage concepts, you can better
        monitor and administer your storage solution. The majority of
        the administration information is maintained in developer
        documentation at <link
        xlink:href="http://docs.openstack.org/developer/swift/"
        >docs.openstack.org/developer/swift/</link>.</para>
    <para>See the <link
        xlink:href="http://docs.openstack.org/kilo/config-reference/content/"
        ><citetitle>OpenStack Configuration
        Reference</citetitle></link> for a list of
        configuration options for Object Storage.</para>
</section>
@ -1,303 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<section xmlns="http://docbook.org/ns/docbook"
    xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink"
    version="5.0"
    xml:id="ch_introduction-to-openstack-object-storage-monitoring">
    <!-- ... Based on a blog, should be replaced with original material... -->
    <title>Object Storage monitoring</title>
    <?dbhtml stop-chunking?>
    <para>Excerpted from a blog post by <link
        xlink:href="http://swiftstack.com/blog/2012/04/11/swift-monitoring-with-statsd"
        >Darrell Bishop</link></para>
    <para>An OpenStack Object Storage cluster is a collection of many
        daemons that work together across many nodes. With so many
        different components, you must be able to tell what is going
        on inside the cluster. Tracking server-level meters like CPU
        utilization, load, memory consumption, disk usage and
        utilization, and so on is necessary, but not
        sufficient.</para>
    <para>What are the different daemons doing on each server? What is
        the volume of object replication on node8? How long is it
        taking? Are there errors? If so, when did they happen?</para>
    <para>In such a complex ecosystem, you can use multiple approaches
        to get the answers to these questions. This section describes
        several approaches.</para>
<section xml:id="monitoring-swiftrecon">
|
||||
<title>Swift Recon</title>
|
||||
<para>The Swift Recon middleware (see <link
|
||||
xlink:href="http://swift.openstack.org/admin_guide.html#cluster-telemetry-and-monitoring"
|
||||
>http://swift.openstack.org/admin_guide.html#cluster-telemetry-and-monitoring</link>)
|
||||
provides general machine statistics, such as load average,
|
||||
socket statistics, <code>/proc/meminfo</code> contents,
|
||||
and so on, as well as Swift-specific meters:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>The MD5 sum of each ring file.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>The most recent object replication time.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Count of each type of quarantined file: Account,
|
||||
container, or object.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Count of "async_pendings" (deferred container
|
||||
updates) on disk.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>Swift Recon is middleware that is installed in the
|
||||
object servers pipeline and takes one required option: A
|
||||
local cache directory. To track
|
||||
<literal>async_pendings</literal>, you must set up an
|
||||
additional cron job for each object server. You access
|
||||
data by either sending HTTP requests directly to the
|
||||
object server or using the <command>swift-recon</command>
|
||||
command-line client.</para>
|
||||
<para>There are some good Object Storage cluster statistics
|
||||
but the general server meters overlap with existing
|
||||
server monitoring systems. To get the Swift-specific
|
||||
meters into a monitoring system, they must be polled.
|
||||
Swift Recon essentially acts as a middleware meters
|
||||
collector. The process that feeds meters to your
|
||||
statistics system, such as <literal>collectd</literal> and
|
||||
<literal>gmond</literal>, probably already runs on the
|
||||
storage node. So, you can choose to either talk to Swift
|
||||
Recon or collect the meters directly.</para>
|
||||
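        <para>As an example of how the pieces fit together, a minimal
            Recon setup in <filename>object-server.conf</filename> adds
            the filter to the pipeline and points it at a cache
            directory. The values below are illustrative, not
            authoritative:</para>
        <literallayout class="monospaced">[pipeline:main]
pipeline = recon object-server

[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift</literallayout>
        <para>With a periodic <command>swift-recon-cron</command> job
            populating the async_pendings cache, you can then poll the
            cluster with commands such as
            <command>swift-recon --md5</command> or
            <command>swift-recon -a</command>; the exact options depend
            on your Swift release.</para>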
    </section>
<section xml:id="monitoring-swift-informant">
|
||||
<title>Swift-Informant</title>
|
||||
<para>Florian Hines developed the Swift-Informant middleware
|
||||
(see <link
|
||||
xlink:href="https://github.com/pandemicsyn/swift-informant"
|
||||
>https://github.com/pandemicsyn/swift-informant</link>)
|
||||
to get real-time visibility into Object Storage client
|
||||
requests. It sits in the pipeline for the proxy server,
|
||||
and after each request to the proxy server, sends three
|
||||
meters to a StatsD server (see <link
|
||||
xlink:href="http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/"
|
||||
>http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/</link>):</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>A counter increment for a meter like
|
||||
<code>obj.GET.200</code> or
|
||||
<code>cont.PUT.404</code>.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Timing data for a meter like
|
||||
<code>acct.GET.200</code> or
|
||||
<code>obj.GET.200</code>. [The README says the
|
||||
meters look like
|
||||
<code>duration.acct.GET.200</code>, but I do
|
||||
not see the <literal>duration</literal> in the
|
||||
code. I am not sure what the Etsy server does but
|
||||
our StatsD server turns timing meters into five
|
||||
derivative meters with new segments appended, so
|
||||
it probably works as coded. The first meter turns
|
||||
into <code>acct.GET.200.lower</code>,
|
||||
<code>acct.GET.200.upper</code>,
|
||||
<code>acct.GET.200.mean</code>,
|
||||
<code>acct.GET.200.upper_90</code>, and
|
||||
<code>acct.GET.200.count</code>].</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>A counter increase by the bytes transferred for
|
||||
a meter like
|
||||
<code>tfer.obj.PUT.201</code>.</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para>This is good for getting a feel for the quality of
|
||||
service clients are experiencing with the timing meters,
|
||||
as well as getting a feel for the volume of the various
|
||||
permutations of request server type, command, and response
|
||||
code. Swift-Informant also requires no change to core
|
||||
Object Storage code because it is implemented as
|
||||
middleware. However, it gives you no insight into the
|
||||
workings of the cluster past the proxy server. If the
|
||||
responsiveness of one storage node degrades, you can only
|
||||
see that some of your requests are bad, either as high
|
||||
latency or error status codes. You do not know exactly why
|
||||
or where that request tried to go. Maybe the container
|
||||
server in question was on a good node but the object
|
||||
server was on a different, poorly-performing node.</para>
|
||||
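        <para>To make the wire format concrete, the following sketch
            (an illustration only, not the middleware's actual code)
            sends the equivalent StatsD messages for a hypothetical
            object GET that returned 200 in 140 milliseconds and
            transferred 4096 bytes, assuming a StatsD server on
            127.0.0.1:8125:</para>
        <programlisting language="python">import socket

STATSD_ADDR = ('127.0.0.1', 8125)  # assumed StatsD host and port
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

def send(msg):
    # StatsD messages are plain UDP datagrams.
    sock.sendto(msg.encode('utf-8'), STATSD_ADDR)

# Counter increment for the server type, command, and status code.
send('obj.GET.200:1|c')
# Timing data for the same meter, in milliseconds.
send('obj.GET.200:140|ms')
# Counter increased by the number of bytes transferred.
send('tfer.obj.GET.200:4096|c')</programlisting>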
    </section>
<section xml:id="monitoring-statsdlog">
|
||||
<title>Statsdlog</title>
|
||||
<para>Florian's <link
|
||||
xlink:href="https://github.com/pandemicsyn/statsdlog"
|
||||
>Statsdlog</link> project increments StatsD counters
|
||||
based on logged events. Like Swift-Informant, it is also
|
||||
non-intrusive, but statsdlog can track events from all
|
||||
Object Storage daemons, not just proxy-server. The daemon
|
||||
listens to a UDP stream of syslog messages and StatsD
|
||||
counters are incremented when a log line matches a regular
|
||||
expression. Meter names are mapped to regex match
|
||||
patterns in a JSON file, allowing flexible configuration
|
||||
of what meters are extracted from the log stream.</para>
|
||||
<para>Currently, only the first matching regex triggers a
|
||||
StatsD counter increment, and the counter is always
|
||||
incremented by one. There is no way to increment a counter
|
||||
by more than one or send timing data to StatsD based on
|
||||
the log line content. The tool could be extended to handle
|
||||
more meters for each line and data extraction, including
|
||||
timing data. But a coupling would still exist between the
|
||||
log textual format and the log parsing regexes, which
|
||||
would themselves be more complex to support multiple
|
||||
matches for each line and data extraction. Also, log
|
||||
processing introduces a delay between the triggering event
|
||||
and sending the data to StatsD. It would be preferable to
|
||||
increment error counters where they occur and send timing
|
||||
data as soon as it is known to avoid coupling between a
|
||||
log string and a parsing regex and prevent a time delay
|
||||
between events and sending data to StatsD.</para>
|
||||
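        <para>Conceptually, the daemon's loop looks something like the
            following sketch. This illustrates the approach only; the
            meter names, regexes, ports, and the JSON schema used by
            the real statsdlog project are assumptions here, not its
            actual configuration or code:</para>
        <programlisting language="python">import re
import socket

# Hypothetical meter-name-to-regex mapping; statsdlog loads an
# equivalent mapping from its JSON configuration file.
PATTERNS = [
    ('proxy.errors', re.compile(r'proxy-server.*ERROR')),
    ('object.quarantines', re.compile(r'quarantin')),
]

STATSD_ADDR = ('127.0.0.1', 8125)
statsd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
listener.bind(('0.0.0.0', 5140))  # UDP syslog stream (port assumed)

while True:
    line = listener.recv(8192).decode('utf-8', 'replace')
    for meter, regex in PATTERNS:
        if regex.search(line):
            # Only the first match fires, and it always adds one.
            statsd.sendto(('%s:1|c' % meter).encode('utf-8'), STATSD_ADDR)
            break</programlisting>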
        <para>The next section describes another method for gathering
            Object Storage operational meters.</para>
    </section>
<section xml:id="monitoring-statsD">
|
||||
<title>Swift StatsD logging</title>
|
||||
<para>StatsD (see <link
|
||||
xlink:href="http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/"
|
||||
>http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/</link>)
|
||||
was designed for application code to be deeply
|
||||
instrumented; meters are sent in real-time by the code
|
||||
that just noticed or did something. The overhead of
|
||||
sending a meter is extremely low: a <code>sendto</code>
|
||||
of one UDP packet. If that overhead is still too high, the
|
||||
StatsD client library can send only a random portion of
|
||||
samples and StatsD approximates the actual number when
|
||||
flushing meters upstream.</para>
|
||||
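        <para>The sampling mechanism is a client-side convention: the
            client sends only a configurable fraction of the events and
            tags each message with that rate so the server can scale
            the counts back up. A minimal sketch, assuming a StatsD
            server at 127.0.0.1:8125:</para>
        <programlisting language="python">import random
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

def increment(metric, sample_rate=1.0, addr=('127.0.0.1', 8125)):
    # Randomly drop (1 - sample_rate) of the events; the "|@rate"
    # suffix tells the StatsD server to scale the count back up.
    if sample_rate >= 1 or random.random() < sample_rate:
        msg = '%s:1|c' % metric
        if sample_rate < 1:
            msg += '|@%s' % sample_rate
        sock.sendto(msg.encode('utf-8'), addr)

increment('obj.GET.200', sample_rate=0.5)</programlisting>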
        <para>To avoid the problems inherent in middleware-based
            monitoring and after-the-fact log processing, the sending
            of StatsD meters is integrated into Object Storage
            itself. The submitted change set (see <link
            xlink:href="https://review.openstack.org/#change,6058"
            >https://review.openstack.org/#change,6058</link>)
            currently reports 124 meters across 15 Object Storage
            daemons and the tempauth middleware. Details of the
            meters tracked are in the <link
            xlink:href="http://docs.openstack.org/developer/swift/admin_guide.html"
            >Administrator's Guide</link>.</para>
        <para>The sending of meters is integrated with the logging
            framework. To enable it, configure
            <code>log_statsd_host</code> in the relevant config
            file. You can also specify the port and a default sample
            rate. The specified default sample rate is used unless a
            specific call to a statsd logging method (see the list
            below) overrides it. Currently, no logging calls override
            the sample rate, but it is conceivable that some meters
            may require accuracy (sample_rate == 1) while others may
            not.</para>
        <literallayout class="monospaced">[DEFAULT]
...
log_statsd_host = 127.0.0.1
log_statsd_port = 8125
log_statsd_default_sample_rate = 1</literallayout>
        <para>Then the LogAdapter object returned by
            <code>get_logger()</code>, usually stored in
            <code>self.logger</code>, has these new
            methods:</para>
        <itemizedlist>
            <listitem>
                <para><code>set_statsd_prefix(self, prefix)</code>
                    Sets the client library stat prefix value which
                    gets prefixed to every meter. The default prefix
                    is the "name" of the logger such as "object-server",
                    "container-auditor", and so on. This is currently used to
                    turn "proxy-server" into one of "proxy-server.Account",
                    "proxy-server.Container", or "proxy-server.Object"
                    as soon as the Controller object is determined and
                    instantiated for the request.</para>
            </listitem>
            <listitem>
                <para><code>update_stats(self, metric, amount,
                    sample_rate=1)</code> Increments the supplied
                    meter by the given amount. This is used when you
                    need to add or subtract more than one from a
                    counter, like incrementing "suffix.hashes" by the
                    number of computed hashes in the object
                    replicator.</para>
            </listitem>
            <listitem>
                <para><code>increment(self, metric,
                    sample_rate=1)</code> Increments the given
                    counter meter by one.</para>
            </listitem>
            <listitem>
                <para><code>decrement(self, metric,
                    sample_rate=1)</code> Lowers the given counter
                    meter by one.</para>
            </listitem>
            <listitem>
                <para><code>timing(self, metric, timing_ms,
                    sample_rate=1)</code> Records that the given
                    meter took the supplied number of
                    milliseconds.</para>
            </listitem>
            <listitem>
                <para><code>timing_since(self, metric, orig_time,
                    sample_rate=1)</code> Convenience method to
                    record a timing meter whose value is "now" minus
                    an existing timestamp.</para>
            </listitem>
        </itemizedlist>
        <para>Note that these logging methods may safely be called
            anywhere you have a logger object. If StatsD logging has
            not been configured, the methods are no-ops. This avoids
            messy conditional logic each place a meter is recorded.
            These example usages show the new logging methods:</para>
<programlisting language="bash"># swift/obj/replicator.py
|
||||
def update(self, job):
|
||||
# ...
|
||||
begin = time.time()
|
||||
try:
|
||||
hashed, local_hash = tpool.execute(tpooled_get_hashes, job['path'],
|
||||
do_listdir=(self.replication_count % 10) == 0,
|
||||
reclaim_age=self.reclaim_age)
|
||||
# See tpooled_get_hashes "Hack".
|
||||
if isinstance(hashed, BaseException):
|
||||
raise hashed
|
||||
self.suffix_hash += hashed
|
||||
self.logger.update_stats('suffix.hashes', hashed)
|
||||
# ...
|
||||
finally:
|
||||
self.partition_times.append(time.time() - begin)
|
||||
self.logger.timing_since('partition.update.timing', begin)</programlisting>
|
||||
<programlisting language="bash"># swift/container/updater.py
|
||||
def process_container(self, dbfile):
|
||||
# ...
|
||||
start_time = time.time()
|
||||
# ...
|
||||
for event in events:
|
||||
if 200 <= event.wait() < 300:
|
||||
successes += 1
|
||||
else:
|
||||
failures += 1
|
||||
if successes > failures:
|
||||
self.logger.increment('successes')
|
||||
# ...
|
||||
else:
|
||||
self.logger.increment('failures')
|
||||
# ...
|
||||
# Only track timing data for attempted updates:
|
||||
self.logger.timing_since('timing', start_time)
|
||||
else:
|
||||
self.logger.increment('no_changes')
|
||||
self.no_changes += 1</programlisting>
|
||||
        <para>The Object Storage development team wanted to use the <link
            xlink:href="https://github.com/sivy/py-statsd"
            >pystatsd</link> client library (not to be confused
            with a <link
            xlink:href="https://github.com/sivy/py-statsd"
            >similar-looking project</link> also hosted on
            GitHub), but the released version on PyPI was missing two
            desired features that the latest version in GitHub had: the
            ability to configure a meters prefix in the client object
            and a convenience method for sending timing data between
            "now" and a "start" timestamp you already have. So they
            just implemented a simple StatsD client library from
            scratch with the same interface. This has the nice fringe
            benefit of not introducing another external library
            dependency into Object Storage.</para>
    </section>
</section>
Some files were not shown because too many files have changed in this diff.