From bc7a9f0da7ca0b218cf54f5a8f3fc14a3f9f9391 Mon Sep 17 00:00:00 2001 From: Diane Fleming Date: Mon, 18 Nov 2013 10:26:49 -0600 Subject: [PATCH] Editorial updates to common files, including sentence-style headings and consistency/clarity edits Partial-Bug: #1250515 backport: havana Change-Id: I9675dffd130c8aa6343143d9806adb4e0b74a55d author: diane fleming --- doc/common/app_support.xml | 36 +- doc/common/section_cli_install.xml | 2 +- doc/common/section_cli_openrc.xml | 3 +- doc/common/section_compute-configure-ec2.xml | 32 +- .../section_compute-configure-quotas.xml | 29 +- .../section_compute-configure-spice.xml | 39 +- doc/common/section_compute-configure-vnc.xml | 289 ++++++------- doc/common/section_compute-options.xml | 244 ++++++----- doc/common/section_compute_config-api.xml | 185 +++++---- doc/common/section_config_keystone_ldap.xml | 146 +++---- doc/common/section_customize_flavors.xml | 2 +- doc/common/section_dashboard_access.xml | 2 +- doc/common/section_dashboard_customizing.xml | 6 +- ..._dashboard_launch_instances_from_image.xml | 2 +- doc/common/section_dashboard_sessions.xml | 13 +- doc/common/section_fibrechannel.xml | 81 ++-- doc/common/section_getstart_compute.xml | 382 +++++++++--------- doc/common/section_getstart_logical_arch.xml | 2 +- doc/common/section_getstart_metering.xml | 4 +- doc/common/section_getstart_networking.xml | 88 ++-- .../section_getstart_object-storage.xml | 93 +++-- doc/common/section_getstart_orchestration.xml | 82 ++-- .../section_glance_cli_manage_images.xml | 4 +- doc/common/section_host_aggregates.xml | 240 ++++++----- doc/common/section_identity-configure.xml | 42 +- doc/common/section_keystone-external-auth.xml | 53 +-- .../section_keystone-sample-conf-files.xml | 20 +- doc/common/section_keystone-ssl-config.xml | 142 ++++--- .../section_keystone_certificates-for-pki.xml | 58 +-- .../section_keystone_cli_credentials.xml | 2 +- doc/common/section_keystone_db_sync.xml | 64 +-- doc/common/section_kvm_enable.xml | 182 ++++----- doc/common/section_multiple-compute-nodes.xml | 123 +++--- doc/common/section_networking-quotas.xml | 262 +++++++----- doc/common/section_neutron_cli_commands.xml | 4 +- doc/common/section_nova_boot_from_volume.xml | 30 +- doc/common/section_nova_cli_baremetal.xml | 81 ++-- doc/common/section_nova_cli_boot.xml | 2 +- doc/common/section_nova_cli_evacuate.xml | 14 +- doc/common/section_nova_cli_fileinjection.xml | 2 +- doc/common/section_nova_cli_images.xml | 2 +- doc/common/section_nova_cli_metadata.xml | 4 +- doc/common/section_nova_cli_quotas.xml | 58 +-- doc/common/section_nova_cli_resizerebuild.xml | 25 +- doc/common/section_nova_cli_secgroups.xml | 14 +- doc/common/section_nova_cli_sshkeys.xml | 8 +- doc/common/section_nova_cli_userdata.xml | 36 +- doc/common/section_rpc-for-networking.xml | 171 ++++---- doc/common/section_rpc.xml | 202 ++++----- doc/common/section_storage-concepts.xml | 2 +- doc/common/section_support-compute.xml | 291 +++++++------ doc/common/section_support-object-storage.xml | 191 ++++++--- .../section_tenant-specific-image-storage.xml | 75 ++-- doc/common/section_trusted-compute-pools.xml | 270 +++++++------ doc/common/section_user-data.xml | 2 +- doc/common/section_using-vnc-console.xml | 126 +++--- doc/common/section_xapi-ami-setup.xml | 35 +- doc/common/section_xapi-install-plugins.xml | 140 ++++--- doc/common/section_xapi-resize-setup.xml | 39 +- doc/common/section_xen-install.xml | 250 ++++++------ 60 files changed, 2639 insertions(+), 2389 deletions(-) diff --git 
a/doc/common/app_support.xml b/doc/common/app_support.xml index cc026d743d..8a32a6978e 100644 --- a/doc/common/app_support.xml +++ b/doc/common/app_support.xml @@ -29,21 +29,21 @@ The following books explain how to install an OpenStack cloud and its components: - + Installation Guide for Debian 7.0 - + Installation Guide for openSUSE and SUSE Linux Enterprise Server - + @@ -51,14 +51,14 @@ Installation Guide for Red Hat Enterprise Linux, CentOS, and Fedora - + Installation Guide for Ubuntu 12.04 (LTS) - + The following books explain how to configure and run an @@ -69,13 +69,13 @@ Cloud Administrator Guide - + Configuration Reference - + High Availability Guide - + Security Guide - + Virtual Machine Image Guide - + The following books explain how to use the OpenStack @@ -109,21 +109,21 @@ API Quick Start - + End User Guide - + Admin User Guide - + The following documentation provides reference and @@ -325,7 +325,7 @@ xlink:href="https://bugs.launchpad.net/ceilometer" or report a bug. - +
@@ -333,7 +333,7 @@ xlink:href="https://bugs.launchpad.net/ceilometer" The following Linux distributions provide community-supported packages for OpenStack: - + Debian: CentOS, Fedora, and Red Hat Enterprise Linux: http://openstack.redhat.com/ - + openSUSE and SUSE Linux Enterprise Server: http://en.opensuse.org/Portal:OpenStack - + Ubuntu: https://wiki.ubuntu.com/ServerTeam/CloudArchive - +
diff --git a/doc/common/section_cli_install.xml b/doc/common/section_cli_install.xml index 1b9c7d243c..befdd92341 100644 --- a/doc/common/section_cli_install.xml +++ b/doc/common/section_cli_install.xml @@ -57,7 +57,7 @@ package directly from http://pypi.python.org/pypi/setuptools. - + diff --git a/doc/common/section_cli_openrc.xml b/doc/common/section_cli_openrc.xml index 884d09e7d2..26194f9e8e 100644 --- a/doc/common/section_cli_openrc.xml +++ b/doc/common/section_cli_openrc.xml @@ -63,13 +63,12 @@ password. - Alternatively, you can create the openrc.sh file from scratch. Create the openrc.sh file - containing the authentication: + and add the authentication information: export OS_USERNAME=USERNAME export OS_PASSWORD=PASSWORD export OS_TENANT_NAME=PROJECT_NAME diff --git a/doc/common/section_compute-configure-ec2.xml b/doc/common/section_compute-configure-ec2.xml index 1fb51c6e6c..5e13fb0968 100644 --- a/doc/common/section_compute-configure-ec2.xml +++ b/doc/common/section_compute-configure-ec2.xml @@ -1,21 +1,17 @@
- Configuring the EC2 API - - You can use nova.conf configuration - options to control which network address and port the EC2 API will - listen on, the formatting of some API responses, and authentication - related options. - + xmlns="http://docbook.org/ns/docbook" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:ns5="http://www.w3.org/1999/xhtml" + xmlns:ns4="http://www.w3.org/2000/svg" + xmlns:ns3="http://www.w3.org/1998/Math/MathML" + xmlns:ns="http://docbook.org/ns/docbook"> + Configure the EC2 API + You can set options in the nova.conf + configuration file to control which network address and port the + EC2 API listens on, the formatting of some API responses, and + authentication related options. To customize these options for OpenStack EC2 API, use these - configuration option settings. - - - + configuration option settings: +
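For example, a minimal nova.conf fragment that binds the EC2 API to a management interface might look like the following sketch. The address and port are placeholders, and the option names should be checked against the reference table for your release:
[DEFAULT]
ec2_listen=192.168.1.10
ec2_listen_port=8773
keystone_ec2_url=http://192.168.1.10:5000/v2.0/ec2tokens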
diff --git a/doc/common/section_compute-configure-quotas.xml b/doc/common/section_compute-configure-quotas.xml index 9f8f575834..d019e0fdf2 100644 --- a/doc/common/section_compute-configure-quotas.xml +++ b/doc/common/section_compute-configure-quotas.xml @@ -1,18 +1,17 @@
- Configuring Quotas - To prevent system capacities from being exhausted without - notification, you can set up quotas. Quotas are operational limits. - For example, the number of gigabytes allowed per tenant can be - controlled so that cloud resources are optimized. - Quotas are currently enforced at the tenant (or project) level, - rather than by user. - + xmlns="http://docbook.org/ns/docbook" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:ns5="http://www.w3.org/1999/xhtml" + xmlns:ns4="http://www.w3.org/2000/svg" + xmlns:ns3="http://www.w3.org/1998/Math/MathML" + xmlns:ns="http://docbook.org/ns/docbook" version="5.0"> + Configure quotas + To prevent system capacities from being exhausted without + notification, you can set up quotas. Quotas are operational + limits. For example, the number of gigabytes allowed per tenant + can be controlled so that cloud resources are optimized. Quotas + are currently enforced at the tenant (or project) level, rather + than by user.
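For example, a hypothetical nova.conf fragment that loosens the default per-tenant limits might look like this; the numbers are illustrative only:
[DEFAULT]
quota_instances=20
quota_cores=40
quota_ram=102400
quota_floating_ips=10
Per-tenant overrides are usually applied at run time with the nova quota-update command rather than in the configuration file.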
diff --git a/doc/common/section_compute-configure-spice.xml b/doc/common/section_compute-configure-spice.xml index 352e52d3ba..6ad6a66371 100644 --- a/doc/common/section_compute-configure-spice.xml +++ b/doc/common/section_compute-configure-spice.xml @@ -1,26 +1,25 @@ -
- Spice Console - OpenStack Compute has long had support for VNC consoles to - guests. The VNC protocol is fairly limited, lacking support for - multiple monitors, bi-directional audio, reliable cut+paste, video - streaming and more. SPICE is a new protocol which aims to address - all the limitations in VNC, to provide good remote desktop - support. + SPICE console + OpenStack Compute supports VNC consoles to guests. The VNC + protocol is fairly limited, lacking support for multiple monitors, + bi-directional audio, reliable cut-and-paste, video streaming and + more. SPICE is a new protocol that aims to address the limitations + in VNC and provide good remote desktop support. SPICE support in OpenStack Compute shares a similar - architecture to the VNC implementation. The OpenStack Dashboard - uses a SPICE-HTML5 widget in its console tab, that communicates to - the nova-spicehtml5proxy service using - SPICE-over-websockets. The nova-spicehtml5proxy - service communicates directly with the hypervisor process using SPICE. - If Spice is not configured correctly, Compute will fall - back upon VNC. + architecture to the VNC implementation. The OpenStack dashboard + uses a SPICE-HTML5 widget in its console tab that communicates to + the nova-spicehtml5proxy + service by using SPICE-over-websockets. The nova-spicehtml5proxy service + communicates directly with the hypervisor process by using SPICE. + If you do not configure SPICE correctly, Compute falls + back on VNC. - Options for configuring SPICE as the console for OpenStack Compute can be found below. - + The following table shows the options to configure SPICE as + the console for OpenStack Compute: +
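For reference, a sketch of a nova.conf fragment that enables SPICE on a compute host might resemble the following. The addresses are placeholders; confirm the option names against the table for your release:
[spice]
enabled=true
agent_enabled=true
keymap=en-us
server_listen=0.0.0.0
server_proxyclient_address=127.0.0.1
html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html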
diff --git a/doc/common/section_compute-configure-vnc.xml b/doc/common/section_compute-configure-vnc.xml index 4e8ae58485..7e7263d8cf 100644 --- a/doc/common/section_compute-configure-vnc.xml +++ b/doc/common/section_compute-configure-vnc.xml @@ -3,9 +3,10 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="getting-started-with-vnc-proxy"> - VNC Console Proxy + VNC console proxy The VNC proxy is an OpenStack component that enables compute - service users to access their instances through VNC clients. + service users to access their instances through VNC + clients. The VNC console connection works as follows: @@ -15,17 +16,18 @@ - The user pastes the URL in a browser or as a client - parameter. + The user pastes the URL in a browser or uses it as a + client parameter. The browser or client connects to the proxy. - The proxy talks to nova-consoleauth to - authorize the user's token, and maps the token to the - private host and port of an instance's - VNC server. + The proxy talks to nova-consoleauth to authorize the token for + the user, and maps the token to the + private host and port of the VNC server + for an instance. The compute host specifies the address that the proxy should use to connect through the nova.conf file option, @@ -34,35 +36,34 @@ private host network. - The proxy initiates the connection to VNC server, and + The proxy initiates the connection to VNC server and continues to proxy until the session ends. The proxy also tunnels the VNC protocol over WebSockets so - that the noVNC client has a way to talk VNC. + that the noVNC client can talk VNC. In general, the VNC proxy: - Bridges between the public network, where the clients - live, and the private network, where vncservers live. + Bridges between the public network where the clients live + and the private network where vncservers live. Mediates token authentication. Transparently deals with hypervisor-specific connection - details to provide a uniform client experience.
- noVNC process - - - - - -
-
+ details to provide a uniform client experience. +
+ noVNC process + + + + + +
@@ -70,148 +71,156 @@ About nova-consoleauth Both client proxies leverage a shared service to manage - token auth called nova-consoleauth. This - service must be running for either proxy to work. Many proxies - of either type can be run against a single - nova-consoleauth service in a cluster + token authentication called nova-consoleauth. This service must be running + for either proxy to work. Many proxies of either type can be run + against a single nova-consoleauth service in a cluster configuration. - Do not confuse the nova-consoleauth - shared service with nova-console, which is a - XenAPI-specific service that most recent VNC proxy architectures - do not use. + Do not confuse the nova-consoleauth shared service with + nova-console, which is a XenAPI-specific + service that most recent VNC proxy architectures do not + use.
- - Typical deployment - - A typical deployment consists of the following components: + Typical deployment + A typical deployment has the following components: - A nova-consoleauth process. Typically - runs on the controller host. + A nova-consoleauth process. Typically runs on + the controller host. - One or more nova-novncproxy services. - Supports browser-based noVNC clients. For simple - deployments, this service typically runs on the same machine - as nova-api because it proxies between the public network - and the private compute host network. + One or more nova-novncproxy services. Supports + browser-based noVNC clients. For simple deployments, this + service typically runs on the same machine as nova-api because it operates + as a proxy between the public network and the private + compute host network. One or more nova-xvpvncproxy services. Supports the special Java client discussed here. For simple deployments, this service typically runs on the - same machine as nova-api because it proxies between the - public network and the private compute host network. + same machine as nova-api because it acts as a proxy between + the public network and the private compute host + network. One or more compute hosts. These compute hosts must have correctly configured options, as follows. -
-
- VNC configuration options - - - To support live migration, you cannot specify a specific IP - address for vncserver_listen, because - that IP address does not exist on the destination - host. - - - The vncserver_proxyclient_address - defaults to 127.0.0.1, which is the - address of the compute host that nova instructs proxies to - use when connecting to instance servers. - For all-in-one XenServer domU deployments, set this to - 169.254.0.1. - For multi-host XenServer domU deployments, set to a dom0 - management IP on the same network as the proxies. - For multi-host libvirt deployments, set to a host - management IP on the same network as the proxies. - -
-
- - nova-novncproxy (noVNC) - - You must install the noVNC package, which contains the - nova-novncproxy service. - As root, run the following command: - # apt-get install novnc - The service starts automatically on installation. - To restart it, run the following command: - # service novnc restart - The configuration option parameter should point to your - nova.conf file, which includes the - message queue server address and credentials. - By default, nova-novncproxy binds on - 0.0.0.0:6080. - To connect the service to your nova deployment, add the - following configuration options to your - nova.conf file: - - - - vncserver_listen=0.0.0.0 - - Specifies the address on which the VNC service should - bind. Make sure it is assigned one of the compute node - interfaces. This address is the one used by your domain - file. - <graphics type="vnc" autoport="yes" keymap="en-us" listen="0.0.0.0"/> - - - To use live migration, make sure to use the - 0.0.0.0address. - - - - - vncserver_ proxyclient_ address - =127.0.0.1 - - The address of the compute host that nova instructs - proxies to use when connecting to instance - vncservers. - - -
-
+
+
+ VNC configuration options + + + To support live migration, you cannot specify a specific IP + address for vncserver_listen, because that + IP address does not exist on the destination host. + + + The vncserver_proxyclient_address + defaults to 127.0.0.1, which is the address + of the compute host that nova instructs proxies to use when + connecting to instance servers. + For all-in-one XenServer domU deployments, set this to + 169.254.0.1. + For multi-host XenServer domU deployments, set to a dom0 + management IP on the same network as the proxies. + For multi-host libvirt deployments, set to a host + management IP on the same network as the proxies. + +
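Putting these guidelines together, a compute host in a multi-host libvirt deployment might use settings such as the following, where 192.168.1.2 stands in for the host management address and 172.24.1.1 for the public proxy address:
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=192.168.1.2
novncproxy_base_url=http://172.24.1.1:6080/vnc_auto.html
xvpvncproxy_base_url=http://172.24.1.1:6081/console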
+
- Frequently asked questions about VNC access to - virtual machines + nova-novncproxy (noVNC) + + You must install the noVNC package, which contains the + nova-novncproxy + service. + As root, run the following command: + # apt-get install novnc + The service starts automatically on installation. + To restart it, run the following command: + # service novnc restart + The configuration option parameter should point to your + nova.conf file, which includes the + message queue server address and credentials. + By default, nova-novncproxy binds on + 0.0.0.0:6080. + To connect the service to your nova deployment, add the + following configuration options to your + nova.conf file: + + + + vncserver_listen=0.0.0.0 + + Specifies the address on which the VNC service should + bind. Make sure it is assigned one of the compute node + interfaces. This address is the one used by your domain + file. + <graphics type="vnc" autoport="yes" keymap="en-us" listen="0.0.0.0"/> + + To use live migration, use the + 0.0.0.0 address. + + + + + vncserver_ proxyclient_ address + =127.0.0.1 + + The address of the compute host that nova instructs + proxies to use when connecting to instance + vncservers. + + +
+
+ + Frequently asked questions about VNC access to virtual + machines Q: What is the difference between - nova-xvpvncproxy and - nova-novncproxy? + nova-xvpvncproxy and nova-novncproxy? A: nova-xvpvncproxy, which ships with nova, is a proxy that supports a simple Java client. - nova-novncproxy uses noVNC to provide - VNC support through a web browser. + nova-novncproxy + uses noVNC to provide VNC support through a web + browser. Q: I want VNC support in the Dashboard. What services do I need? - A: You need nova-novncproxy, - nova-consoleauth, and correctly - configured compute hosts. + A: You need nova-novncproxy, nova-consoleauth, and correctly configured + compute hosts. Q: When I use nova get-vnc-console or click on the VNC tab of the Dashboard, it hangs. Why? - A: Make sure you are running - nova-consoleauth (in addition to - nova-novncproxy). The proxies rely on - nova-consoleauth to validate tokens, - and waits for a reply from them until a timeout is reached. + A: Make sure you are running nova-consoleauth (in addition to nova-novncproxy). The proxies + rely on nova-consoleauth to validate tokens, and + waits for a reply from them until a timeout is reached. @@ -224,7 +233,8 @@ two servers: PROXYSERVER (public_ip=172.24.1.1, management_ip=192.168.1.1) COMPUTESERVER (management_ip=192.168.1.2) - Your nova-compute configuration file must set the + Your nova-compute configuration file must set the following values: # These flags help construct a connection data structure vncserver_proxyclient_address=192.168.1.2 @@ -248,11 +258,12 @@ vncserver_listen=192.168.1.2 Q: My noVNC does not work with recent - versions of web browsers. Why? + versions of web browsers. Why? - A: Make sure you have python-numpy - installed, which is required to support a newer version of - the WebSocket protocol (HyBi-07+). + A: Make sure you have installed + python-numpy, which is required to + support a newer version of the WebSocket protocol + (HyBi-07+). @@ -265,9 +276,9 @@ vncserver_listen=192.168.1.2 location of this file varies based on Linux distribution. On Ubuntu 12.04, the file is at /usr/share/pyshared/horizon/dashboards/nova/instances/templates/instances/_detail_vnc.html. - Modify the width and - height parameters, as follows: - <iframe src="{{ vnc_url }}" width="720" height="430"></iframe> + Modify the and + options, as follows: + <iframe src="{{ vnc_url }}" width="720" height="430"></iframe>
diff --git a/doc/common/section_compute-options.xml b/doc/common/section_compute-options.xml index 0521bc06a5..c351a1bc43 100644 --- a/doc/common/section_compute-options.xml +++ b/doc/common/section_compute-options.xml @@ -1,126 +1,138 @@
+ xmlns="http://docbook.org/ns/docbook" + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> File format for nova.conf Overview - The Compute service supports a large number of configuration options. These options - are specified in a configuration file whose default location in - /etc/nova/nova.conf. - The configuration file is in INI file format, with options specified as key=value - pairs, grouped into sections. Almost all of the configuration options are in the - DEFAULT section. Here's a brief - example:[DEFAULT] + The Compute service supports a large number of + configuration options. These options are specified in the + /etc/nova/nova.conf configuration + file. + The configuration file is in INI file format, with options specified as + key=value pairs, grouped into + sections. Almost all configuration options are in the + DEFAULT section. For + example: + [DEFAULT] debug=true verbose=true [trusted_computing] -server=10.3.4.2 - - +server=10.3.4.2 + Types of configuration options - Each configuration option has an associated type that indicates what values can be - set. The supported option types are as follows: - - BoolOpt - - Boolean option. Value must be either true or - false . - Example:debug=false - - - - StrOpt - - String option. Value is an arbitrary string. - Example:my_ip=10.0.0.1 - - - - IntOption - - Integer option. Value must be an integer. Example: - glance_port=9292 - - - - MultiStrOpt - - String option. Same as StrOpt, except that it can be declared multiple - times to indicate multiple values. - Example:ldap_dns_servers=dns1.example.org -ldap_dns_servers=dns2.example.org - - - - ListOpt - - List option. Value is a list of arbitrary strings separated by commas. - Example:enabled_apis=ec2,osapi_compute,metadata - - - - FloatOpt - - Floating-point option. Value must be a floating-point number. - Example:ram_allocation_ratio=1.5 - - - - - - - Nova options should not - be quoted. - - + Each configuration option has an associated type that + indicates which values can be set. The supported option + types are: + + + BoolOpt + + Boolean option. Value must be either + true or + false . + Example:debug=false + + + + StrOpt + + String option. Value is an arbitrary string. + Example:my_ip=10.0.0.1 + + + + IntOption + + Integer option. Value must be an integer. + Example: + glance_port=9292 + + + + MultiStrOpt + + String option. Same as StrOpt, except that + it can be declared multiple times to indicate + multiple values. Example: + ldap_dns_servers=dns1.example.org +ldap_dns_servers=dns2.example.org + + + + ListOpt + + List option. Value is a list of arbitrary + strings separated by commas. Example: + enabled_apis=ec2,osapi_compute,metadata + + + + FloatOpt + + Floating-point option. Value must be a + floating-point number. Example: + ram_allocation_ratio=1.5 + + + + + Do not specify quotes around Nova options. + + Sections - Configuration options are grouped by section. The Compute config file supports the - following sections. + Configuration options are grouped by section. The + Compute configuration file supports the following sections. [DEFAULT] - Almost all of the configuration options are organized into this - section. If the documentation for a configuration option does not - specify its section, assume that it should be placed in this one. + Contains most configuration options. If + the documentation for a configuration + option does not specify its section, + assume that it appears in this + section. 
[cells] - The cells section is - used for options for configuring cells - functionality. See the Cells - section of the OpenStack Compute Admin - Manual for more details. + Use options in this section to configure + cells functionality. For details, see the + Cells section () in the OpenStack + Configuration + Reference. [baremetal] - This section is used for options that relate to the baremetal - hypervisor driver. + Use options in this section to configure + the baremetal hypervisor driver. [conductor] - The conductor section is used for options for - configuring the nova-conductor service. + Use options in this section to configure + the nova-conductor + service. [trusted_computing] - The trusted_computing section is used for options - that relate to the trusted computing pools functionality. Options in - this section describe how to connect to a remote attestation + Use options in this section to configure + the trusted computing pools functionality + and how to connect to a remote attestation service. @@ -128,35 +140,47 @@ ldap_dns_servers=dns2.example.org Variable substitution - The configuration file supports variable substitution. Once a configuration option is - set, it can be referenced in later configuration values when preceded by - $. Consider the following example where my_ip - is defined and then $my_ip is used as a - variable.my_ip=10.2.3.4 + The configuration file supports variable substitution. + After you set a configuration option, it can be referenced + in later configuration values when you precede it with + $. This example defines + my_ip and then uses + $my_ip as a + variable:my_ip=10.2.3.4 glance_host=$my_ip metadata_host=$my_ip - If you need a value to contain the $ symbol, escape it by doing - $$. For example, if your LDAP DNS password was - $xkj432, you would - do:ldap_dns_password=$$xkj432 - The Compute code uses Python's string.Template.safe_substitute() - method to implement variable substitution. For more details on how variable substitution - is resolved, see Python - documentation on template strings and PEP 292. + If you need a value to contain the $ + symbol, escape it with $$. For example, + if your LDAP DNS password was $xkj432, + specify it, as + follows:ldap_dns_password=$$xkj432 + The Compute code uses the Python + string.Template.safe_substitute() + method to implement variable substitution. For more + details on how variable substitution is resolved, see + http://docs.python.org/2/library/string.html#template-strings + and http://www.python.org/dev/peps/pep-0292/. Whitespace - To include whitespace in a configuration value, use a quoted string. For - example:ldap_dns_passsword='a password with spaces' + To include whitespace in a configuration value, use a + quoted string. For example: + ldap_dns_passsword='a password with spaces' - Specifying an alternate location for nova.conf - The configuration file is loaded by all of the nova-* services, as well as the - nova-manage command-line tool. To specify an alternate location - for the configuration file, pass the --config-file - /path/to/nova.conf argument when starting a - nova-* service or calling nova-manage. + Define an alternate location for nova.conf + All nova-* + services and the nova-manage + command-line client load the configuration file. To define + an alternate location for the configuration file, pass the + --config-file + /path/to/nova.conf + parameter when you start a nova-* service or call a + nova-manage command.
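For example, assuming a hypothetical staging configuration file at /etc/nova/nova-staging.conf, you can point a service or the nova-manage client at it as follows:
# nova-api --config-file /etc/nova/nova-staging.conf
# nova-manage --config-file /etc/nova/nova-staging.conf service list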
diff --git a/doc/common/section_compute_config-api.xml b/doc/common/section_compute_config-api.xml index 778a074471..f9e778f8bc 100644 --- a/doc/common/section_compute_config-api.xml +++ b/doc/common/section_compute_config-api.xml @@ -1,89 +1,94 @@ -
- Configuring the Compute API - The Compute API, run by the - nova-api - daemon, is the component of OpenStack Compute that - receives and responds to user requests, whether they - be direct API calls, or via the CLI tools or dashboard. + Configure the Compute API + The Compute API, run by the nova-api daemon, is the component of + OpenStack Compute that receives and responds to user requests, + whether they be direct API calls, or via the CLI tools or + dashboard. - Configuring Compute API password handling - The OpenStack Compute API allows the user to specify an - admin password when creating (or rebuilding) a server - instance. If no password is specified, a randomly generated - password is used. The password is returned in the API + Configure Compute API password handling + The OpenStack Compute API enables users to specify an + administrative password when they create or rebuild a + server instance. If the user does not specify a password, + a random password is generated and returned in the API response. - In practice, the handling of the admin password depends on - the hypervisor in use, and may require additional - configuration of the instance, such as installing an agent to - handle the password setting. If the hypervisor and instance - configuration do not support the setting of a password at - server create time, then the password returned by the create - API call will be misleading, since it was ignored. - To prevent this confusion, the configuration option - enable_instance_password can be used to - disable the return of the admin password for installations - that don't support setting instance passwords. + In practice, how the admin password is handled depends + on the hypervisor in use and might require additional + configuration of the instance. For example, you might have + to install an agent to handle the password setting. If the + hypervisor and instance configuration do not support + setting a password at server create time, the password + that is returned by the create API call is misleading + because it was ignored. + To prevent this confusion, use the + + configuration option to disable the return of the admin + password for installations that do not support setting + instance passwords. - Configuring Compute API Rate Limiting + Configure Compute API rate limiting OpenStack Compute supports API rate limiting for the - OpenStack API. The rate limiting allows an administrator to - configure limits on the type and number of API calls that can - be made in a specific time interval. - When API rate limits are exceeded, HTTP requests will - return a error with a status code of 413 "Request entity too - large", and will also include a 'Retry-After' HTTP header. The - response body will include the error details, and the delay - before the request should be retried. + OpenStack API. The rate limiting allows an administrator + to configure limits on the type and number of API calls + that can be made in a specific time interval. + When API rate limits are exceeded, HTTP requests return + an error with a status code of 413 + Request entity too large, and + includes an HTTP Retry-After header. + The response body includes the error details and the delay + before you should retry the request. Rate limiting is not available for the EC2 API. - Specifying Limits - Limits are specified using five values: + Define limits + To define limits, set these values: - The HTTP method used - in the API call, typically one of GET, PUT, POST, or - DELETE. 
+ The HTTP method + used in the API call, typically one of GET, PUT, + POST, or DELETE. - A human readable URI - that is used as a friendly description of where the limit - is applied. + A human readable + URI that is used as a friendly + description of where the limit is applied. - A regular expression. - The limit will be applied to all URI's that match the - regular expression and HTTP Method. + A regular + expression. The limit is applied to + all URIs that match the regular expression and + HTTP method. - A limit value that - specifies the maximum count of units before the limit - takes effect. + A limit value + that specifies the maximum count of units before + the limit takes effect. - An interval that - specifies time frame the limit is applied to. The interval - can be SECOND, MINUTE, HOUR, or DAY. + An interval + that specifies time frame to which the limit is + applied. The interval can be SECOND, MINUTE, HOUR, + or DAY. - Rate limits are applied in order, relative to the HTTP + Rate limits are applied in relative order to the HTTP method, going from least to most specific. For example, - although the default threshold for POST to */servers is 50 per - day, one cannot POST to */servers more than 10 times within a - single minute because the rate limits for any POST is - 10/min. + although the default threshold for POST to */servers is 50 + each day, you cannot POST to */servers more than 10 times + in a single minute because the rate limits for any POST is + 10 each minute. - Default Limits - OpenStack compute is normally installed with the following - limits enabled: + Default limits + Normally, you install OpenStack Compute with the + following limits enabled: - + @@ -127,40 +132,54 @@
Default API Rate LimitsDefault API rate limits
HTTP method
- Configuring and Changing Limits - The actual limits are specified in the file - etc/nova/api-paste.ini, as part of the - WSGI pipeline. - To enable limits, ensure the - 'ratelimit' filter is included in the API - pipeline specification. If the 'ratelimit' - filter is removed from the pipeline, limiting will be - disabled. There should also be a definition for the rate limit - filter. The lines will appear as follows: - -[pipeline:openstack_compute_api_v2] + Configure and change limits + As part of the WSGI pipeline, the + etc/nova/api-paste.ini file + defines the actual limits. + To enable limits, include the + ' filter in the API pipeline + specification. If the filter is + removed from the pipeline, limiting is disabled. You must + also define the rate limit filter. The lines appear as + follows: + [pipeline:openstack_compute_api_v2] pipeline = faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2 [pipeline:openstack_volume_api_v1] pipeline = faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1 [filter:ratelimit] -paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory - - To modify the limits, add a 'limits' +paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory + To modify the limits, add a limits specification to the [filter:ratelimit] - section of the file. The limits are specified in the order - HTTP method, friendly URI, regex, limit, and interval. The - following example specifies the default rate limiting + section of the file. Specify the limits in this + order: + + + HTTP method + + + friendly URI + + + regex + + + limit + + + interval + + + The following example shows the default rate-limiting values: - -[filter:ratelimit] + [filter:ratelimit] paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory -limits =(POST, "*", .*, 10, MINUTE);(POST, "*/servers", ^/servers, 50, DAY);(PUT, "*", .*, 10, MINUTE);(GET, "*changes-since*", .*changes-since.*, 3, MINUTE);(DELETE, "*", .*, 100, MINUTE) - +limits =(POST, "*", .*, 10, MINUTE);(POST, "*/servers", ^/servers, 50, DAY);(PUT, "*", .*, 10, MINUTE);(GET, "*changes-since*", .*changes-since.*, 3, MINUTE);(DELETE, "*", .*, 100, MINUTE) - - List of configuration options for Compute API - + + Configuration reference + The following table lists the Compute API configuration options: +
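As a concrete illustration of the five-field format, raising the daily POST allowance for */servers to 120 requests (an arbitrary value) changes only that one entry:
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
limits =(POST, "*", .*, 10, MINUTE);(POST, "*/servers", ^/servers, 120, DAY);(PUT, "*", .*, 10, MINUTE);(GET, "*changes-since*", .*changes-since.*, 3, MINUTE);(DELETE, "*", .*, 100, MINUTE)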
diff --git a/doc/common/section_config_keystone_ldap.xml b/doc/common/section_config_keystone_ldap.xml index cb8433768c..bb4ba49411 100644 --- a/doc/common/section_config_keystone_ldap.xml +++ b/doc/common/section_config_keystone_ldap.xml @@ -1,15 +1,14 @@ -
- Configuring OpenStack Identity for an LDAP backend - As an alternative to the SQL Database backing store, Identity can use - a directory server to provide the Identity service. An example schema - for AcmeExample would look like this: - -dn: dc=AcmeExample,dc=org + Configure the Identity Service with an LDAP + back-end + As an alternative to the SQL database backing store, the + Identity Service can use a directory server to provide the + Identity Service, for example: + dn: dc=AcmeExample,dc=org dc: AcmeExample objectClass: dcObject objectClass: organizationalUnit @@ -28,12 +27,11 @@ ou: users dn: ou=Roles,dc=AcmeExample,dc=org objectClass: top objectClass: organizationalUnit -ou: roles - - The corresponding entries in the keystone.conf - configuration file are: - -[ldap] +ou: roles + The corresponding entries in the + keystone.conf configuration file + are: + [ldap] url = ldap://localhost user = dc=Manager,dc=AcmeExample,dc=org password = badpassword @@ -48,30 +46,26 @@ tenant_tree_dn = ou=Groups,dc=AcmeExample,dc=com tenant_objectclass = groupOfNames role_tree_dn = ou=Roles,dc=AcmeExample,dc=com -role_objectclass = organizationalRole - +role_objectclass = organizationalRole The default object classes and attributes are intentionally - simplistic. They reflect the common standard objects according to the - LDAP RFCs. However, in a live deployment, the correct attributes can be - overridden to support a preexisting, more complex schema. For example, - in the user object, the objectClass posixAccount from RFC2307 is very - common. If this is the underlying objectclass, then the - uid field should probably be - uidNumber and username - field either uid or cn. To - change these two fields, the corresponding entries in the Keystone - configuration file are: - -[ldap] + simple. They reflect the common standard objects according to + the LDAP RFCs. However, in a live deployment, you can override + the correct attributes to support a preexisting, complex + schema. For example, in the user object, the objectClass + posixAccount from RFC2307 is very common. If this is the + underlying objectclass, then the uid + field should probably be uidNumber and + username field either + uid or cn. To + change these two fields, the corresponding entries in the + Keystone configuration file are: + [ldap] user_id_attribute = uidNumber -user_name_attribute = cn - - There is a set of allowed actions per object type that you can modify - depending on your specific deployment. For example, the users are - managed by another tool and you have only read access, in such case the - configuration is: - -[ldap] +user_name_attribute = cn + Depending on your deployment, you can modify a set of + allowed actions for each object type. For example, you might + set the following options: + [ldap] user_allow_create = False user_allow_update = False user_allow_delete = False @@ -82,55 +76,42 @@ tenant_allow_delete = True role_allow_create = True role_allow_update = True -role_allow_delete = True - - There are some configuration options for filtering users, tenants and - roles, if the backend is providing too much output, in such case the - configuration will look like: - -[ldap] +role_allow_delete = True + If the back-end provides too much output, you can filter + users, tenants, and roles. 
For example: + [ldap] user_filter = (memberof=CN=acme-users,OU=workgroups,DC=AcmeExample,DC=com) tenant_filter = -role_filter = - - - In case that the directory server does not have an attribute enabled - of type boolean for the user, there are several configuration - parameters that can be used to extract the value from an integer - attribute like in Active Directory: - - -[ldap] +role_filter = + If the directory server has not enabled the + boolean type for the user, you can use + configuration options to extract the value from an integer + attribute. For example, in an Active Directory, as + follows: + [ldap] user_enabled_attribute = userAccountControl user_enabled_mask = 2 -user_enabled_default = 512 - - - In this case the attribute is an integer and the enabled attribute - is listed in bit 1, so the if the mask configured - user_enabled_mask is different from 0, it gets - the value from the field user_enabled_attribute - and it makes an ADD operation with the value indicated on - user_enabled_mask and if the value matches the - mask then the account is disabled. - - - It also saves the value without mask to the user identity in the - attribute enabled_nomask. This is needed in - order to set it back in case that we need to change it to - enable/disable a user because it contains more information than the - status like password expiration. Last setting - user_enabled_mask is needed in order to create - a default value on the integer attribute (512 = NORMAL ACCOUNT on - AD) - - - In case of Active Directory the classes and attributes could not - match the specified classes in the LDAP module so you can configure - them like so: - - -[ldap] +user_enabled_default = 512 + The attribute is an integer. Bit 1 contains the enabled + attribute. If the user_enabled_mask mask + is not 0, it gets its value from the + field and it + performs an ADD operation by using the + user_enabled_mask value. If the value + matches the mask, the account is disabled. + It also saves the value without mask to the + identity user in the + attribute. In case you + must change it to enable or disable a user, you can use this + value because it contains more information than the status + such as, password expiration. The + user_enabled_mask value is required + to create a default value on the integer attribute (512 = + NORMAL ACCOUNT on AD). + If Active Directory classes and attributes do not match the + specified classes in the LDAP module, so you can modify them, + as follows: + [ldap] user_objectclass = person user_id_attribute = cn user_name_attribute = cn @@ -150,6 +131,5 @@ role_objectclass = organizationalRole role_id_attribute = cn role_name_attribute = ou role_member_attribute = roleOccupant -role_attribute_ignore = - +role_attribute_ignore =
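To make the mask arithmetic concrete, consider the standard Active Directory values: a userAccountControl of 512 is a normal, enabled account, and 514 is the same account with the disable bit (2) set. With user_enabled_mask = 2, the value 512 gives 512 &amp; 2 = 0, so the user is treated as enabled; the value 514 gives 514 &amp; 2 = 2, which matches the mask, so the user is treated as disabled. The unmasked value (514 in this example) is kept in the enabled_nomask attribute so that it can be written back later without losing the other bits.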
diff --git a/doc/common/section_customize_flavors.xml b/doc/common/section_customize_flavors.xml index f5053314c5..2c57aead20 100644 --- a/doc/common/section_customize_flavors.xml +++ b/doc/common/section_customize_flavors.xml @@ -170,7 +170,7 @@ vif_outbound_peak - + Incoming and outgoing traffic can be shaped independently. The bandwidth element can have at most one inbound and at most one outbound child element. Leaving any of these children diff --git a/doc/common/section_dashboard_access.xml b/doc/common/section_dashboard_access.xml index ffb738d166..a27c099770 100644 --- a/doc/common/section_dashboard_access.xml +++ b/doc/common/section_dashboard_access.xml @@ -41,7 +41,7 @@ for the dashboard: $ https://IP_ADDRESS_OR_HOSTNAME/ - Certificate Warning + Certificate warning If a certificate warning appears when you try to access the URL for the first time, a self-signed certificate is in use, which is not considered diff --git a/doc/common/section_dashboard_customizing.xml b/doc/common/section_dashboard_customizing.xml index d1f6639f93..004daaeaa2 100644 --- a/doc/common/section_dashboard_customizing.xml +++ b/doc/common/section_dashboard_customizing.xml @@ -127,13 +127,13 @@ text-decoration: none; Restart apache: On Ubuntu: $ sudo service apache2 restart - + On Fedora, RHEL, CentOS: $ sudo service httpd restart - + On openSUSE: $ sudo service apache2 restart - +
Reload the dashboard in your browser to view your diff --git a/doc/common/section_dashboard_launch_instances_from_image.xml b/doc/common/section_dashboard_launch_instances_from_image.xml index 4c1cf48346..9fc26aee94 100644 --- a/doc/common/section_dashboard_launch_instances_from_image.xml +++ b/doc/common/section_dashboard_launch_instances_from_image.xml @@ -31,7 +31,7 @@ A name for your instance. - + The flavor for your diff --git a/doc/common/section_dashboard_sessions.xml b/doc/common/section_dashboard_sessions.xml index 3294a2666d..90eb0d86c0 100644 --- a/doc/common/section_dashboard_sessions.xml +++ b/doc/common/section_dashboard_sessions.xml @@ -14,7 +14,7 @@ /etc/openstack-dashboard/local_settings, on Ubuntu and Debian: /etc/openstack-dashboard/local_settings.py and on openSUSE: /usr/share/openstack-dashboard/openstack_dashboard/local/local_settings.py). - + The following sections describe the pros and cons of each option as it pertains to deploying the dashboard.
@@ -48,7 +48,7 @@ CACHES = { You can use applications such as Memcached or Redis for external caching. These applications offer persistence and shared storage and are useful for small-scale deployments and/or development. - +
Memcached Memcached is a high-performance and distributed memory object caching system
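A typical local_settings snippet for memcached-backed sessions looks like the following; the LOCATION value is illustrative and must point at your running memcached instance:
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}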
- Database + Initialize and configure the database Database-backed sessions are scalable, persistent, and can be made high-concurrency and highly-available. However, database-backed sessions are one of the slower @@ -105,7 +105,6 @@ CACHES = { can also be a substantial undertaking and is far beyond the scope of this documentation. - To initialize and configure the database: Start the mysql command line client: $ mysql -u root -p @@ -173,14 +172,14 @@ No fixtures found. symbolic link settings: On Ubuntu: # /etc/init.d/apache2 restart - + On Fedora/RHEL/CentOS: # service httpd restart # service apache2 restart - + On openSUSE: # systemctl restart apache2.service - + On Ubuntu, restart the nova-api service to ensure that the diff --git a/doc/common/section_fibrechannel.xml b/doc/common/section_fibrechannel.xml index 44bc832893..56cdc6c833 100644 --- a/doc/common/section_fibrechannel.xml +++ b/doc/common/section_fibrechannel.xml @@ -1,62 +1,49 @@ -
- Nova Compute Fibre Channel Support -
Overview of Fibre Channel Support - +
+ Fibre Channel support in Compute + Fibre Channel support in OpenStack Compute is remote block + storage attached to Compute nodes for VMs. + In the Grizzly release, Fibre Channel supports only the KVM + hypervisor. + Nova and Cinder for Fibre Channel do not support automatic + zoning. Fibre Channel arrays must be pre-zoned or directly + attached to the KVM hosts. +
+ KVM host requirements + You must install these packages on the KVM host: - Fibre Channel support in OpenStack Compute is remote block storage attached - to Compute nodes for VMs. + + sysfstools - Nova uses the + systool application in this + package. - In the Grizzly release, Fibre Channel only supports the KVM hypervisor. - - - There is no automatic zoning support in Nova or Cinder for Fibre Channel.  - Fibre Channel arrays must be pre-zoned or directly attached to the KVM - hosts. + + sg3-utils - Nova uses the + sg_scan and + sginfo applications. - -
-
- Requirements for KVM Hosts - The KVM host must have the following system packages installed: - - - - - sysfstools - Nova uses the systool - application in this package. - - - - sg3-utils - Nova uses the sg_scan - and sginfo applications. - - - - Installing the multipath-tools package is optional. + Installing the multipath-tools + package is optional.
- Installing the Required Packages - Use the following commands to install the system packages. - - - - For systems running Ubuntu: - - $ sudo apt-get install sysfstools sg3-utils multipath-tools - - - - + Install required packages + Use these commands to install the system + packages: + + + For systems running Ubuntu: + $ sudo apt-get install sysfstools sg3-utils multipath-tools + + For systems running Red Hat: - - $ sudo yum install sysfstools sg3_utils multipath-tools - + $ sudo yum install sysfstools sg3_utils multipath-tools
diff --git a/doc/common/section_getstart_compute.xml b/doc/common/section_getstart_compute.xml index 801f4a008c..68fb6e401a 100644 --- a/doc/common/section_getstart_compute.xml +++ b/doc/common/section_getstart_compute.xml @@ -1,145 +1,140 @@
- Compute service - The Compute service is a cloud computing fabric - controller, which is the main part of an IaaS system. Use it to - host and manage cloud computing systems. The main modules are - implemented in Python. - Compute interacts with the Identity Service for + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" + xml:id="compute-service"> + Compute service + The Compute service is a cloud computing fabric controller, + which is the main part of an IaaS system. Use it to host and + manage cloud computing systems. The main modules are implemented + in Python. + Compute interacts with the Identity Service for authentication, Image Service for images, and the Dashboard for the user and administrative interface. Access to images is limited by project and by user; quotas are limited per project (for example, the number of instances). The Compute service scales horizontally on standard hardware, and downloads images to launch instances as required. - The Compute Service is made up of the following functional - areas and their underlying components: - - API - - nova-api - service. Accepts and responds to end user compute API - calls. Supports the OpenStack Compute API, the Amazon EC2 - API, and a special Admin API for privileged users to - perform administrative actions. Also, initiates most - orchestration activities, such as running an instance, and - enforces some policies. - - - nova-api-metadata service. Accepts - metadata requests from instances. The nova-api-metadata service - is generally only used when you run in multi-host mode - with nova-network - installations. For details, see - Metadata service - in the Cloud Administrator Guide. - Note for Debian users: on Debian system, it is included in the - nova-api - package, and can be selected through debconf. - - - - Compute core - - nova-compute - process. A worker daemon that creates and terminates - virtual machine instances through hypervisor APIs. For - example, XenAPI for XenServer/XCP, libvirt for KVM or - QEMU, VMwareAPI for VMware, and so on. The process by - which it does so is fairly complex but the basics are - simple: Accept actions from the queue and perform a series - of system commands, like launching a KVM instance, to - carry them out while updating state in the - database. - - - nova-scheduler process. Conceptually the - simplest piece of code in Compute. Takes a virtual machine - instance request from the queue and determines on which - compute server host it should run. - - - nova-conductor module. Mediates - interactions between nova-compute and the database. Aims to - eliminate direct accesses to the cloud database made by - nova-compute. - The nova-conductor module scales horizontally. - However, do not deploy it on any nodes where nova-compute runs. For more - information, see A new Nova service: nova-conductor. - - - - Networking for VMs - - nova-network - worker daemon. Similar to nova-compute, it accepts networking tasks - from the queue and performs tasks to manipulate the - network, such as setting up bridging interfaces or - changing iptables rules. This functionality is being - migrated to OpenStack Networking, which is a separate - OpenStack service. - - - nova-dhcpbridge script. Tracks IP address - leases and records them in the database by using the - dnsmasq dhcp-script facility. This - functionality is being migrated to OpenStack Networking. - OpenStack Networking provides a different script. 
- - - - - Console interface - - nova-consoleauth daemon. Authorizes tokens - for users that console proxies provide. See nova-novncproxy and - nova-xvpnvcproxy. This service must be - running for console proxies to work. Many proxies of - either type can be run against a single nova-consoleauth service in - a cluster configuration. For information, see About nova-consoleauth. - - - nova-novncproxy daemon. Provides a proxy - for accessing running instances through a VNC connection. - Supports browser-based novnc clients. - - - nova-console - daemon. Deprecated for use with Grizzly. Instead, the - nova-xvpnvncproxy is used. - - - nova-xvpnvncproxy daemon. A proxy for - accessing running instances through a VNC connection. - Supports a Java client specifically designed for - OpenStack. - - - nova-cert - daemon. Manages x509 certificates. - - - In Debian, a unique + The Compute Service is made up of the following functional + areas and their underlying components: + + API + + nova-api service. + Accepts and responds to end user compute API calls. Supports + the OpenStack Compute API, the Amazon EC2 API, and a special + Admin API for privileged users to perform administrative + actions. Also, initiates most orchestration activities, such + as running an instance, and enforces some policies. + + + nova-api-metadata + service. Accepts metadata requests from instances. The + nova-api-metadata + service is generally only used when you run in multi-host mode + with nova-network + installations. For details, see Metadata service in the Cloud + Administrator Guide. + On Debian systems, it is included in the nova-api package, and can be + selected through debconf. + + + + Compute core + + nova-compute + process. A worker daemon that creates and terminates virtual + machine instances through hypervisor APIs. For example, XenAPI + for XenServer/XCP, libvirt for KVM or QEMU, VMwareAPI for + VMware, and so on. The process by which it does so is fairly + complex but the basics are simple: Accept actions from the + queue and perform a series of system commands, like launching + a KVM instance, to carry them out while updating state in the + database. + + + nova-scheduler + process. Conceptually the simplest piece of code in Compute. + Takes a virtual machine instance request from the queue and + determines on which compute server host it should run. + + + nova-conductor + module. Mediates interactions between nova-compute and the database. + Aims to eliminate direct accesses to the cloud database made + by nova-compute. The + nova-conductor + module scales horizontally. However, do not deploy it on any + nodes where nova-compute runs. For more information, see + A new Nova service: nova-conductor. + + + + Networking for VMs + + nova-network + worker daemon. Similar to nova-compute, it accepts networking tasks from + the queue and performs tasks to manipulate the network, such + as setting up bridging interfaces or changing iptables rules. + This functionality is being migrated to OpenStack Networking, + which is a separate OpenStack service. + + + nova-dhcpbridge + script. Tracks IP address leases and records them in the + database by using the dnsmasq dhcp-script + facility. This functionality is being migrated to OpenStack + Networking. OpenStack Networking provides a different + script. + + + + + Console interface + + nova-consoleauth + daemon. Authorizes tokens for users that console proxies + provide. See nova-novncproxy and nova-xvpnvcproxy. This service + must be running for console proxies to work. 
Many proxies of + either type can be run against a single nova-consoleauth service in a + cluster configuration. For information, see About nova-consoleauth. + + + nova-novncproxy + daemon. Provides a proxy for accessing running instances + through a VNC connection. Supports browser-based novnc + clients. + + + nova-console + daemon. Deprecated for use with Grizzly. Instead, the + nova-xvpnvncproxy + is used. + + + nova-xvpnvncproxy + daemon. A proxy for accessing running instances through a VNC + connection. Supports a Java client specifically designed for + OpenStack. + + + nova-cert daemon. + Manages x509 certificates. + + + In Debian, a unique nova-consoleproxy package provides the nova-novncproxy, nova-spicehtml5proxy, and @@ -149,64 +144,61 @@ the debconf interface. You can also manually edit the /etc/default/nova-consoleproxy file and stop and start the console daemons. - - Image Management (EC2 scenario) - - nova-objectstore daemon. Provides an S3 - interface for registering images with the Image Service. - Mainly used for installations that must support euca2ools. - The euca2ools tools talk to nova-objectstore in S3 language, and nova-objectstore translates - S3 requests into Image Service requests. - - - euca2ools client. A set of command-line interpreter - commands for managing cloud resources. Though not an - OpenStack module, you can configure nova-api to support this - EC2 interface. For more information, see the Eucalyptus 2.0 Documentation. - - - - Command Line Interpreter/Interfaces - - nova client. Enables users to submit commands as a - tenant administrator or end user. - - - nova-manage client. Enables cloud administrators to - submit commands. - - - - Other components - - The queue. A central hub for passing messages between - daemons. Usually implemented with RabbitMQ, - but could be any AMPQ message queue, such as Apache Qpid - or Zero - MQ. - - - SQL database. Stores most build-time and runtime - states for a cloud infrastructure. Includes instance types - that are available for use, instances in use, available - networks, and projects. Theoretically, OpenStack Compute - can support any database that SQL-Alchemy supports, but - the only databases widely used are sqlite3 databases - (only appropriate for test and development work), MySQL, - and PostgreSQL. - - - The Compute Service interacts with other OpenStack - services: Identity Service for authentication, Image Service - for images, and the OpenStack dashboard for a web - interface. + + Image management (EC2 scenario) + + nova-objectstore + daemon. Provides an S3 interface for registering images with + the Image Service. Mainly used for installations that must + support euca2ools. The euca2ools tools talk to nova-objectstore in S3 language, and nova-objectstore translates S3 + requests into Image Service requests. + + + euca2ools client. A set of command-line interpreter + commands for managing cloud resources. Though not an OpenStack + module, you can configure nova-api to support this EC2 interface. For + more information, see the Eucalyptus 2.0 Documentation. + + + + Command-line clients and other interfaces + + nova client. Enables users to submit commands as a tenant + administrator or end user. + + + nova-manage client. Enables cloud administrators to submit + commands. + + + + Other components + + The queue. A central hub for passing messages between + daemons. Usually implemented with RabbitMQ, but + could be any AMPQ message queue, such as Apache Qpid or + Zero + MQ. + + + SQL database. 
Stores most build-time and runtime states + for a cloud infrastructure. Includes instance types that are + available for use, instances in use, available networks, and + projects. Theoretically, OpenStack Compute can support any + database that SQL-Alchemy supports, but the only databases + widely used are sqlite3 databases (only appropriate for test + and development work), MySQL, and PostgreSQL. + + + The Compute Service interacts with other OpenStack services: + Identity Service for authentication, Image Service for images, and + the OpenStack dashboard for a web interface.
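As a quick way to see these daemons in a running deployment, the nova-manage client mentioned above can list the registered services and the host each one reports from. A minimal sketch, assuming admin access on the cloud controller:
# nova-manage service list
A healthy deployment shows each binary, such as nova-compute, nova-scheduler, and nova-conductor, together with its host and a recent status heartbeat.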
diff --git a/doc/common/section_getstart_logical_arch.xml b/doc/common/section_getstart_logical_arch.xml index 11ba8d84eb..35b39dcd84 100644 --- a/doc/common/section_getstart_logical_arch.xml +++ b/doc/common/section_getstart_logical_arch.xml @@ -26,7 +26,7 @@ architecture for an OpenStack cloud:
- OpenStack logical architecture + Logical architecture - + The system consists of the following basic components: @@ -63,7 +63,7 @@ >ceilometer-alarm-notifier). Runs on one or more central management servers to allow setting alarms based on threshold evaluation for a collection of samples. - + A data store. A database capable of handling diff --git a/doc/common/section_getstart_networking.xml b/doc/common/section_getstart_networking.xml index beca0ad9be..c3e21ecfd4 100644 --- a/doc/common/section_getstart_networking.xml +++ b/doc/common/section_getstart_networking.xml @@ -1,45 +1,43 @@ -
- Networking Service Overview - Provides network-connectivity-as-a-service between - interface devices that are managed by other OpenStack - services, usually Compute. Enables users to create and attach - interfaces to networks. Like many OpenStack services, - OpenStack Networking is highly configurable due to its plug-in - architecture. These plug-ins accommodate different networking - equipment and software. Consequently, the architecture and - deployment vary dramatically. - Includes the following components: - - - neutron-server. Accepts and routes API - requests to the appropriate OpenStack Networking plug-in - for action. - - - OpenStack Networking plug-ins and agents. Plugs and - unplugs ports, creates networks or subnets, and provides - IP addressing. These plug-ins and agents differ depending - on the vendor and technologies used in the particular - cloud. OpenStack Networking ships with plug-ins and agents - for Cisco virtual and physical switches, Nicira NVP - product, NEC OpenFlow products, Open vSwitch, Linux - bridging, and the Ryu Network Operating System. - The common agents are L3 (layer 3), DHCP (dynamic host - IP addressing), and a plug-in agent. - - - Messaging queue. Most OpenStack Networking - installations make use of a messaging queue to route - information between the neutron-server and various agents - as well as a database to store networking state for - particular plug-ins. - - - OpenStack Networking interacts mainly with OpenStack - Compute, where it provides networks and connectivity for its - instances. -
+
+ Networking service overview + Provides network-connectivity-as-a-service between interface + devices that are managed by other OpenStack services, usually + Compute. Enables users to create and attach interfaces to + networks. Like many OpenStack services, OpenStack Networking is + highly configurable due to its plug-in architecture. These + plug-ins accommodate different networking equipment and software. + Consequently, the architecture and deployment vary + dramatically. + Includes the following components: + + + neutron-server. + Accepts and routes API requests to the appropriate OpenStack + Networking plug-in for action. + + + OpenStack Networking plug-ins and agents. Plugs and + unplugs ports, creates networks or subnets, and provides IP + addressing. These plug-ins and agents differ depending on the + vendor and technologies used in the particular cloud. + OpenStack Networking ships with plug-ins and agents for Cisco + virtual and physical switches, Nicira NVP product, NEC + OpenFlow products, Open vSwitch, Linux bridging, and the Ryu + Network Operating System. + The common agents are L3 (layer 3), DHCP (dynamic host IP + addressing), and a plug-in agent. + + + Messaging queue. Most OpenStack Networking installations + make use of a messaging queue to route information between the + neutron-server and various agents as well as a database to + store networking state for particular plug-ins. + + + OpenStack Networking interacts mainly with OpenStack Compute, + where it provides networks and connectivity for its + instances. +
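To see how these components fit together in practice, the neutron command-line client sends API requests to neutron-server, which hands them to the configured plug-in and agents. A brief sketch, assuming valid credentials are loaded; the names net1 and subnet1 are only illustrative:
$ neutron net-create net1
$ neutron subnet-create net1 10.0.0.0/24 --name subnet1
$ neutron port-list
Each call is routed through neutron-server to the plug-in, and the DHCP and L3 agents pick up the resulting configuration on the network nodes.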
diff --git a/doc/common/section_getstart_object-storage.xml b/doc/common/section_getstart_object-storage.xml index 30b013d494..ecd5c6eedc 100644 --- a/doc/common/section_getstart_object-storage.xml +++ b/doc/common/section_getstart_object-storage.xml @@ -1,47 +1,46 @@ -
- Object Storage Service - The Object Storage Service is a highly scalable and - durable multi-tenant object storage system for large amounts - of unstructured data at low cost through a RESTful http - API. - It includes the following components: - - - Proxy Servers (swift-proxy-server). Accepts Object Storage - API and raw HTTP requests to upload files, modify - metadata, and create containers. It also serves file or - container listings to web browsers. To improve - performance, the proxy server can use an optional cache - usually deployed with memcache. - - - Account servers (swift-account-server). Manage - accounts defined with the Object Storage Service. - - - Container servers (swift-container-server). Manage - a mapping of containers, or folders, within the Object - Storage Service. - - - Object servers (swift-object-server). Manage - actual objects, such as files, on the storage nodes. - - - A number of periodic processes. Performs housekeeping - tasks on the large data store. The replication services - ensure consistency and availability through the cluster. - Other periodic processes include auditors, updaters, and - reapers. - - - Configurable WSGI middleware, which is usually the - Identity Service, handles authentication. -
+
+ Object Storage service + The Object Storage service is a highly scalable and durable + multi-tenant object storage system for large amounts of + unstructured data at low cost through a RESTful HTTP API. + It includes the following components: + + + Proxy servers (swift-proxy-server). Accepts Object Storage + API and raw HTTP requests to upload files, modify metadata, + and create containers. It also serves file or container + listings to web browsers. To improve performance, the proxy + server can use an optional cache usually deployed with + memcache. + + + Account servers (swift-account-server). Manage accounts defined + with the Object Storage service. + + + Container servers (swift-container-server). Manage a mapping of + containers, or folders, within the Object Storage + service. + + + Object servers (swift-object-server). Manage actual objects, + such as files, on the storage nodes. + + + A number of periodic processes. Performs housekeeping + tasks on the large data store. The replication services ensure + consistency and availability through the cluster. Other + periodic processes include auditors, updaters, and + reapers. + + + Configurable WSGI middleware that handles authentication. + Usually the Identity Service. +
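Because all access goes through the RESTful HTTP API exposed by the proxy servers, a simple upload illustrates how the components cooperate. A brief sketch with the swift command-line client, assuming valid credentials; mycontainer and myobject.txt are placeholder names:
$ swift post mycontainer
$ swift upload mycontainer myobject.txt
$ swift list mycontainer
The proxy server accepts each request, the object servers store the file, and the container server records the listing that the final command returns.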
diff --git a/doc/common/section_getstart_orchestration.xml b/doc/common/section_getstart_orchestration.xml index 370a331f03..3b4dbfcd8b 100644 --- a/doc/common/section_getstart_orchestration.xml +++ b/doc/common/section_getstart_orchestration.xml @@ -1,43 +1,43 @@ -
- Orchestration Service overview - The Orchestration service provides a template-based orchestration - for describing a cloud application by running OpenStack API calls to - generate running cloud applications. The software integrates other core - components of OpenStack into a one-file template system. The templates - enable you to create most OpenStack resource types, such as instances, - floating IPs, volumes, security groups, users, and so on. Also, provides - some more advanced functionality, such as instance high availability, +
+ Orchestration service overview + The Orchestration service provides a template-based + orchestration for describing a cloud application by running + OpenStack API calls to generate running cloud applications. The + software integrates other core components of OpenStack into a + one-file template system. The templates enable you to create most + OpenStack resource types, such as instances, floating IPs, + volumes, security groups, users, and so on. Also, provides some + more advanced functionality, such as instance high availability, instance auto-scaling, and nested stacks. By providing very tight - integration with other OpenStack core projects, all OpenStack core projects - could receive a larger user base. - The service enables deployers to integrate with the Orchestration - service directly or through custom plug-ins. - The Orchestration service consists of the following + integration with other OpenStack core projects, all OpenStack core + projects could receive a larger user base. + The service enables deployers to integrate with the + Orchestration service directly or through custom plug-ins. + The Orchestration service consists of the following components: - - - heat tool. A CLI that communicates with the - heat-api to run AWS CloudFormation APIs. End developers could also use - the Orchestration REST API directly. - - - heat-api component. Provides an - OpenStack-native REST API that processes API requests by - sending them to the heat-engine over RPC. - - - heat-api-cfn component. Provides an AWS - Query API that is compatible with AWS CloudFormation and - processes API requests by sending them to the heat-engine - over RPC. - - - heat-engine. Orchestrates the launching - of templates and provides events back to the API - consumer. - - -
+ + + heat command-line client. A CLI that communicates with the + heat-api to run AWS CloudFormation APIs. End developers could + also use the Orchestration REST API directly. + + + heat-api component. Provides an + OpenStack-native REST API that processes API requests by + sending them to the heat-engine over RPC. + + + heat-api-cfn component. Provides an AWS Query + API that is compatible with AWS CloudFormation and processes + API requests by sending them to the heat-engine over + RPC. + + + heat-engine. Orchestrates the launching of + templates and provides events back to the API consumer. + + +
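To give a sense of the one-file template system, the following is a minimal sketch of a HOT template that boots a single instance; the image and flavor names are placeholders that must match your cloud:
heat_template_version: 2013-05-23
description: Minimal template that boots one server
resources:
  my_instance:
    type: OS::Nova::Server
    properties:
      image: cirros-0.3.1-x86_64
      flavor: m1.tiny
Assuming the template is saved as server.yaml, the heat client sends it to heat-api, which passes it to heat-engine over RPC:
$ heat stack-create teststack --template-file=server.yaml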
diff --git a/doc/common/section_glance_cli_manage_images.xml b/doc/common/section_glance_cli_manage_images.xml index c89cd585d5..a9d35aa76e 100644 --- a/doc/common/section_glance_cli_manage_images.xml +++ b/doc/common/section_glance_cli_manage_images.xml @@ -105,7 +105,7 @@ }]
- + After you restart the Image Service, you can use the following syntax to view the image's location information: $ glance --os-image-api-version=2 image-show imageID For example: @@ -302,7 +302,7 @@ e1000 - + diff --git a/doc/common/section_host_aggregates.xml b/doc/common/section_host_aggregates.xml index 1fa166aaea..f4c16018bc 100644 --- a/doc/common/section_host_aggregates.xml +++ b/doc/common/section_host_aggregates.xml @@ -1,96 +1,114 @@
-Host aggregates - - Overview -Host aggregates are a mechanism to further partition an availability zone; while availability - zones are visible to users, host aggregates are only visible to administrators. - Host Aggregates provide a mechanism to allow administrators to assign key-value pairs to - groups of machines. Each node can have multiple aggregates, each aggregate can have - multiple key-value pairs, and the same key-value pair can be assigned to multiple - aggregate. This information can be used in the scheduler to enable advanced scheduling, - to set up hypervisor resource pools or to define logical groups for migration. - + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" + xml:id="host-aggregates"> + Host aggregates + Host aggregates are a mechanism to further partition an + availability zone; while availability zones are visible to + users, host aggregates are only visible to administrators. + Host Aggregates provide a mechanism to allow administrators to + assign key-value pairs to groups of machines. Each node can + have multiple aggregates, each aggregate can have multiple + key-value pairs, and the same key-value pair can be assigned + to multiple aggregates. This information can be used in the + scheduler to enable advanced scheduling, to set up hypervisor + resource pools or to define logical groups for + migration. Command-line interface - The nova command-line tool supports the following aggregate-related - commands. + The nova command-line tool supports + the following aggregate-related commands. - nova aggregate-list + nova + aggregate-list Print a list of all aggregates. - nova aggregate-create <name> + nova aggregate-create + <name> <availability-zone> Create a new aggregate named - <name> in - availability zone + <name> + in availability zone <availability-zone>. - Returns the ID of the newly created aggregate. Hosts - can be made available to multiple availability - zones, but administrators should be careful when - adding the host to a different host aggregate within - the same availability zone and pay attention when - using the aggregate-set-metadata and - aggregate-update commands to avoid user confusion - when they boot instances in different availability - zones. You will see an error message if you cannot - add a particular host in an aggregate zone it is not - intended for. + Returns the ID of the newly created + aggregate. Hosts can be made available to + multiple availability zones, but + administrators should be careful when + adding the host to a different host + aggregate within the same availability + zone and pay attention when using the + aggregate-set-metadata + and aggregate-update + commands to avoid user confusion when they + boot instances in different availability + zones. An error occurs if you cannot add a + particular host to an aggregate zone for + which it is not intended. nova aggregate-delete - <id> + <id> - Delete an aggregate with id <id>. + Delete an aggregate with id + <id>. nova aggregate-details - <id> + <id> Show details of the aggregate with id <id>. - nova aggregate-add-host <id> + nova aggregate-add-host + <id> <host> - Add host with name <host> to aggregate - with id <id>. + Add host with name + <host> + to aggregate with id + <id>. - nova aggregate-remove-host <id> + nova aggregate-remove-host + <id> <host> - Remove the host with name <host> from - the aggregate with id <id>. + Remove the host with name + <host> + from the aggregate with id + <id>. 
- nova aggregate-set-metadata <id> + nova aggregate-set-metadata + <id> <key=value> - [<key=value> ...] + [<key=value> + ...] - Add or update metadata (key-value pairs) associated with the aggregate - with id <id>. + Add or update metadata (key-value pairs) + associated with the aggregate with id + <id>. - nova aggregate-update <id> + nova aggregate-update + <id> <name> [<availability_zone>] - Update the aggregate's name and optionally availability zone. + Update the name and availability zone + (optional) for the aggregate. @@ -100,45 +118,59 @@ xml:id="host-aggregates"> - nova host-update --maintenance [enable | - disable] + nova host-update --maintenance + [enable | disable] - Put/resume host into/from maintenance. + Put/resume host into/from + maintenance. - These commands are only accessible to administrators. If the username and tenant - you are using to access the Compute service do not have the admin - role, or have not been explicitly granted the appropriate privileges, you will see - one of the following errors when trying to use these - commands:ERROR: Policy doesn't allow compute_extension:aggregates to be performed. (HTTP 403) (Request-ID: req-299fbff6-6729-4cef-93b2-e7e1f96b4864) -ERROR: Policy doesn't allow compute_extension:hosts to be performed. (HTTP 403) (Request-ID: req-ef2400f6-6776-4ea3-b6f1-7704085c27d1) - + + Only administrators can access these commands. If + you try to use these commands and the user name and + tenant that you use to access the Compute service do + not have the admin role or the + appropriate privileges, these errors occur: + ERROR: Policy doesn't allow compute_extension:aggregates to be performed. (HTTP 403) (Request-ID: req-299fbff6-6729-4cef-93b2-e7e1f96b4864) + + ERROR: Policy doesn't allow compute_extension:hosts to be performed. (HTTP 403) (Request-ID: req-ef2400f6-6776-4ea3-b6f1-7704085c27d1) + + Configure scheduler to support host aggregates - One common use case for host aggregates is when you want to support scheduling - instances to a subset of compute hosts because they have a specific capability. For - example, you may want to allow users to request compute hosts that have SSD drives if - they need access to faster disk I/O, or access to compute hosts that have GPU cards to - take advantage of GPU-accelerated code. - To configure the scheduler to support host aggregates, the - scheduler_default_filters configuration option must contain the - AggregateInstanceExtraSpecsFilter in addition to the other - filters used by the scheduler. Add the following line to - /etc/nova/nova.conf on the host that runs the nova-scheduler - service to enable host aggregates filtering, as well as the other filters that are - typically + One common use case for host aggregates is when you want + to support scheduling instances to a subset of compute + hosts because they have a specific capability. For + example, you may want to allow users to request compute + hosts that have SSD drives if they need access to faster + disk I/O, or access to compute hosts that have GPU cards + to take advantage of GPU-accelerated code. + To configure the scheduler to support host aggregates, + the scheduler_default_filters + configuration option must contain the + AggregateInstanceExtraSpecsFilter + in addition to the other filters used by the scheduler. 
+ Add the following line to + /etc/nova/nova.conf on the host + that runs the nova-scheduler service to enable host + aggregates filtering, as well as the other filters that + are typically enabled:scheduler_default_filters=AggregateInstanceExtraSpecsFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter - Example: specify compute hosts with SSDs - In this example, we configure the Compute service to allow users to request nodes that - have solid-state drives (SSDs). We create a new host aggregate called - fast-io in the availability zone called nova, - we add the key-value pair ssd=true to the aggregate, and then we add - compute nodes node1, and node2 to - it.$ nova aggregate-create fast-io nova + Example: Specify compute hosts with SSDs + This example configures the Compute service to enable + users to request nodes that have solid-state drives + (SSDs). You create a fast-io host + aggregate in the nova availability zone + and you add the ssd=true key-value pair + to the aggregate. Then, you add the + node1, and node2 + compute nodes to it. + $ nova aggregate-create fast-io nova +----+---------+-------------------+-------+----------+ | Id | Name | Availability Zone | Hosts | Metadata | +----+---------+-------------------+-------+----------+ @@ -165,25 +197,30 @@ xml:id="host-aggregates"> +----+---------+-------------------+----------------------+-------------------+ | 1 | fast-io | nova | [u'node1', u'node2'] | {u'ssd': u'true'} | +----+---------+-------------------+----------------------+-------------------+ - - Next, we use the nova flavor-create command to create a new flavor - called ssd.large with an ID of 6, 8GB of RAM, 80GB root disk, and 4 - vCPUs. - $ nova flavor-create ssd.large 6 8192 80 4 + + Use the nova flavor-create command to + create the ssd.large flavor called with + an ID of 6, 8GB of RAM, 80GB root disk, and 4 + vCPUs. + $ nova flavor-create ssd.large 6 8192 80 4 +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+-------------+ | ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public | extra_specs | +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+-------------+ | 6 | ssd.large | 8192 | 80 | 0 | | 4 | 1 | True | {} | -+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+-------------+ - Once the flavor has been created, we specify one or more key-value pair that must - match the key-value pairs on the host aggregates. In this case, there's only one - key-value pair, ssd=true. Setting a key-value pair on a flavor is - done using the nova flavor-key set_key - command.# nova flavor-key set_key --name=ssd.large --key=ssd --value=true - Once it is set, you should see the extra_specs property of the - ssd.large flavor populated with a key of ssd - and a corresponding value of - true.$ nova flavor-show ssd.large ++----+-----------+-----------+------+-----------+------+-------+-------------+-----------+-------------+ + Once the flavor is created, specify one or more + key-value pairs that match the key-value pairs on the host + aggregates. In this case, that is the + ssd=true key-value pair. Setting a + key-value pair on a flavor is done using the nova + flavor-key set_key command. + # nova flavor-key set_key --name=ssd.large --key=ssd --value=true + Once it is set, you should see the + extra_specs property of the + ssd.large flavor populated with a + key of ssd and a corresponding value of + true. 
+ $ nova flavor-show ssd.large +----------------------------+-------------------+ | Property | Value | +----------------------------+-------------------+ @@ -198,17 +235,22 @@ xml:id="host-aggregates"> | rxtx_factor | 1.0 | | swap | | | vcpus | 4 | -+----------------------------+-------------------+ - Now, when a user requests an instance with the ssd.large flavor, - the scheduler will only consider hosts with the ssd=true key-value - pair. In this example, that would only be node1 and - node2. ++----------------------------+-------------------+ + Now, when a user requests an instance with the + ssd.large flavor, the scheduler + only considers hosts with the ssd=true + key-value pair. In this example, these are + node1 and + node2. - XenServer hypervisor pools to support live migration - When using the XenAPI-based hypervisor, the Compute service uses host aggregates to - manage XenServer Resource pools, which are used in supporting live migration. - +
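Returning to the SSD example above, a user simply requests the new flavor when booting; the image name below is a placeholder:
$ nova boot --image cirros-0.3.1-x86_64 --flavor ssd.large ssd-instance
Because the AggregateInstanceExtraSpecsFilter is enabled, the scheduler places this instance only on node1 or node2, the hosts in the fast-io aggregate that carry the ssd=true metadata.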
diff --git a/doc/common/section_identity-configure.xml b/doc/common/section_identity-configure.xml index 8c683fbd09..ec60aabcf4 100644 --- a/doc/common/section_identity-configure.xml +++ b/doc/common/section_identity-configure.xml @@ -3,16 +3,19 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="keystone-configuration-file"> - Identity Configuration Files + Identity Service configuration files - keystone.conf - The Identity Service - /etc/keystone/keystone.conf configuration - file is an INI-format file with sections. - The [DEFAULT] section configures general - configuration values. - Specific sections, such as the [sql] and - [ec2] sections, configure individual + + keystone.conf + + The Identity Service + /etc/keystone/keystone.conf + configuration file is an INI-format file with + sections. + The [DEFAULT] section configures + general configuration values. + Specific sections, such as the [sql] + and [ec2] sections, configure individual services. @@ -31,7 +34,7 @@ - + @@ -68,11 +71,11 @@
keystone.conf file sections
[sql]Optional storage backend configuration.Optional storage back-end configuration.
[ec2]
When you start the Identity Service, you can use the - --config-file parameter to specify a - configuration file. + --config-file parameter to specify + a configuration file.
If you do not specify a configuration file, the Identity Service looks for the keystone.conf - configuration file in the following directories in the following + configuration file in these directories in this order: @@ -96,11 +99,16 @@ -
+
- keystone-paste.ini - The /etc/keystone/keystone-paste.ini file - configures the Identity Service WSGI middleware pipeline. + + keystone-paste.ini + + The + /etc/keystone/keystone-paste.ini file + configures the Identity Service WSGI middleware + pipeline. +
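For example, to start the Identity Service with the --config-file parameter described above and a configuration file outside the default search path, you could pass the path explicitly. A minimal sketch, assuming the service is run directly through the keystone-all script; the path is illustrative:
# keystone-all --config-file /etc/keystone/keystone.conf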
diff --git a/doc/common/section_keystone-external-auth.xml b/doc/common/section_keystone-external-auth.xml index eab239297d..903cd85d69 100644 --- a/doc/common/section_keystone-external-auth.xml +++ b/doc/common/section_keystone-external-auth.xml @@ -1,32 +1,34 @@ -
- Using External Authentication with OpenStack Identity - When Keystone is executed in apache-httpd - it is possible to use external authentication methods different - from the authentication provided by the identity store backend. - For example, this makes possible to use a SQL identity backend - together with X.509 authentication, Kerberos, etc. instead of using - the username/password combination. - +
+ External authentication with the Identity + Service + When Keystone runs in apache-httpd, you + can use external authentication methods that differ from the + authentication provided by the identity store back-end. For + example, you can use an SQL identity back-end together with + X.509 authentication, Kerberos, and so on instead of using the + user name and password combination.
- Using HTTPD authentication - Webservers like Apache HTTP support many methods of - authentication. Keystone can profit from this feature and let the - authentication be done in the webserver, that will pass down the - authenticated user to Keystone using the REMOTE_USER - environment variable. This user must exist in advance in the identity - backend so as to get a token from the controller. To use this method, - OpenStack Identity should be running on apache-httpd. - + Use HTTPD authentication + Web servers, like Apache HTTP, support many methods of + authentication. Keystone can allow the web server to + perform the authentication. The web server then passes the + authenticated user to Keystone by using the + REMOTE_USER environment variable. + This user must already exist in the Identity Service + back-end so as to get a token from the controller. To use + this method, the Identity Service should run on + apache-httpd.
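Any Apache authentication module that sets REMOTE_USER can be used for this. As one possible illustration, not the only option, HTTP Basic authentication against a local password file might look roughly like this; the location, realm, and file path are placeholders:
<Location />
    AuthType Basic
    AuthName "OpenStack Identity"
    AuthUserFile /etc/apache2/keystone.htpasswd
    Require valid-user
</Location>
Apache performs the authentication and passes the resulting user name to Keystone through the REMOTE_USER environment variable.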
-
- Using X.509 - The following snippet for the Apache conf will authenticate - the user based on a valid X.509 certificate from a known CA: - <VirtualHost _default_:5000> + Use X.509 + The following Apache configuration snippet authenticates + the user based on a valid X.509 certificate from a known + CA: + <VirtualHost _default_:5000> SSLEngine on SSLCertificateFile /etc/ssl/certs/ssl.cert SSLCertificateKeyFile /etc/ssl/private/ssl.key @@ -39,6 +41,5 @@ (...) </VirtualHost> -
diff --git a/doc/common/section_keystone-sample-conf-files.xml b/doc/common/section_keystone-sample-conf-files.xml index 30985ceed3..92369f0ad4 100644 --- a/doc/common/section_keystone-sample-conf-files.xml +++ b/doc/common/section_keystone-sample-conf-files.xml @@ -1,28 +1,28 @@ -
- Identity Sample Configuration Files + Identity Service sample configuration files etc/keystone.conf.sample - - + + + etc/keystone-paste.ini - - + + + etc/logging.conf.sample - + diff --git a/doc/common/section_keystone-ssl-config.xml b/doc/common/section_keystone-ssl-config.xml index 27b064bc3b..b159ea214c 100644 --- a/doc/common/section_keystone-ssl-config.xml +++ b/doc/common/section_keystone-ssl-config.xml @@ -1,12 +1,10 @@ -
- Configure the Identity Service with SSL - You can configure the Identity Service to support 2-way + Configure the Identity Service with SSL + You can configure the Identity Service to support two-way SSL. You must obtain the x509 certificates externally and configure them. @@ -15,50 +13,49 @@ >examples/pki/certs and examples/pki/private directories: - Certificate types - - cacert.pem - - - Certificate Authority chain to validate against. - - - - ssl_cert.pem - - - Public certificate for Identity Service - server. - - - - middleware.pem - - - Public and private certificate for - Identity Service middleware/client. - - - - cakey.pem - - - Private key for the CA. - - - - ssl_key.pem - - - Private key for the Identity Service - server. - - - - You can choose names for - these certificates. You can also combine the public/private keys in the - same file, if you wish. These certificates are provided as - an example. + + Certificate types + + cacert.pem + + Certificate Authority chain to validate + against. + + + + ssl_cert.pem + + Public certificate for Identity Service + server. + + + + middleware.pem + + Public and private certificate for Identity + Service middleware/client. + + + + cakey.pem + + Private key for the CA. + + + + ssl_key.pem + + Private key for the Identity Service + server. + + + + + You can choose names for these certificates. You can + also combine the public/private keys in the same file, if + you wish. These certificates are provided as an + example. +
SSL configuration To enable SSL with client authentication, modify the @@ -66,35 +63,36 @@ etc/keystone.conf file. The following SSL configuration example uses the included sample certificates: - [ssl] + [ssl] enable = True certfile = <path to keystone.pem> keyfile = <path to keystonekey.pem> ca_certs = <path to ca.pem> cert_required = True - Options + + Options enable. True enables SSL. Default is False. - - - certfile. Path to the Identity - Service public certificate file. - - - keyfile. Path to the - Identity Service private certificate file. If you - include the private key in the certfile, you can - omit the keyfile. - - - ca_certs. Path to the CA trust chain. - - - - cert_required. Requires + + + certfile. Path to the + Identity Service public certificate file. + + + keyfile. Path to the Identity + Service private certificate file. If you include + the private key in the certfile, you can omit the + keyfile. + + + ca_certs. Path to the CA + trust chain. + + + cert_required. Requires client certificate. Default is False. - - -
+ +
+
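Before restarting the service, it can be worth confirming that the server certificate chains to the configured CA. One way to check, with the paths adjusted to whatever you set in the [ssl] section:
$ openssl verify -CAfile /path/to/ca.pem /path/to/keystone.pem
If the command does not report OK, clients that validate the chain will refuse the connection.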
diff --git a/doc/common/section_keystone_certificates-for-pki.xml b/doc/common/section_keystone_certificates-for-pki.xml index dcd570c8b9..db558a18f7 100644 --- a/doc/common/section_keystone_certificates-for-pki.xml +++ b/doc/common/section_keystone_certificates-for-pki.xml @@ -73,10 +73,12 @@ None. - If token_format=UUID, a typical token will look like - 53f7f6ef0cc344b5be706bcc8b1479e1. If - token_format=PKI, a typical token will be a much longer string, e.g.: - MIIKtgYJKoZIhvcNAQcCoIIKpzCCCqMCAQExCTAHBgUrDgMCGjCCCY8GCSqGSIb3DQEHAaCCCYAEggl8eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxMy0wNS0z + If token_format=UUID, a typical token + looks like + 53f7f6ef0cc344b5be706bcc8b1479e1. If + token_format=PKI, a typical token is a + much longer string, such as: + MIIKtgYJKoZIhvcNAQcCoIIKpzCCCqMCAQExCTAHBgUrDgMCGjCCCY8GCSqGSIb3DQEHAaCCCYAEggl8eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxMy0wNS0z MFQxNTo1MjowNi43MzMxOTgiLCAiZXhwaXJlcyI6ICIyMDEzLTA1LTMxVDE1OjUyOjA2WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogbnVs bCwgImVuYWJsZWQiOiB0cnVlLCAiaWQiOiAiYzJjNTliNGQzZDI4NGQ4ZmEwOWYxNjljYjE4MDBlMDYiLCAibmFtZSI6ICJkZW1vIn19LCAic2VydmljZUNhdGFsb2ciOiBbeyJlbmRw b2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTkyLjE2OC4yNy4xMDA6ODc3NC92Mi9jMmM1OWI0ZDNkMjg0ZDhmYTA5ZjE2OWNiMTgwMGUwNiIsICJyZWdpb24iOiAiUmVnaW9u @@ -102,28 +104,27 @@ OiBbeyJuYW1lIjogImFub3RoZXJyb2xlIn0sIHsibmFtZSI6ICJNZW1iZXIifV0sICJuYW1lIjogImRl YWRiODM3NDVkYzQzNGJhMzk5ODllNjBjOTIzYWZhMjgiLCAiMzM2ZTFiNjE1N2Y3NGFmZGJhNWUwYTYwMWUwNjM5MmYiXX19fTGB-zCB-AIBATBcMFcxCzAJBgNVBAYTAlVTMQ4wDAYD VQQIEwVVbnNldDEOMAwGA1UEBxMFVW5zZXQxDjAMBgNVBAoTBVVuc2V0MRgwFgYDVQQDEw93d3cuZXhhbXBsZS5jb20CAQEwBwYFKw4DAhowDQYJKoZIhvcNAQEBBQAEgYCAHLpsEs2R nouriuiCgFayIqCssK3SVdhOMINiuJtqv0sE-wBDFiEj-Prcudqlz-n+6q7VgV4mwMPszz39-rwp+P5l4AjrJasUm7FrO-4l02tPLaaZXU1gBQ1jUG5e5aL5jPDP08HbCWuX6wr-QQQB -SrWY8lF3HrTcJT23sZIleg== +SrWY8lF3HrTcJT23sZIleg==
- Sign certificate issued by External CA - You may use a signing certificate issued by an external + Sign certificate issued by external CA + You can use a signing certificate issued by an external CA instead of generated by keystone-manage. However, certificate issued by external CA must satisfy the following conditions: - all certificate and key files must be in - Privacy Enhanced Mail (PEM) format + all certificate and key files must be in Privacy + Enhanced Mail (PEM) format private key files must not be protected by a password - When using signing certificate issued by an external - CA, you do not need to specify - key_size, - valid_days, and + When using signing certificate issued by an external CA, + you do not need to specify key_size, + valid_days, and ca_password as they will be ignored. The basic workflow for using a signing certificate @@ -131,7 +132,7 @@ SrWY8lF3HrTcJT23sZIleg== Request Signing Certificate from External CA - + Convert certificate and private key to PEM if @@ -143,7 +144,8 @@ SrWY8lF3HrTcJT23sZIleg==
- Request a signing certificate from external CA + Request a signing certificate from an external + CA One way to request a signing certificate from an external CA is to first generate a PKCS #10 Certificate Request Syntax (CRS) using OpenSSL CLI. @@ -169,18 +171,18 @@ emailAddress = keystone@openstack.org Then generate a CRS with OpenSSL CLI. Do not encrypt the generated private key. Must use the -nodes option. - + For example: $ openssl req -newkey rsa:1024 -keyout signing_key.pem -keyform PEM \ -out signing_cert_req.pem -outform PEM -config cert_req.conf -nodes If everything is successfully, you should end up with signing_cert_req.pem and signing_key.pem. Send - signing_cert_req.pem to your CA to - request a token signing certificate and make sure to ask - the certificate to be in PEM format. Also, make sure your - trusted CA certificate chain is also in PEM format. - + signing_cert_req.pem to your CA + to request a token signing certificate and make sure to + ask the certificate to be in PEM format. Also, make sure + your trusted CA certificate chain is also in PEM format. +
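If the CA returns the certificate in DER rather than PEM format, OpenSSL can convert it before installation; the file names below are examples only:
$ openssl x509 -in signing_cert.der -inform DER -out signing_cert.pem -outform PEM
$ openssl rsa -in signing_key.der -inform DER -out signing_key.pem -outform PEM
The second command is needed only if the private key is also in DER form and, per the requirements above, it must not be password protected.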
Install an external signing certificate @@ -193,8 +195,9 @@ emailAddress = keystone@openstack.org - signing_key.pem - corresponding - (non-encrypted) private key in PEM format + signing_key.pem - + corresponding (non-encrypted) private key in PEM + format @@ -214,10 +217,9 @@ emailAddress = keystone@openstack.org Make sure the certificate directory is only accessible by root. - If your certificate directory path is different from - the default /etc/keystone/ssl/certs, - make sure it is reflected in the - [signing] section of the - configuration file. + If your certificate directory path is different from the + default /etc/keystone/ssl/certs, make + sure it is reflected in the [signing] + section of the configuration file.
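As a rough sketch of what that might look like, with a non-default directory and option names that should be verified against your keystone.conf:
[signing]
token_format = PKI
certfile = /etc/keystone/pki/signing_cert.pem
keyfile = /etc/keystone/pki/signing_key.pem
ca_certs = /etc/keystone/pki/cacert.pem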
diff --git a/doc/common/section_keystone_cli_credentials.xml b/doc/common/section_keystone_cli_credentials.xml index 958a02cb35..d3a7ac8941 100644 --- a/doc/common/section_keystone_cli_credentials.xml +++ b/doc/common/section_keystone_cli_credentials.xml @@ -33,7 +33,7 @@ An endpoint to use instead of the one in the service catalog. Defaults to env[OS_SERVICE_ENDPOINT]. -
+ diff --git a/doc/common/section_keystone_db_sync.xml b/doc/common/section_keystone_db_sync.xml index 2b7c915669..a22a2cfb92 100644 --- a/doc/common/section_keystone_db_sync.xml +++ b/doc/common/section_keystone_db_sync.xml @@ -3,37 +3,42 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="keystone-db_sync"> - Migrate the Identity Service Database - Between revisions of the Identity service project code-named - keystone, SQL migrations may need to happen. The keystone - project uses Migrate the Identity Service database + Between revisions of the Identity Service project, you might + need to complete SQL migrations. The Identity Service project + uses SQLAlchemy-migrate (see SQLAlchemy-migrate to migrate the SQL database - between revisions. For core components, the source code stores - migrations in a central repository under a - keystone/common/sql/migrate_repo + >http://code.google.com/p/sqlalchemy-migrate/) to + migrate the SQL database between revisions. For core + components, the source code stores migrations in a central + repository under a + keystone/common/sql/migrate_repo directory. - Extensions to the Identity service may require SQL - migrations as well. The directory + Extensions to the Identity Service might also require SQL + migrations. The directory keystone/contrib/example in the keystone repository contains a sample extension migration. To set up a migration for an extension - Create a directory structure where "my_extension" is - the name of the extension: - keystone/contrib/my_extension/migrate_repo/versions/ + Create a directory structure where + my_extension is the name of the + extension, as follows: + keystone/contrib/my_extension/migrate_repo/versions/. - Create empty __init__.py files in the migrate_repo - and versions subdirectories. + Create empty __init__.py files + in the migrate_repo and + versions + subdirectories. - Create a configuration file in the migrate_repo - subdirectory named migrate.cfg conforming to a - key/value ini file format. - Here is an example config file. + Create a migrate.cfg + configuration file in the + migrate_repo subdirectory, + which conforms to a key/value .ini file format. + An example configuration file: [db_settings] repository_id=my_extension version_table=migrate_version @@ -41,27 +46,28 @@ - To test and run a migration for a specific - extension - You can use the keystone-manage command with the - parameter --extension both the db_sync and db_version - commands. Ensure the required configuration files exist - before doing these steps. + To test a migration for a specific extension + You can use the keystone-manage + command with the --extension parameter + for both the db_sync and + db_version commands. Ensure that the + required configuration files exist before completing these + steps. 
- Test your migrations with "example" as a named + Test your migrations with an example extension: # bin/keystone-manage db_sync --extension example - Migrate to version 1 with this command: + Migrate to version 1: # bin/keystone-manage db_sync --extension example 1 - Migrate back to version 0 with this command: + Migrate back to version 0: # bin/keystone-manage db_sync --extension example 0 - Use this command to check the version: + Check the version: # bin/keystone-manage db_version --extension example diff --git a/doc/common/section_kvm_enable.xml b/doc/common/section_kvm_enable.xml index c06c75e1a6..891ab518ca 100644 --- a/doc/common/section_kvm_enable.xml +++ b/doc/common/section_kvm_enable.xml @@ -3,99 +3,99 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="section_kvm_enable"> - Enabling KVM - To perform the following steps, you must be logged in as - the root user. - - - To determine whether the svm - or vmx CPU extensions are - present, run the following command: - # grep -E 'svm|vmx' /proc/cpuinfo - This command generates output if the CPU is - hardware-virtualization capable. Even if output is - shown, you may still need to enable virtualization - in the system BIOS for full support. - If no output appears, consult your system - documentation to ensure that your CPU and - motherboard support hardware virtualization. - Verify that any relevant hardware virtualization - options are enabled in the system BIOS. - Each manufacturer's BIOS is different. If you need to enable virtualization in - the BIOS, look for an option containing the words "virtualization", "VT", "VMX", or - "SVM." - - - To list the loaded kernel modules and verify - that the kvm modules are - loaded, run the following command: - # lsmod | grep kvm - If the output includes kvm_intel or - kvm_amd, the kvm hardware - virtualization modules are loaded and your kernel meets the module requirements for - OpenStack Compute. - If the output does not show that the kvm module - is loaded, run the following command to load - it: - # modprobe -a kvm - Run the command for your CPU. For Intel, run - this command: - # modprobe -a kvm-intel - For AMD, run this command: - # modprobe -a kvm-amd - Because a KVM installation can change user group - membership, you might need to log in again for - changes to take effect. - If the kernel modules do not load automatically, please use the procedures listed - in the subsections below. - - - This completes the required checks to ensure that - hardware virtualization support is available and enabled, - and that the correct kernel modules are loaded. - If the checks indicate that required hardware - virtualization support or kernel modules are disabled or - not available, you must either enable this support on the - system or find a system with this support. - - Some systems require that you enable VT support in - the system BIOS. If you believe your processor - supports hardware acceleration but the previous - command did not produce output, you might need to - reboot your machine, enter the system BIOS, and enable - the VT option. - - If KVM acceleration is not supported, configure Compute - to use a different hypervisor, such as QEMU or Xen. - The following procedures will help you load the kernel modules for Intel-based and - AMD-based processors if they did not load automatically during KVM installation. -
- Intel-based processors - If your compute host is Intel-based, run the - following command as root to load the kernel - modules: - # modprobe kvm + Enable KVM + To perform these steps, you must be logged in as the + root user. + + + To determine whether the svm or + vmx CPU extensions are present, + run this command: + # grep -E 'svm|vmx' /proc/cpuinfo + This command generates output if the CPU is + hardware-virtualization capable. Even if output is + shown, you might still need to enable virtualization + in the system BIOS for full support. + If no output appears, consult your system + documentation to ensure that your CPU and motherboard + support hardware virtualization. Verify that any + relevant hardware virtualization options are enabled + in the system BIOS. + The BIOS for each manufacturer is different. If you + must enable virtualization in the BIOS, look for an + option containing the words + virtualization, + VT, VMX, or + SVM. + + + To list the loaded kernel modules and verify that + the kvm modules are loaded, run + this command: + # lsmod | grep kvm + If the output includes + kvm_intel or + kvm_amd, the + kvm hardware + virtualization modules are loaded and your kernel + meets the module requirements for OpenStack + Compute. + If the output does not show that the + kvm module is loaded, run this + command to load it: + # modprobe -a kvm + Run the command for your CPU. For Intel, run this + command: + # modprobe -a kvm-intel + For AMD, run this command: + # modprobe -a kvm-amd + Because a KVM installation can change user group + membership, you might need to log in again for changes + to take effect. + If the kernel modules do not load automatically, use + the procedures listed in these subsections. + + + If the checks indicate that required hardware virtualization + support or kernel modules are disabled or unavailable, you + must either enable this support on the system or find a system + with this support. + + Some systems require that you enable VT support in the + system BIOS. If you believe your processor supports + hardware acceleration but the previous command did not + produce output, reboot your machine, enter the system + BIOS, and enable the VT option. + + If KVM acceleration is not supported, configure Compute to + use a different hypervisor, such as QEMU or Xen. + These procedures help you load the kernel modules for + Intel-based and AMD-based processors if they do not load + automatically during KVM installation. +
+ Intel-based processors + If your compute host is Intel-based, run these commands + as root to load the kernel modules: + # modprobe kvm # modprobe kvm-intel - Add the following lines to the - /etc/modules file so that - these modules load on reboot: - kvm + Add these lines to the /etc/modules + file so that these modules load on reboot: + kvm kvm-intel -
-
- AMD-based processors - If your compute host is AMD-based, run the following - command as root to load the kernel modules: - # modprobe kvm +
+
+ AMD-based processors + If your compute host is AMD-based, run these commands as + root to load the kernel modules: + # modprobe kvm # modprobe kvm-amd - Add the following lines to - /etc/modules file so that - these modules load on reboot: - kvm + Add these lines to /etc/modules + file so that these modules load on reboot: + kvm kvm-amd -
+
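After the modules are loaded, Compute must also be configured to use KVM. A minimal sketch of the relevant /etc/nova/nova.conf entries for this release series; verify the option names against your installed version:
compute_driver = libvirt.LibvirtDriver
libvirt_type = kvm
Restart the nova-compute service after changing these values.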
diff --git a/doc/common/section_multiple-compute-nodes.xml b/doc/common/section_multiple-compute-nodes.xml index cdda8abe43..a5ebb22065 100644 --- a/doc/common/section_multiple-compute-nodes.xml +++ b/doc/common/section_multiple-compute-nodes.xml @@ -1,32 +1,35 @@ -
- Configuring Multiple Compute Nodes - If your goal is to split your VM load across more than one - server, you can connect an additional nova-compute node to a cloud - controller node. This configuring can be reproduced on - multiple compute servers to start building a true multi-node - OpenStack Compute cluster. - To build out and scale the Compute platform, you spread - out services amongst many servers. While there are additional - ways to accomplish the build-out, this section describes - adding compute nodes, and the service we are scaling out is - called nova-compute. - For a multi-node install you only make changes to - nova.conf and copy it to additional - compute nodes. Ensure each nova.conf file - points to the correct IP addresses for the respective - services. - By default, nova-network - sets the bridge device based on the - setting in flat_network_bridge. Now you can - edit /etc/network/interfaces with the - following template, updated with your IP information. - # The loopback network interface + Configure multiple Compute nodes + To distribute your VM load across more than one server, you + can connect an additional nova-compute node to a cloud controller + node. You can reproduce this configuration on multiple compute + servers to build a true multi-node OpenStack Compute + cluster. + To build and scale the Compute platform, you distribute + services across many servers. While you can accomplish this in + other ways, this section describes how to add compute nodes + and scale out the nova-compute service. + For a multi-node installation, you make changes to only the + nova.conf file and copy it to + additional compute nodes. Ensure that each + nova.conf file points to the correct + IP addresses for the respective services. + + + By default, nova-network sets the bridge device + based on the setting in + flat_network_bridge. Update + your IP information in the + /etc/network/interfaces file + by using this template: + # The loopback network interface auto lo iface lo inet loopback @@ -44,30 +47,40 @@ iface br100 inet static gateway xxx.xxx.xxx.xxx # dns-* options are implemented by the resolvconf package, if installed dns-nameservers xxx.xxx.xxx.xxx - Restart networking: - $ sudo service networking restart - With nova.conf updated and networking - set, configuration is nearly complete. First, bounce the - relevant services to take the latest updates: - $ sudo service libvirtd restart -$ sudo service nova-compute restart - To avoid issues with KVM and permissions with Nova, run - the following commands to ensure we have VM's that are running - optimally: - # chgrp kvm /dev/kvm + + + Restart networking: + $ sudo service networking restart + + + Bounce the relevant services to take the latest + updates: + $ sudo service libvirtd restart +$ sudo service nova-compute restart + + + To avoid issues with KVM and permissions with Nova, + run these commands to ensure that your VMs run + optimally: + # chgrp kvm /dev/kvm # chmod g+rwx /dev/kvm - Any server that does not have - nova-api running on it needs this - iptables entry so that images can get metadata info. On - compute nodes, configure the iptables with this next - step: - # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773 - Lastly, confirm that your compute node is talking to your - cloud controller. 
From the cloud controller, run this database - query: - $ mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;' - In return, you should see something similar to - this: +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ + + + Any server that does not have + nova-api running on it requires + an iptables entry so that images can get metadata + information. + On compute nodes, configure iptables with this + command: + # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773 + + + Confirm that your compute node can talk to your + cloud controller. + From the cloud controller, run this database + query: + $ mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;' + +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ | created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone | +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ | 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova | @@ -77,10 +90,12 @@ $ sudo service nova-compute restart | 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL | 0 | 9 | osdemo04 | nova-compute | compute | 28484 | 0 | nova | | 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL | 0 | 8 | osdemo05 | nova-compute | compute | 29284 | 0 | nova | +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+ - You can see that osdemo0{1,2,4,5} are - all running nova-compute. When you start spinning up - instances, they will allocate on any node that is running - nova-compute from - this list. + In this example, the osdemo hosts + all run the nova-compute service. When you + launch instances, they allocate on any node that runs + nova-compute from this list. + +
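As a rough illustration of pointing each compute node at the controller, the copied nova.conf usually carries entries similar to the following, where 192.168.0.10 stands in for the cloud controller and 192.168.0.21 for the compute node itself; the option names and addresses are examples only:
my_ip = 192.168.0.21
rabbit_host = 192.168.0.10
glance_host = 192.168.0.10
sql_connection = mysql://nova:NOVA_DBPASS@192.168.0.10/nova
Only the node-specific values, such as my_ip, change from host to host.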
diff --git a/doc/common/section_networking-quotas.xml b/doc/common/section_networking-quotas.xml index 86517456ad..c759171022 100644 --- a/doc/common/section_networking-quotas.xml +++ b/doc/common/section_networking-quotas.xml @@ -3,24 +3,27 @@ xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> - Manage Networking service quotas - A quota is a function used to limit the number of resources. A default quota may be - enforced for all tenants. Attempting to create resources over the limit triggers an - error. - $ neutron net-create test_net -Quota exceeded for resources: ['network'] - Per-tenant quota configuration is also supported by the quota extension API. See Per-tenant quota configuration for details. -
- Basic quota configuration - In the Networking default quota mechanism, all tenants have the same quota value, such - as the number of resources that a tenant can create. This is enabled by default. - The quota value is defined in the OpenStack Networking configuration file - (neutron.conf). If you want to disable quotas for a specific resource - (e.g., network, subnet, port), remove a corresponding item from - quota_items. Each of the quota values in the example below is the default - value. - [quotas] + Manage Networking service quotas + A quota limits the number of available resources. A default + quota might be enforced for all tenants. When you try to create + more resources than the quota allows, an error occurs: + $ neutron net-create test_net + Quota exceeded for resources: ['network'] + Per-tenant quota configuration is also supported by the quota + extension API. See + Per-tenant quota configuration for details. +
+ Basic quota configuration + In the Networking default quota mechanism, all tenants have + the same quota values, such as the number of resources that a + tenant can create. + The quota value is defined in the OpenStack Networking + neutron.conf configuration file. To + disable quotas for a specific resource, such as network, subnet, + or port, remove a corresponding item from + . This example shows the default + quota values: + [quotas] # resource name(s) that are supported in quota features quota_items = network,subnet,port @@ -35,38 +38,79 @@ quota_port = 50 # default driver to use for quota checks quota_driver = neutron.quota.ConfDriver - OpenStack Networking also supports quotas for L3 resources: router and floating IP. You - can configure them by adding the following lines to quotas section in - neutron.conf. (Note that quota_items does not - affect these quotas.) - [quotas] + OpenStack Networking also supports quotas for L3 resources: + router and floating IP. Add these lines to the + quotas section in the + neutron.conf file. + [quotas] # number of routers allowed per tenant, and minus means unlimited quota_router = 10 # number of floating IPs allowed per tenant, and minus means unlimited quota_floatingip = 50 - OpenStack Networking also supports quotas for security group resources: number of - security groups and the number of rules per security group. You can configure them by adding - the following lines to quotas section in - neutron.conf. (Note that quota_items does not - affect these quotas.) - [quotas] + + The option does not affect + these quotas. + + OpenStack Networking also supports quotas for security group + resources: number of security groups and the number of rules for + each security group. Add these lines to the + quotas section in the + neutron.conf file: + [quotas] # number of security groups per tenant, and minus means unlimited quota_security_group = 10 # number of security rules allowed per tenant, and minus means unlimited quota_security_group_rule = 100 -
-
- Per-tenant quota configuration - OpenStack Networking also supports per-tenant quota limit by quota extension API. To - enable per-tenant quota, you need to set quota_driver in - neutron.conf. For example: + + The option does not affect + these quotas. + +
+
+ Configure per-tenant quotas + OpenStack Networking also supports per-tenant quota limit by + quota extension API. + Use these commands to manage per-tenant quotas: + + neutron quota-delete. Deletes + defined quotas for a specified tenant. + + + neutron quota-list. Lists defined + quotas for all tenants. + + + neutron quota-show. Shows quotas + for a specified tenant. + + + neutron quota-update. Updates + quotas for a specified tenant. + + Only users with the admin role + can change a quota value. By default, the default set of quotas + are enforced for all tenants, so no + quota-create command exists. + + + Configure Networking to show per-tenant quotas + Set the quota_driver option in the + neutron.conf file: quota_driver = neutron.db.quota_db.DbQuotaDriver - When per-tenant quota is enabled, the output of the following commands contain - quotas. - $ neutron ext-list -c alias -c name -+-----------------+--------------------------+ + When you set this option, the output for Networking + commands shows quotas. + + + List Networking extensions + To list the Networking extensions, run this + command: + $ neutron ext-list -c alias -c name + The command shows the quotas + extension, which provides per-tenant quota management + support: + +-----------------+--------------------------+ | alias | name | +-----------------+--------------------------+ | agent_scheduler | Agent Schedulers | @@ -79,9 +123,13 @@ quota_security_group_rule = 100 | lbaas | LoadBalancing service | | extraroute | Neutron Extra Route | +-----------------+--------------------------+ - -$ neutron ext-show quotas -+-------------+------------------------------------------------------------+ + + + Show information for the quotas extension + To show information for the quotas + extension, run this command: + $ neutron ext-show quotas + +-------------+------------------------------------------------------------+ | Field | Value | +-------------+------------------------------------------------------------+ | alias | quotas | @@ -91,49 +139,38 @@ quota_security_group_rule = 100 | namespace | http://docs.openstack.org/network/ext/quotas-sets/api/v2.0 | | updated | 2012-07-29T10:00:00-00:00 | +-------------+------------------------------------------------------------+ - - Per-tenant quotas are supported only supported by some plugins. At least Open vSwitch, - Linux Bridge, and Nicira NVP are known to work but new versions of other plugins may - bring additional functionality - consult the documentation for each plugin. - - There are four CLI commands to manage per-tenant quotas: - - neutron quota-delete - Delete defined quotas of a given - tenant. - - - neutron quota-list - List defined quotas of all tenants. - - - neutron quota-show - Show quotas of a given tenant. - - - neutron quota-update - Define tenant's quotas not to use - defaults. - - Only users with 'admin' role can change a quota value. Note that the default - set of quotas are enforced for all tenants by default, so there is no - quota-create command. - - quota-list displays a list of tenants for which per-tenant quota is enabled. - The tenants who have the default set of quota limits are not listed. - This command is permitted to only 'admin' users. - - $ neutron quota-list -+------------+---------+------+--------+--------+----------------------------------+ + + Only some plug-ins support per-tenant quotas. + Specifically, Open vSwitch, Linux Bridge, and Nicira NVP + support them, but new versions of other plug-ins might + bring additional functionality. 
See the documentation for + each plug-in. + + + + List tenants who have per-tenant quota support + The quota-list command lists tenants + for which the per-tenant quota is enabled. The command does + not list tenants with default quota support. You must be an + administrative user to run this command: + $ neutron quota-list + +------------+---------+------+--------+--------+----------------------------------+ | floatingip | network | port | router | subnet | tenant_id | +------------+---------+------+--------+--------+----------------------------------+ | 20 | 5 | 20 | 10 | 5 | 6f88036c45344d9999a1f971e4882723 | | 25 | 10 | 30 | 10 | 10 | bff5c9455ee24231b5bc713c1b96d422 | +------------+---------+------+--------+--------+----------------------------------+ - - quota-show reports the current set of quota limits for the specified tenant. - Regular (non-admin) users can call this command (without --tenant_id parameter). - If per-tenant quota limits are not defined for the tenant, the default set of - quotas are displayed. - - $ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723 -+------------+-------+ + + + Show per-tenant quota values + The quota-show reports the current + set of quota limits for the specified tenant. + Non-administrative users can run this command without the + --tenant_id parameter. If + per-tenant quota limits are not enabled for the tenant, the + command shows the default set of quotas: + $ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723 + +------------+-------+ | Field | Value | +------------+-------+ | floatingip | 20 | @@ -142,11 +179,10 @@ quota_security_group_rule = 100 | router | 10 | | subnet | 5 | +------------+-------+ - - The below is an example called by a non-admin user. - - $ neutron quota-show -+------------+-------+ + The following command shows the command output for a + non-administrative user: + $ neutron quota-show + +------------+-------+ | Field | Value | +------------+-------+ | floatingip | 20 | @@ -155,8 +191,11 @@ quota_security_group_rule = 100 | router | 10 | | subnet | 5 | +------------+-------+ - You can update a quota of the given tenant by quota-update command. - Update the limit of network quota. + + + Update quota values for a specified tenant + Use the quota-update command to + update a quota for a specified tenant: $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --network 5 +------------+-------+ | Field | Value | @@ -167,7 +206,8 @@ quota_security_group_rule = 100 | router | 10 | | subnet | 10 | +------------+-------+ - You can update quotas of multiple resources in one command. + You can update quotas for multiple resources through one + command: $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --subnet 5 --port 20 +------------+-------+ | Field | Value | @@ -178,13 +218,13 @@ quota_security_group_rule = 100 | router | 10 | | subnet | 5 | +------------+-------+ - - To update the limits of L3 resource (router, floating IP), we need to - specify new values of the quotas after '--'. The example below updates - the limit of the number of floating IPs for the given tenant. - - $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 -- --floatingip 20 -+------------+-------+ + To update the limits for an L3 resource such as, router + or floating IP, you must define new values for the quotas + after the -- directive. 
+ This example updates the limit of the number of floating + IPs for the specified tenant: + $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 -- --floatingip 20 + +------------+-------+ | Field | Value | +------------+-------+ | floatingip | 20 | @@ -193,9 +233,9 @@ quota_security_group_rule = 100 | router | 10 | | subnet | 5 | +------------+-------+ - - You can update the limits of multiple resources including L2 resources and L3 resource in one command. - + You can update the limits of multiple resources by + including L2 resources and L3 resource through one + command. $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --network 3 --subnet 3 --port 3 -- --floatingip 3 --router 3 +------------+-------+ | Field | Value | @@ -206,16 +246,18 @@ quota_security_group_rule = 100 | router | 3 | | subnet | 3 | +------------+-------+ - - To clear per-tenant quota limits, use quota-delete. - After quota-delete, quota limits enforced to the tenant are reset to - the default set of quotas. - - $ neutron quota-delete --tenant_id 6f88036c45344d9999a1f971e4882723 -Deleted quota: 6f88036c45344d9999a1f971e4882723 - -$ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723 -+------------+-------+ + + + Delete per-tenant quota values + To clear per-tenant quota limits, use the + quota-delete command: + $ neutron quota-delete --tenant_id 6f88036c45344d9999a1f971e4882723 + Deleted quota: 6f88036c45344d9999a1f971e4882723 + After you run this command, you can see that quota + values for the tenant are reset to the default + values: + $ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723 + +------------+-------+ | Field | Value | +------------+-------+ | floatingip | 50 | @@ -224,5 +266,7 @@ quota_security_group_rule = 100 | router | 10 | | subnet | 10 | +------------+-------+ -
+ + +
diff --git a/doc/common/section_neutron_cli_commands.xml b/doc/common/section_neutron_cli_commands.xml index c5b7eb77a5..e06df66bd8 100644 --- a/doc/common/section_neutron_cli_commands.xml +++ b/doc/common/section_neutron_cli_commands.xml @@ -21,9 +21,7 @@ [--os-cacert <ca-certificate>] [--insecure] - Positional Arguments - - + Positional arguments agent-delete Delete a given agent. agent-list List agents. diff --git a/doc/common/section_nova_boot_from_volume.xml b/doc/common/section_nova_boot_from_volume.xml index 329d5af060..6d70d167f2 100644 --- a/doc/common/section_nova_boot_from_volume.xml +++ b/doc/common/section_nova_boot_from_volume.xml @@ -7,25 +7,26 @@ After you create a bootable volume, you can launch an instance from that volume. Optionally, to configure your volume, see the OpenStack Configuration + xlink:href="http://docs.openstack.org/trunk/config-reference/content/config_overview.html" + >OpenStack Configuration Reference. - To launch an instance from a volume - To choose an image to create a bootable volume from, run the - following command to list images: - $ nova image-list + + For a list of images to choose from to create a bootable + volume, run this command: + $ nova image-list +--------------------------------------+---------------------------------+--------+--------+ | ID | Name | Status | Server | +--------------------------------------+---------------------------------+--------+--------+ | e0b7734d-2331-42a3-b19e-067adc0da17d | cirros-0.3.1-x86_64-uec | ACTIVE | | | 75bf193b-237b-435e-8712-896c51484de9 | cirros-0.3.1-x86_64-uec-kernel | ACTIVE | | | 19eee81c-f972-44e1-a952-1dceee148c47 | cirros-0.3.1-x86_64-uec-ramdisk | ACTIVE | | -+--------------------------------------+---------------------------------+--------+--------+ ++--------------------------------------+---------------------------------+--------+--------+ +
To create a bootable volume from an image, include the image ID in the command: - # cinder create --image-id e0b7734d-2331-42a3-b19e-067adc0da17d --display-name my-boot-vol 8 + # cinder create --image-id e0b7734d-2331-42a3-b19e-067adc0da17d --display-name my-boot-vol 8 +---------------------+--------------------------------------+ | Property | Value | +---------------------+--------------------------------------+ @@ -66,7 +67,12 @@ $ nova boot --flavor FLAVOR --block_device_mapping DEVNAME=ID:TYPE:SIZE:DELETE_ON_TERMINATE NAME The command arguments are: - ParameterDescription + + + Parameter + Description + + @@ -145,8 +151,10 @@ Attempt to boot from volume - no image supplied error is returned. - You can also attach a swap disk on boot with the --swap - flag, or you can attach an ephemeral disk on boot with the --ephemeral flag. + You can also attach a swap disk on boot with the + --swap flag, or you can attach an + ephemeral disk on boot with the + --ephemeral flag. For example, you might enter the following command to boot from a volume. The volume is not deleted when the instance is terminated: diff --git a/doc/common/section_nova_cli_baremetal.xml b/doc/common/section_nova_cli_baremetal.xml index 6992d7b7ad..dca83ba63d 100644 --- a/doc/common/section_nova_cli_baremetal.xml +++ b/doc/common/section_nova_cli_baremetal.xml @@ -3,53 +3,51 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> Manage bare metal nodes - The bare metal driver for OpenStack Compute manages provisioning of - physical hardware using common cloud APIs and tools such as Orchestration - (Heat). The use case for this driver is for single tenant clouds such as a - high-performance computing cluster or deploying OpenStack itself. - Development efforts are focused on moving the driver out of the Compute code - base in the Icehouse release. If you use the bare metal driver, you must - create and add a network interface to a bare metal node. Then, you can - launch an instance from a bare metal image. - You can list and delete bare metal nodes. When you delete a node, any - associated network interfaces are removed. You can list and remove network - interfaces that are associated with a bare metal node. + The bare metal driver for OpenStack Compute manages + provisioning of physical hardware using common cloud APIs and + tools such as Orchestration (Heat). The use case for this driver + is for single tenant clouds such as a high-performance computing + cluster or deploying OpenStack itself. Development efforts are + focused on moving the driver out of the Compute code base in the + Icehouse release. If you use the bare metal driver, you must + create and add a network interface to a bare metal node. Then, you + can launch an instance from a bare metal image. + You can list and delete bare metal nodes. When you delete a + node, any associated network interfaces are removed. You can list + and remove network interfaces that are associated with a bare + metal node. Commands - baremetal-interface-add - Adds a network interface to a bare metal node. + baremetal-interface-add. Adds a network + interface to a bare metal node. - baremetal-interface-list - Lists network interfaces associated with a bare metal node. + baremetal-interface-list. Lists network + interfaces associated with a bare metal node. - - baremetal-interface-remove - Removes a network interface from a bare metal node. + baremetal-interface-remove. Removes a + network interface from a bare metal node. 
- - baremetal-node-create - Creates a bare metal node. + baremetal-node-create. Creates a bare + metal node. - - baremetal-node-delete - Removes a bare metal node and any associated interfaces. + baremetal-node-delete. Removes a bare + metal node and any associated interfaces. - baremetal-node-list - Lists available bare metal nodes. + baremetal-node-list. Lists available + bare metal nodes. - baremetal-node-show - Shows information about a bare metal node. + baremetal-node-show. Shows information + about a bare metal node. - To manage bare metal nodes Create a bare metal node: $ nova baremetal-node-create --pm_address=1.2.3.4 --pm_user=ipmi --pm_password=ipmi $(hostname -f) 1 512 10 aa:bb:cc:dd:ee:ff @@ -80,24 +78,24 @@ | id | 1 | | port_no | 0 | | address | aa:bb:cc:dd:ee:ff | -+-------------+-------------------+ ++-------------+-------------------+ Launch an instance from a bare metal image: $ nova boot --image my-baremetal-image --flavor my-baremetal-flavor test - +-----------------------------+--------------------------------------+ + +-----------------------------+--------------------------------------+ | Property | Value | +-----------------------------+--------------------------------------+ | status | BUILD | | id | cc302a8f-cd81-484b-89a8-b75eb3911b1b | -... wait for instance to become active ... +... wait for instance to become active ... - You can list bare metal nodes and interfaces, as follows: + List bare metal nodes and interfaces: $ nova baremetal-node-list - When a node is in use, its status includes the UUID of the instance - that runs on it: + When a node is in use, its status includes the UUID of the + instance that runs on it: +----+--------+------+-----------+---------+------------------- +------+------------+-------------+-------------+---------------+ | ID | Host | CPUs | Memory_MB | Disk_GB | MAC Address @@ -132,10 +130,13 @@
- Set the --availability_zone parameter to - specify which zone or node to start the server. You can separate the zone - from the hostname with a comma. As an example: - $ nova boot --availability_zone=zone:host,node - Specifying "host" is optional for the --availability_zone parameter, and "zone:,node" also works. + Set the --availability_zone parameter + to specify which zone or node to use to start the server. + Separate the zone from the host name with a comma. For + example: + $ nova boot --availability_zone=zone:host,node + host is optional for the + --availability_zone parameter. + zone:,node also works.
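As an illustration only, with a hypothetical image name and compute host, booting an instance onto a specific host in the default nova availability zone might look like this:
$ nova boot --image cirros-0.3.1-x86_64-uec --flavor 1 --availability_zone nova:compute-01 test-az-instance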
diff --git a/doc/common/section_nova_cli_boot.xml b/doc/common/section_nova_cli_boot.xml index 44599eb7dd..76a10163aa 100644 --- a/doc/common/section_nova_cli_boot.xml +++ b/doc/common/section_nova_cli_boot.xml @@ -22,7 +22,7 @@
A name for your instance. - + The flavor for your diff --git a/doc/common/section_nova_cli_evacuate.xml index a951f1f075..b827b85574 100644 --- a/doc/common/section_nova_cli_evacuate.xml +++ b/doc/common/section_nova_cli_evacuate.xml @@ -7,13 +7,13 @@ If a cloud compute node fails due to a hardware malfunction or another reason, you can evacuate instances to make them available again. - You can choose evacuation parameters for your use case. + You can choose evacuation parameters for your use + case. To preserve user data on server disk, you must configure shared storage on the target host. Also, you must validate that the current VM host is down. Otherwise, the evacuation fails with an error. - To evacuate your server To find a different host for the evacuated instance, run the following command to list hosts: @@ -40,9 +40,13 @@ To preserve the user disk data on the evacuated - server, deploy OpenStack Compute with shared - filesystem. To configure your system, see Configure migrations in OpenStack Configuration Reference. In this - example, the password remains unchanged. + server, deploy OpenStack Compute with shared file + system. To configure your system, see Configure migrations in + OpenStack Configuration + Reference. In this example, the + password remains unchanged. $ nova evacuate evacuated_server_name host_b --on-shared-storage
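As a sketch of the complete workflow without shared storage (the host_b target and the password value are examples, not required names), you could list candidate hosts and then evacuate the server while setting a new admin password:
$ nova host-list
$ nova evacuate evacuated_server_name host_b --password MyNewAdminPass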
diff --git a/doc/common/section_nova_cli_fileinjection.xml b/doc/common/section_nova_cli_fileinjection.xml index 56e8fa3ef6..5af065f785 100644 --- a/doc/common/section_nova_cli_fileinjection.xml +++ b/doc/common/section_nova_cli_fileinjection.xml @@ -19,6 +19,6 @@ Run the following command: $ nova boot --image ubuntu-cloudimage --flavor 1 --file /root/.ssh/authorized_keys=special_authorized_keysfile - + diff --git a/doc/common/section_nova_cli_images.xml b/doc/common/section_nova_cli_images.xml index 0936943cf4..2df96d2f0c 100644 --- a/doc/common/section_nova_cli_images.xml +++ b/doc/common/section_nova_cli_images.xml @@ -20,7 +20,7 @@ xlink:href="http://docs.openstack.org/trunk/openstack-ops/content/snapsnots.html" >Taking Snapshots in the OpenStack Operations Guide. - + To create the image, list instances to get the diff --git a/doc/common/section_nova_cli_metadata.xml b/doc/common/section_nova_cli_metadata.xml index f03858588c..2cd6f170f1 100644 --- a/doc/common/section_nova_cli_metadata.xml +++ b/doc/common/section_nova_cli_metadata.xml @@ -14,7 +14,7 @@ value. For example, you could add a description and also the creator of the server. $ nova boot --image=natty-image --flavor=2 smallimage2 --meta description='Small test image' --meta creator=joecool - + When viewing the server information, you can see the metadata included on the metadata line: $ nova show smallimage2 @@ -44,5 +44,5 @@ | updated | 2012-05-16T20:48:35Z | | user_id | de3f4e99637743c7b6d27faca4b800a9 | +------------------------+---------------------------------------------------------------+ - + diff --git a/doc/common/section_nova_cli_quotas.xml b/doc/common/section_nova_cli_quotas.xml index b7e3c78417..c1250b3376 100644 --- a/doc/common/section_nova_cli_quotas.xml +++ b/doc/common/section_nova_cli_quotas.xml @@ -34,74 +34,74 @@ cores - + Number of instance cores (VCPUs) allowed per tenant. - + fixed-ips - + Number of fixed IP addresses allowed per tenant. This number must be equal to or greater than the number of allowed instances. - + floating-ips - + Number of floating IP addresses allowed per tenant. - + injected-file-content-bytes - + Number of content bytes allowed per injected file. - + injected-file-path-bytes - + Number of bytes allowed per injected file path. - + injected-files - + Number of injected files allowed per tenant. - + @@ -109,72 +109,72 @@ instances - + Number of instances allowed per tenant. - + key-pairs - + Number of key pairs allowed per user. - + metadata-items - + Number of metadata items allowed per instance. - + ram - + Megabytes of instance ram allowed per tenant. - + security-groups - + Number of security groups per tenant. - + security-group-rules - + Number of rules per security group. 
- + @@ -188,7 +188,7 @@ List all default quotas for all tenants, as follows: $ nova quota-defaults For example: - + $ nova quota-defaults +-----------------------------+-------+ | Quota | Limit | @@ -209,10 +209,10 @@ Update a default value for a new tenant, as follows: - + $ nova quota-class-update --key value default For example: - + $ nova quota-class-update --instances 15 default @@ -276,7 +276,7 @@ To view a list of options for the quota-update command, run: - + $ nova help quota-update @@ -351,7 +351,7 @@ To view a list of options for the quota-update command, run: - + $ nova help quota-update diff --git a/doc/common/section_nova_cli_resizerebuild.xml b/doc/common/section_nova_cli_resizerebuild.xml index cc441a6b46..d4f8763b19 100644 --- a/doc/common/section_nova_cli_resizerebuild.xml +++ b/doc/common/section_nova_cli_resizerebuild.xml @@ -6,7 +6,6 @@ Change the size of your server You change the size of a server by changing its flavor. - To change the size of your server List the available flavors: $ nova flavor-list @@ -23,7 +22,8 @@ +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+ - Show information about your server, including its size: + Show information about your server, including its + size: $ nova show myCirrosServer +-------------------------------------+----------------------------------------------------------------+ @@ -57,14 +57,15 @@ | config_drive | | +-------------------------------------+----------------------------------------------------------------+ The size of the server is m1.small - (2). + (2). + - To resize the server, pass the server ID and the desired flavor to the nova - resize command. - Include the --poll parameter to report the resize + To resize the server, pass the server ID and the desired + flavor to the nova resize command. Include + the --poll parameter to report the resize progress. $ nova resize myCirrosServer 4 --poll -Instance resizing... 100% complete + Instance resizing... 100% complete Finished @@ -78,12 +79,14 @@ Finished When the resize completes, the status becomes - VERIFY_RESIZE. To confirm the resize: + VERIFY_RESIZE. To confirm the + resize: $ nova resize-confirm 6beefcf7-9de6-48b3-9ba9-e11b343189b3 - The server status becomes ACTIVE. + The server status becomes ACTIVE. + - If the resize fails or does not work as expected, you - can revert the resize: + If the resize fails or does not work as expected, you can + revert the resize: $ nova resize-revert 6beefcf7-9de6-48b3-9ba9-e11b343189b3 The server status becomes ACTIVE. diff --git a/doc/common/section_nova_cli_secgroups.xml b/doc/common/section_nova_cli_secgroups.xml index 7ca9e80347..90009b377d 100644 --- a/doc/common/section_nova_cli_secgroups.xml +++ b/doc/common/section_nova_cli_secgroups.xml @@ -52,7 +52,7 @@ You can add extra rules into the default security group for handling the egress traffic. Rules are ingress only at this time. - + In the following example, the group secure1 is deleted. When you view the security group list, it no longer @@ -65,7 +65,7 @@ +---------+-------------+ | default | default | +---------+-------------+ - +
Modify security group rules @@ -118,7 +118,7 @@ indicates that all ICMP codes and types should be allowed. - + The CIDR notation @@ -141,7 +141,7 @@ +-------------+-----------+---------+-----------+--------------+ | tcp | 80 | 80 | 0.0.0.0/0 | | +-------------+-----------+---------+-----------+--------------+ - + In order to allow any IP address to ping an instance inside the default security group (Code 0, Type 8 for the ECHO @@ -151,7 +151,7 @@ +-------------+-----------+---------+-----------+--------------+ | icmp | 0 | 8 | 0.0.0.0/0 | | +-------------+-----------+---------+-----------+--------------+ - + $ nova secgroup-list-rules default +-------------+-----------+---------+-----------+--------------+ @@ -160,7 +160,7 @@ | tcp | 80 | 80 | 0.0.0.0/0 | | | icmp | 0 | 8 | 0.0.0.0/0 | | +-------------+-----------+---------+-----------+--------------+ - + In order to delete a rule, you need to specify the exact same arguments you used to create it: @@ -179,7 +179,7 @@ <cidr> CIDR for address range. $ nova secgroup-delete-rule default tcp 80 80 0.0.0.0/0 - +
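For example, a common companion to the HTTP and ICMP rules shown above is to allow SSH connections to instances in the default security group; the CIDR here is only an example and should be narrowed for production:
$ nova secgroup-add-rule default tcp 22 22 0.0.0.0/0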
diff --git a/doc/common/section_nova_cli_sshkeys.xml b/doc/common/section_nova_cli_sshkeys.xml index 401a5038e1..1378c69a55 100644 --- a/doc/common/section_nova_cli_sshkeys.xml +++ b/doc/common/section_nova_cli_sshkeys.xml @@ -2,15 +2,15 @@
Add keypair + xml:id="nova_cli_keygen"> + Add keypair Create at least one keypair for each project. If you have generated a keypair with an external tool, you can import it into OpenStack. The keypair can be used for multiple instances that belong to a project. - To add a keypair - Create a key + Create a key. To create a mykey key that you can associate with instances, run the following command: $ nova keypair-add mykey > mykey.pem @@ -19,7 +19,7 @@ the mykey key is associated. - Import a keypair + Alternatively, you can import a keypair. To import an existing public key, mykey.pub, and associate it with the mykey key, run the following diff --git a/doc/common/section_nova_cli_userdata.xml b/doc/common/section_nova_cli_userdata.xml index 8c9ebde3df..e8573833ea 100644 --- a/doc/common/section_nova_cli_userdata.xml +++ b/doc/common/section_nova_cli_userdata.xml @@ -1,24 +1,20 @@ -
- Providing User Data to Instances - User Data is a special key in the metadata - service which holds a file that cloud aware applications within - the guest instance can access. For example the + Provide user data to instances + User data is a special key in the + metadata service that holds a file that cloud-aware applications + in the guest instance can access. For example the cloudinit system is an open source package from Ubuntu - that handles early initialization of a cloud instance that makes - use of this user data. - - This user-data can be put in a file on your local system and - then passed in at instance creation with the flag - --user-data <user-data-file> for - example: - $ nova boot --image ubuntu-cloudimage --flavor 1 --user-data mydata.file - + >cloudinit system is a Ubuntu open + source package that handles early initialization of a cloud + instance and that makes use of user + data. + You can place user data in a local file and pass it through + the --user-data <user-data-file> + parameter at instance creation: + $ nova boot --image ubuntu-cloudimage --flavor 1 --user-data mydata.file
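A minimal, hypothetical mydata.file for an image that runs cloud-init might contain a cloud-config document such as the following; the keys are standard cloud-init directives rather than OpenStack options:
#cloud-config
hostname: userdata-demo
runcmd:
  - echo "user data ran at $(date)" > /tmp/userdata.log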
diff --git a/doc/common/section_rpc-for-networking.xml b/doc/common/section_rpc-for-networking.xml index 4c95074422..ba0ff97cfd 100644 --- a/doc/common/section_rpc-for-networking.xml +++ b/doc/common/section_rpc-for-networking.xml @@ -1,113 +1,122 @@
- - Configuration options for the Oslo RPC Messaging System + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" + xml:id="networking-configuring-rpc"> + + Configure the Oslo RPC messaging system - Many OpenStack Networking plug-ins use RPC to enable agents to communicate with the main - neutron-server process. If your plugin requires - agents, they can use the same RPC mechanism used by other OpenStack components like Nova. - OpenStack projects use an open standard for messaging middleware known as AMQP. This messaging - middleware enables the OpenStack services which will exist across multiple servers to talk to - each other. OpenStack Oslo RPC supports three implementations of AMQP: - RabbitMQ, Qpid, and - ZeroMQ - -
- Configuration for RabbitMQ + OpenStack projects use an open standard for messaging + middleware known as AMQP. This messaging middleware enables the + OpenStack services that run on multiple servers to talk to each + other. OpenStack Oslo RPC supports three implementations of AMQP: + RabbitMQ, + Qpid, and + ZeroMQ. - OpenStack Oslo RPC uses RabbitMQ by - default. This section discusses the configuration options that are - relevant when RabbitMQ is used. The - rpc_backend option is not required as long as - RabbitMQ is the default messaging system. - However, if it is included the configuration, it must be set to - neutron.openstack.common.rpc.impl_kombu. +
+ Configure RabbitMQ + + OpenStack Oslo RPC uses RabbitMQ + by default. Use these options to configure the + RabbitMQ message system. The + option is optional as long as + RabbitMQ is the default messaging + system. However, if it is included the configuration, you must + set it to + neutron.openstack.common.rpc.impl_kombu. - + rpc_backend=neutron.openstack.common.rpc.impl_kombu - The following tables describe the rest of the options that - can be used when RabbitMQ is used - as the messaging system. You can configure the messaging - communication for different installation scenarios as well as - tune RabbitMQ's retries and the size of the RPC thread pool. - If you want to monitor notifications through RabbitMQ, you - must set the notification_driver option in - neutron.conf to - neutron.notifier.rabbit_notifier. - - - - - -
- -
- Configuration for Qpid - This section discusses the configuration options that are relevant if - Qpid is used as the messaging system for OpenStack Oslo RPC. - Qpid is not the default messaging system, so it must be enabled - by setting the rpc_backend option in - neutron.conf. + Use these options to configure the + RabbitMQ messaging system. You can + configure messaging communication for different installation + scenarios, tune retries for RabbitMQ, and define the size of the + RPC thread pool. To monitor notifications through RabbitMQ, you + must set the option to + neutron.notifier.rabbit_notifier in the + neutron.conf file: + + +
+
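For reference, a typical RabbitMQ fragment in the neutron.conf file might look like the following sketch; the broker host, credentials, and virtual host are placeholders for your own deployment values:
rpc_backend = neutron.openstack.common.rpc.impl_kombu
notification_driver = neutron.notifier.rabbit_notifier
rabbit_host = controller.example.com
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = RABBIT_PASS
rabbit_virtual_host = /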
+ Configure Qpid + Use these options to configure the + Qpid messaging system for OpenStack + Oslo RPC. Qpid is not the default + messaging system, so you must enable it by setting the + option in the + neutron.conf file: rpc_backend=neutron.openstack.common.rpc.impl_qpid - This next critical option points the compute nodes to the Qpid - broker (server). Set qpid_hostname in neutron.conf to - be the hostname where the broker is running. + This critical option points the compute nodes to the + Qpid broker (server). Set the + option to the host name where + the broker runs in the neutron.conf + file. - The --qpid_hostname option accepts a value in the form of either a - hostname or an IP address. + The option accepts a host + name or IP address value. qpid_hostname=hostname.example.com - If the Qpid broker is listening on a port other than the AMQP - default of 5672, you will need to set the qpid_port - option: + + If the Qpid broker listens on a + port other than the AMQP default of 5672, you + must set the option to that + value: + qpid_port=12345 - If you configure the Qpid broker to require authentication, you - will need to add a username and password to the configuration: + + If you configure the Qpid broker + to require authentication, you must add a user name and password + to the configuration: + qpid_username=username qpid_password=password - By default, TCP is used as the transport. If you would like to enable SSL, set the - qpid_protocol option: + + By default, TCP is used as the transport. To enable SSL, set + the option: + qpid_protocol=ssl - The following table lists the rest of the options used by the Qpid messaging driver for - OpenStack Oslo RPC. It is not common that these options are used. + + Use these additional options to configure the Qpid messaging + driver for OpenStack Oslo RPC. These options are used + infrequently. + +
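Putting the Qpid options together, a hypothetical neutron.conf fragment for an authenticated, SSL-enabled broker might look like this; the host name, port, and credentials are examples only:
rpc_backend = neutron.openstack.common.rpc.impl_qpid
qpid_hostname = hostname.example.com
qpid_port = 5671
qpid_username = qpid-user
qpid_password = QPID_PASS
qpid_protocol = ssl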
-
- Configuration for ZeroMQ - This section discusses the configuration options that are relevant - if ZeroMQ is used as the messaging system for - OpenStack Oslo RPC. ZeroMQ is not the default - messaging system, so it must be enabled by setting the - rpc_backend option in - neutron.conf. - -
-
- Common configuration for messaging +
+ Configure ZeroMQ + Use these options to configure the + ZeroMQ messaging system for + OpenStack Oslo RPC. ZeroMQ is not the + default messaging system, so you must enable it by setting the + option in the + neutron.conf file: + +
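As a minimal sketch, enabling the ZeroMQ driver follows the same naming pattern as the kombu and qpid drivers; the rpc_zmq_host value is an assumed per-node setting and the host name is a placeholder:
rpc_backend = neutron.openstack.common.rpc.impl_zmq
rpc_zmq_host = network-node-01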
+
+ Configure messaging - This section lists options that are common between the - RabbitMQ, Qpid - and ZeroMq - messaging drivers. + Use these common options to configure the + RabbitMQ, + Qpid, and + ZeroMq messaging drivers: - - -
+ + +
- diff --git a/doc/common/section_rpc.xml b/doc/common/section_rpc.xml index 4336de38cf..0aeb117de5 100644 --- a/doc/common/section_rpc.xml +++ b/doc/common/section_rpc.xml @@ -1,119 +1,91 @@
- - Configuring the Oslo RPC Messaging System - - OpenStack projects use an open standard for messaging middleware - known as AMQP. This messaging middleware enables the OpenStack - services which will exist across multiple servers to talk to each other. - OpenStack Oslo RPC supports three implementations of AMQP: - RabbitMQ, - Qpid, and - ZeroMQ. - -
- Configuration for RabbitMQ - - OpenStack Oslo RPC uses RabbitMQ by - default. This section discusses the configuration options that are - relevant when RabbitMQ is used. The - rpc_backend option is not required as long as - RabbitMQ is the default messaging system. - However, if it is included the configuration, it must be set to - nova.openstack.common.rpc.impl_kombu. - - - rpc_backend=nova.openstack.common.rpc.impl_kombu - - The following tables describe the rest of the options that - can be used when RabbitMQ is used - as the messaging system. You can configure the messaging - communication for different installation scenarios as well as - tune RabbitMQ's retries and the size of the RPC thread pool. - If you want to monitor notifications through RabbitMQ, you - must set the notification_driver option in - nova.conf to - nova.notifier.rabbit_notifier. The default - for sending usage data is 60 seconds plus a randomized 0-60 seconds. - - - - -
- -
- Configuration for Qpid - - This section discusses the configuration options that are relevant - if Qpid is used as the messaging system for - OpenStack Oslo RPC. Qpid is not the default - messaging system, so it must be enabled by setting the - rpc_backend option in - nova.conf. - - rpc_backend=nova.openstack.common.rpc.impl_qpid - - This next critical option points the compute nodes to the - Qpid broker (server). Set - qpid_hostname in nova.conf to - be the hostname where the broker is running. - - - The --qpid_hostname option accepts a value in - the form of either a hostname or an IP address. - - - qpid_hostname=hostname.example.com - - If the Qpid broker is listening on a - port other than the AMQP default of 5672, you will - need to set the qpid_port option: - - qpid_port=12345 - - If you configure the Qpid broker to - require authentication, you will need to add a username and password to - the configuration: - - qpid_username=username + xmlns:xi="http://www.w3.org/2001/XInclude" + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" + xml:id="configuring-rpc"> + + Configure the Oslo RPC messaging system + OpenStack projects use AMQP, an open standard for messaging + middleware. This messaging middleware enables the OpenStack services that + run on multiple servers to talk to each other. OpenStack Oslo RPC supports + three implementations of AMQP: RabbitMQ, + Qpid, and + ZeroMQ.
+ Configure RabbitMQ + OpenStack Oslo RPC uses RabbitMQ + by default. Use these options to configure the + RabbitMQ messaging system. The + rpc_backend option is not required as long + as RabbitMQ is the default messaging + system. However, if it is included in the configuration, you must + set it to + nova.openstack.common.rpc.impl_kombu. + rpc_backend=nova.openstack.common.rpc.impl_kombu + You can use these additional options to configure the + RabbitMQ messaging system. You can + configure messaging communication for different installation + scenarios, tune retries for RabbitMQ, and define the size of the + RPC thread pool. To monitor notifications through RabbitMQ, you + must set the notification_driver option to + nova.notifier.rabbit_notifier in the + nova.conf file. The default for sending + usage data is sixty seconds plus a random number of seconds from + zero to sixty. + +
+
+ Configure Qpid + Use these options to configure the + Qpid messaging system for OpenStack + Oslo RPC. Qpid is not the default + messaging system, so you must enable it by setting the + option in the + nova.conf file. + rpc_backend=nova.openstack.common.rpc.impl_qpid + This critical option points the compute nodes to the + Qpid broker (server). Set + to the host name where the + broker runs in the nova.conf file. + + The option accepts a host + name or IP address value. + + qpid_hostname=hostname.example.com + If the Qpid broker listens on a + port other than the AMQP default of 5672, you + must set the option to that + value: + qpid_port=12345 + If you configure the Qpid broker + to require authentication, you must add a user name and password + to the configuration: + qpid_username=username qpid_password=password - - By default, TCP is used as the transport. If you would like to - enable SSL, set the qpid_protocol option: - - qpid_protocol=ssl - - The following table lists the rest of the options used by the Qpid - messaging driver for OpenStack Oslo RPC. It is not common that these - options are used. - - - -
-
- Configuration Options for ZeroMQ - This section discusses the configuration options that are relevant - if ZeroMQ is used as the messaging system for - OpenStack Oslo RPC. ZeroMQ is not the default - messaging system, so it must be enabled by setting the - rpc_backend option in - nova.conf. - - - -
- -
- Common Configuration for Messaging - - This section lists options that are common between both the - RabbitMQ and Qpid - messaging drivers. - - -
+ By default, TCP is used as the transport. To enable SSL, set + the option: + qpid_protocol=ssl + This table lists additional options that you use to + configure the Qpid messaging driver for OpenStack Oslo RPC. + These options are used infrequently. + +
+
+ Configure ZeroMQ + Use these options to configure the + ZeroMQ messaging system for + OpenStack Oslo RPC. ZeroMQ is not the + default messaging system, so you must enable it by setting the + option in the + nova.conf file. + +
+
+ Configure messaging + Use these options to configure the + RabbitMQ and + Qpid messaging drivers. + +
- diff --git a/doc/common/section_storage-concepts.xml b/doc/common/section_storage-concepts.xml index f56e162e67..37235c4369 100644 --- a/doc/common/section_storage-concepts.xml +++ b/doc/common/section_storage-concepts.xml @@ -79,5 +79,5 @@ used independently of the Compute (nova) product. - +
diff --git a/doc/common/section_support-compute.xml b/doc/common/section_support-compute.xml index 786f0d0743..6d8e4263f3 100644 --- a/doc/common/section_support-compute.xml +++ b/doc/common/section_support-compute.xml @@ -1,143 +1,180 @@
-Troubleshooting Compute - Common problems for Compute typically involve misconfigured networking or credentials that are not sourced properly in the environment. Also, most flat networking configurations do not enable ping or ssh from a compute node to the instances running on that node. Another common problem is trying to run 32-bit images on a 64-bit compute node. This section offers more information about how to troubleshoot Compute. -
Log files for Compute - + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" + xml:id="section_compute-troubleshooting"> + Troubleshoot Compute + Common problems for Compute typically involve misconfigured + networking or credentials that are not sourced properly in the + environment. Also, most flat networking configurations do not + enable ping or ssh from + a compute node to the instances that run on that node. Another + common problem is trying to run 32-bit images on a 64-bit + compute node. This section shows you how to troubleshoot + Compute. +
+ Compute log files Compute stores a log file for each service in - /var/log/nova. For example, - nova-compute.log is the log for the - nova-compute - service. You can set the following options to format log - strings for the nova.log module in - nova.conf: + /var/log/nova. For example, + nova-compute.log is the log for + the nova-compute + service. You can set the following options to format log + strings for the nova.log module in the + nova.conf file: - - logging_context_format_string - - - logging_default_format_string - + + logging_context_format_string + + + logging_default_format_string + - If the log level is set to debug, you can - also specify logging_debug_format_suffix - to append extra formatting. For information about what - variables are available for the formatter see: - http://docs.python.org/library/logging.html#formatter. - - You have two options for logging for OpenStack Compute based on configuration - settings. In nova.conf, include the - logfile option to enable logging. Alternatively - you can set use_syslog=1, and then the nova - daemon logs to syslog. + If the log level is set to debug, you + can also specify + logging_debug_format_suffix to + append extra formatting. For information about what + variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter. + You have two options for logging for OpenStack Compute + based on configuration settings. In + nova.conf, include the + logfile option to enable logging. + Alternatively you can set use_syslog=1 + so that the nova daemon logs to syslog.
-
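As an illustration of the two logging approaches in the nova.conf file, you might set either a log file destination or the syslog switch; the path shown is only an example:
logfile = /var/log/nova/nova-compute.log
# Or, instead of a log file, send log output to syslog:
# use_syslog = 1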
Common errors and fixes for Compute - The ask.openstack.org site offers a place to ask and - answer questions, and you can also mark questions as + The ask.openstack.org site offers a place to ask + and answer questions, and you can also mark questions as frequently asked questions. This section describes some - errors people have posted previously. We - are constantly fixing bugs, so online resources are a - great way to get the most up-to-date errors and - fixes. + errors people have posted previously. Bugs are constantly + being fixed, so online resources are a great way to get + the most up-to-date errors and fixes.
- Credential errors, 401, 403 forbidden errors - A 403 forbidden error is caused by missing credentials. - Through current installation methods, there are basically - two ways to get the novarc file. The manual method - requires getting it from within a project zipfile, and the - scripted method just generates novarc out of the project - zip file and sources it for you. If you use the manual - method through a zip file, before sourcing novarc - be sure to save any credentials that were created previously, as they - can be overridden. - - When you run nova-api the - first time, it generates the certificate authority information, - including openssl.cnf. If the CA components are - started prior to this, you may not be able to create your zip file. - Restart the services, then once your CA information is available, - you should be able to create your zip file. - You may also need to check your http proxy settings to see if - they are causing problems with the novarc - creation. + Credential errors, 401, and 403 forbidden + errors + Missing credentials cause a + 403 + forbidden error. To resolve + this issue, use one of these methods: + + Manual + method. Get the + novarc file from + the project ZIP file, save any existing + credentials in case of override, and + manually source the + novarc + file. + + + Script + method. Generates + novarc from the + project ZIP file and sources it for + you. + + + When you run nova-api the first time, it + generates the certificate authority information, + including openssl.cnf. If you + start the CA services before this, you might not be + able to create your ZIP file. Restart the services. + When your CA information is available, create your ZIP + file. + Also, check your HTTP proxy settings to see whether + they cause problems with novarc + creation.
Instance errors - Sometimes a particular instance shows "pending" or you - cannot SSH to it. Sometimes the image itself is the - problem. For example, when using flat manager networking, - you do not have a dhcp server, and certain images - don't support interface injection so you cannot connect - to them. The fix for this type of problem is to use an - image that does support this method, such as Ubuntu, - which should obtain an IP address correctly - with FlatManager network settings. To troubleshoot other - possible problems with an instance, such as one that stays - in a spawning state, first check the directory for the particular - instance under /var/lib/nova/instances - on the nova-compute - host and make sure it has the following files: - - - libvirt.xml - - - disk - - - disk-raw - - - kernel - - - ramdisk - - - console.log (Once the instance actually starts you should - see a console.log.) - - - Check the file sizes to see if they are reasonable. If - any are missing/zero/very small then nova-compute has - somehow not completed download of the images from - the Image service. - Also check nova-compute.log for exceptions. - Sometimes they don't show up in the console output. - Next, check the log file for the instance in the directory - /var/log/libvirt/qemu - to see if it exists and has any useful error messages - in it. - Finally, from the directory for the instance under - /var/lib/nova/instances, try - # virsh create libvirt.xml and see if you - get an error when running this. + Sometimes a particular instance shows + pending or you cannot SSH to + it. Sometimes the image itself is the problem. For + example, when you use flat manager networking, you do + not have a DHCP server and certain images do not + support interface injection; you cannot connect to + them. The fix for this problem is to use an image that + does support this method, such as Ubuntu, which + obtains an IP address correctly with FlatManager + network settings. + To troubleshoot other possible problems with an + instance, such as an instance that stays in a spawning + state, check the directory for the particular instance + under /var/lib/nova/instances on + the nova-compute host and make sure that + these files are present: + + + libvirt.xml + + + disk + + + disk-raw + + + kernel + + + ramdisk + + + After the instance starts, + console.log + + + If any files are missing, empty, or very small, the + nova-compute service did not + successfully download the images from the Image + Service. + Also check nova-compute.log for + exceptions. Sometimes they do not appear in the + console output. + Next, check the log file for the instance in the + /var/log/libvirt/qemu + directory to see if it exists and has any useful error + messages in it. + Finally, from the + /var/lib/nova/instances + directory for the instance, see if this command + returns an error: + # virsh create libvirt.xml
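On the compute host, the checks described above might translate into commands such as these; the instance directory name is a placeholder for the instance that you are debugging:
# ls -lh /var/lib/nova/instances/instance-00000001/
# grep -i error /var/log/nova/nova-compute.log
# ls -l /var/log/libvirt/qemu/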
-
- Manually reset the state of an instance - If an instance gets stuck in an intermediate state (e.g., "deleting"), you can - manually reset the state of an instance using the nova - reset-state command. This will reset it to an error state, which you - can then delete. For - example:$ nova reset-state c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 -$ nova delete c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 - You can also use the --active to - force the instance back into an active state instead of an - error state, for - example:$ nova reset-state --active c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 -
-
- Problems with injection - If you are diagnosing problems with instances not booting, - or booting slowly, consider investigating file injection as a - cause. Setting libvirt_inject_partition - to -2 disables injection in libvirt. This can be required if you want to make user - specified files available from the metadata server (and config drive is not enabled), - for performance reasons, and also to avoid boot failure if injection itself fails. -
-
+
+ Reset the state of an instance + If an instance remains in an intermediate state, such as + deleting, you can use the + nova reset-state command to + manually reset the state of an instance to an error state. + You can then delete the instance. For example: + $ nova reset-state c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 +$ nova delete c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 + You can also use the --active + parameter to force the instance back to an active state + instead of an error state. For example: + $ nova reset-state --active c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 +
+
+ Injection problems + If instances do not boot or boot slowly, investigate + file injection as a cause. + To disable injection in libvirt, set + libvirt_inject_partition to + -2. + + You might need to disable injection to make + user-specified files available from the metadata server + when the configuration drive is not enabled, to improve + performance, or to avoid boot failure when injection + itself fails. + +
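A minimal nova.conf sketch for a compute node where you decide to turn injection off is:
[DEFAULT]
libvirt_inject_partition = -2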
+
diff --git a/doc/common/section_support-object-storage.xml b/doc/common/section_support-object-storage.xml index a83d7c196b..a3291c7598 100644 --- a/doc/common/section_support-object-storage.xml +++ b/doc/common/section_support-object-storage.xml @@ -1,53 +1,93 @@ -Troubleshooting OpenStack Object Storage - For OpenStack Object Storage, everything is logged in /var/log/syslog (or messages on some distros). Several settings enable further customization of logging, such as log_name, log_facility, and log_level, within the object server configuration files. -
- Handling Drive Failure - In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up. - If the drive can’t be replaced immediately, then it is best to leave it unmounted, and remove the drive from the ring. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive is replaced. Once the drive is replaced, it can be re-added to the ring. - Rackspace has seen hints at drive failures by looking at error messages in /var/log/kern.log - - do consider checking this in your monitoring -
- -
- - Handling Server Failure - - If a server is having hardware issues, it is a good idea to make sure the OpenStack Object Storage services are not running. This will allow OpenStack Object Storage to work around the failure while you troubleshoot. - - If the server just needs a reboot, or a small amount of work that should only last a couple of hours, then it is probably best to let OpenStack Object Storage work around the failure and get the machine fixed and back online. When the machine comes back online, replication will make sure that anything that is missing during the downtime will get updated. - - If the server has more serious issues, then it is probably best to remove all of the server’s devices from the ring. Once the server has been repaired and is back online, the server’s devices can be added back into the ring. It is important that the devices are reformatted before putting them back into the ring as it is likely to be responsible for a different set of partitions than before. -
-
-Detecting Failed Drives - - It has been our experience that when a drive is about to fail, error messages will spew into /var/log/kern.log. There is a script called swift-drive-audit that can be run via cron to watch for bad drives. If errors are detected, it will unmount the bad drive, so that OpenStack Object Storage can work around it. The script takes a configuration file with the following settings: - - - This script has only been tested on Ubuntu 10.04, so if you are using a different distro or OS, some care should be taken before using in production. -
- -
- Emergency Recovery of Ring Builder Files - You should always keep a backup of Swift ring builder files. - However, if an emergency occurs, this procedure may assist in returning - your cluster to an operational state. - Using existing Swift tools, there is no way to recover a builder - file from a ring.gz file. However, if you have a knowledge of Python, - it is possible to construct a builder file that is pretty close to - the one you have lost. The following is what you will need to do. - Warning - This procedure is a last-resort for emergency circumstances - it - requires knowledge of the swift python code and may not succeed. - First, load the ring and a new ringbuilder object in a Python REPL: - >>> from swift.common.ring import RingData, RingBuilder + xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" + xml:id="troubleshooting-openstack-object-storage"> + Troubleshoot Object Storage + For OpenStack Object Storage, everything is logged in + /var/log/syslog (or messages on some + distros). Several settings enable further customization of + logging, such as , + , and + , within the object server + configuration files. +
+ Recover drive failures + If a drive fails, make sure the + drive is unmounted to make it easier for Object + Storage to work around the failure while you resolve + it. If you plan to replace the drive immediately, replace + the drive, format it, remount it, and let replication fill + it. + If you cannot replace the drive immediately, leave it + unmounted and remove the drive from the ring. This enables + you to replicate all the replicas on that drive elsewhere + until you can replace the drive. After you replace the + drive, you can add it to the ring again. + + Rackspace has seen hints at drive failures by + looking at error messages in + /var/log/kern.log. Check this + file in your monitoring. + +
+
+ Recover server failures + If a server has hardware issues, make sure that the + Object Storage services are not running. This enables + Object Storage to work around the failure while you + troubleshoot. + If the server needs a reboot or a minimal amount of + work, let Object Storage work around the failure while you + fix the machine and get it back online. When the machine + comes back online, replication updates anything that was + missing during the downtime. + If the server has more serious issues, remove all server + devices from the ring. After you repair and put the server + online, you can add the devices for the server back to the + ring. You must reformat the devices before you add them to + the ring because they might be responsible for a different + set of partitions than before.
+
+ Detect failed drives + When a drive is about to fail, many error messages + appear in the /var/log/kern.log file. + You can run the swift-drive-audit + script through cron to watch for bad + drives. If errors are detected, it unmounts the bad drive + so that Object Storage can work around it. The script uses + a configuration file with these settings: + + This script has been tested on only Ubuntu 10.04. If you + use a different distribution or operating system, take + care before using the script in production. +
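A hedged example of that configuration file and a cron entry to run the script follows; the option names mirror the swift-drive-audit sample configuration, and the values and script path are illustrative:
[drive-audit]
device_dir = /srv/node
minutes = 60
error_limit = 1
log_facility = LOG_LOCAL0
log_level = INFO
# Example cron entry (for example, in /etc/cron.d/swift-drive-audit):
0 * * * * root /usr/bin/swift-drive-audit /etc/swift/drive-audit.conf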
+
+ Recover ring builder files (emergency) + You should always keep a backup of Swift ring builder + files. However, if an emergency occurs, use this procedure + to return your cluster to an operational state. + Existing Swift tools do not enable you to recover a + builder file from a ring.gz file. + However, if you have Python knowledge, you can construct a + builder file similar to the one you have lost. + + This procedure is a last-resort in an emergency. It + requires knowledge of the swift Python code and might + not succeed. + + + + Load the ring and a new ringbuilder object in a + Python REPL: + >>> from swift.common.ring import RingData, RingBuilder >>> ring = RingData.load('/path/to/account.ring.gz') - Now, start copying the data we have in the ring into the builder. - >>> import math + + + Copy the data in the ring into the + builder. + >>> import math >>> partitions = len(ring._replica2part2dev_id[0]) >>> replicas = len(ring._replica2part2dev_id) @@ -62,26 +102,43 @@ >>> for p2d in builder._replica2part2dev: for dev_id in p2d: builder.devs[dev_id]['parts'] += 1 - This is the extent of the recoverable fields. For - min_part_hours you'll either have to remember - what the value you used was, or just make up a new one. - >>> builder.change_min_part_hours(24) # or whatever you want it to be - Try some validation: if this doesn't raise an exception, you may - feel some hope. Not too much, though. - >>> builder.validate() - Save the builder. - >>> import pickle + This is the extent of the recoverable + fields. + + + For , you must + remember the value that you used previously or + create a new value. + >>> builder.change_min_part_hours(24) # or whatever you want it to be + If validation succeeds without raising an + exception, you have succeeded. + >>> builder.validate() + + + Save the builder. + >>> import pickle >>> pickle.dump(builder.to_dict(), open('account.builder', 'wb'), protocol=2) - You should now have a file called 'account.builder' in the current - working directory. - Next, run swift-ring-builder account.builder write_ring - and compare the new account.ring.gz to the account.ring.gz that you started - from. They probably won't be byte-for-byte identical, but if you load them - up in a REPL and their _replica2part2dev_id and - devs attributes are the same (or nearly so), then you're - in good shape. - Next, repeat the procedure for container.ring.gz - and object.ring.gz, and you might get usable builder - files. -
+ The account.builder file + appears in the current working directory. + + + Run swift-ring-builder account.builder + write_ring. + Compare the new + account.ring.gz to the + original account.ring.gz + file. They might not be byte-for-byte identical, + but if you load them in REPL and their + and + attributes are the same + (or nearly so), you have succeeded. + + + Repeat this procedure for the + container.ring.gz and + object.ring.gz files, and + you might get usable builder files. + + +
diff --git a/doc/common/section_tenant-specific-image-storage.xml b/doc/common/section_tenant-specific-image-storage.xml index 82f53d728e..4da2d80feb 100644 --- a/doc/common/section_tenant-specific-image-storage.xml +++ b/doc/common/section_tenant-specific-image-storage.xml @@ -3,33 +3,50 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="configuring-tenant-specific-storage-for-images"> - Configuring Tenant-specific Storage Locations for Images - with Object Storage - For some deployers, storing all images in a single place for - all tenants and users to access is not ideal. To enable access - control to specific images for cloud users, you can configure - the Image service with the ability to store image data in the - image owner-specific locations. - The relevant configuration options in the - glance-api.conf file are: - - - swift_store_multi_tenant: set to - True to enable tenant-specific storage locations (Default - value is False). - - - swift_store_admin_tenants: Specify a list of tenants - by ID to which to grant read and write access to all Object Storage - containers created by the Image service. - - - Assuming you configured 'swift' as your default_store in - glance-api.conf and you enable this - feature as described above, images will be stored in an Object - Storage service (swift) endpoint pulled from the authenticated - user's service_catalog. The created image data will only be - accessible through the Image service by the tenant that owns - it and any tenants defined in swift_store_admin_tenants that - are identified as having admin-level accounts. + Configure tenant-specific image locations with Object + Storage + For some deployers, it is not ideal to store all images in + one place to enable all tenants and users to access them. You + can configure the Image Service to store image data in + tenant-specific image locations. Then, only the following + tenants can use the Image Service to access the created image: + + The tenant who owns the image + + + Tenants that are defined in + and + that have admin-level accounts + + + + To configure tenant-specific image locations + + Configure swift as your + in the + glance-api.conf file. + + + Set these configuration options in the + glance-api.conf file: + + . + Set to True to enable + tenant-specific storage locations. Default + is False. + + + . + Specify a list of tenant IDs that can + grant read and write access to all Object + Storage containers that are created by the + Image Service. + + + + + With this configuration, images are stored in an + Object Storage service (swift) endpoint that is pulled + from the service catalog for the authenticated + user.
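For reference, the relevant glance-api.conf settings could be sketched as follows; the tenant ID is a placeholder that you replace with the ID of a real administrative project:
default_store = swift
swift_store_multi_tenant = True
swift_store_admin_tenants = <admin_tenant_id>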
diff --git a/doc/common/section_trusted-compute-pools.xml b/doc/common/section_trusted-compute-pools.xml index 081b0f3f33..e971556c74 100644 --- a/doc/common/section_trusted-compute-pools.xml +++ b/doc/common/section_trusted-compute-pools.xml @@ -1,98 +1,120 @@ -
- Trusted Compute Pools - - Overview - Trusted compute pools enable administrators to designate a group of compute hosts as - "trusted". These hosts use hardware-based security features, such as Intel's Trusted - Execution Technology (TXT), to provide an additional level of security. Combined with an - external standalone web-based remote attestation server, cloud providers can ensure that - the compute node is running software with verified measurements, thus they can establish - the foundation for the secure cloud stack. Through the Trusted Computing Pools, cloud - subscribers can request services to be run on verified compute nodes. - The remote attestation server performs node verification through the following steps: - - Compute nodes boot with Intel TXT technology enabled. - - - The compute node's BIOS, hypervisor and OS are measured. - - - These measured data is sent to the attestation server when challenged by - attestation server. - - - The attestation server verifies those measurements against good/known - database to determine nodes' trustworthiness. - - - A description of how to set up an attestation service is beyond the scope of this - document. See the Open Attestation project for an open source project that can be used to - implement an attestation service. - - - - - - - - - - - - - Configuring the Compute service to use Trusted Compute Pools -The Compute service must be configured to with the connection information for the attestation - service. The connection information is specified in the - trusted_computing section of nova.conf. Specify the following - parameters in this section. - - server - - Hostname or IP address of the host that runs the attestation - service - - - - port - - HTTPS port for the attestation service - - - - server_ca_file - - Certificate file used to verify the attestation server's - identity. - - - - api_url - - The attestation service URL path. - - - - auth_blob - - An authentication blob, which is required by the attestation - service. - - - Add the following lines to /etc/nova/nova.conf in - the DEFAULT and trusted_computing sections to - enable scheduling support for Trusted Compute Pools, and edit the details of the - trusted_computing section based on the details of your - attestation - service.[DEFAULT] + Trusted compute pools + Trusted compute pools enable administrators to designate a + group of compute hosts as trusted. These hosts use hardware-based + security features, such as the Intel Trusted Execution + Technology (TXT), to provide an additional level of security. + Combined with an external stand-alone web-based remote + attestation server, cloud providers can ensure that the + compute node runs only software with verified measurements and + can ensure a secure cloud stack. + Through the trusted compute pools, cloud subscribers can + request services to run on verified compute nodes. + The remote attestation server performs node verification as + follows: + + + Compute nodes boot with Intel TXT technology + enabled. + + + The compute node BIOS, hypervisor, and OS are + measured. + + + Measured data is sent to the attestation server when + challenged by attestation server. + + + The attestation server verifies those measurements + against a good and known database to determine nodes' + trustworthiness. + + + A description of how to set up an attestation service is + beyond the scope of this document. For an open source project + that you can use to implement an attestation service, see the + Open Attestation project. + + + + + + + + +
+ Configure Compute to use trusted compute pools + + + Configure the Compute service with the + connection information for the attestation + service. + Specify these connection options in the + trusted_computing section + in the nova.conf + configuration file: + + + server + + Host name or IP address of the host + that runs the attestation + service + + + + port + + HTTPS port for the attestation + service + + + + server_ca_file + + Certificate file used to verify the + attestation server's identity. + + + + api_url + + The attestation service URL + path. + + + + auth_blob + + An authentication blob, which is + required by the attestation + service. + + + + + + To enable scheduling support for trusted compute + pools, add the following lines to the + DEFAULT and + trusted_computing sections + in the /etc/nova/nova.conf + file. Edit the details in the + trusted_computing section + based on the details of your attestation + service: + [DEFAULT] compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler scheduler_available_filters=nova.scheduler.filters.all_filters scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter,TrustedFilter @@ -105,30 +127,44 @@ server_ca_file=/etc/nova/ssl.10.1.71.206.crt api_url=/AttestationService/resources # If using OAT pre-v1.5, use this api_url: #api_url=/OpenAttestationWebServices/V1.0 -auth_blob=i-am-openstack - Restart the nova-compute and nova-scheduler services after making these changes. - - - +auth_blob=i-am-openstack + + + Restart the nova-compute and nova-scheduler + services. + + +
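The exact restart commands depend on your distribution and packaging; on Ubuntu or Debian-style packages, for example, the restart typically looks like this (service names assumed to match the package names):
# service nova-compute restart
# service nova-scheduler restart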
+ Configuration reference + +
+
+
Specify trusted flavors - One or more flavors must be configured as "trusted". Users can then request trusted - nodes by specifying one of these trusted flavors when booting a new instance. Use the - nova flavor-key set command to set a flavor as - trusted. For example, to set the m1.tiny flavor as trusted: - - # nova flavor-key m1.tiny set trust:trusted_host trusted - - A user can request that their instance runs on a trusted host by specifying a trusted - flavor when invoking the nova boot command. - - - - - - - - - - - + You must configure one or more flavors as + trusted. Users can request + trusted nodes by specifying a trusted flavor when they + boot an instance. + Use the nova flavor-key set command + to set a flavor as trusted. For example, to set the + m1.tiny flavor as trusted: + # nova flavor-key m1.tiny set trust:trusted_host trusted + To request that their instances run on a trusted host, + users can specify a trusted flavor on the nova + boot command: + + + + + + + + +
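For example, a user can boot an instance on a trusted host as follows; the image and instance name are placeholders:
$ nova boot --flavor m1.tiny --image [image] [instance_name]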
diff --git a/doc/common/section_user-data.xml b/doc/common/section_user-data.xml index 20ed533002..d952c37da0 100644 --- a/doc/common/section_user-data.xml +++ b/doc/common/section_user-data.xml @@ -137,7 +137,7 @@ adduser --disabled-password --gecos "" clouduser hostname: mynode fqdn: mynode.example.com manage_etc_hosts: true - + Example: Configure instances with Puppet diff --git a/doc/common/section_using-vnc-console.xml b/doc/common/section_using-vnc-console.xml index 8f24ced813..8f3c43b817 100644 --- a/doc/common/section_using-vnc-console.xml +++ b/doc/common/section_using-vnc-console.xml @@ -3,74 +3,70 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="using-vnc-console"> -Using VNC Console -There are several methods to interact with the VNC console, - using a VNC client directly, a special java client, or through the - web browser. For information about configuring the console, - see . - - -
- Get an access URL - Nova enables you to create access_urls through the - os-consoles extension. Support for accessing this URL is - provided by the nova client: - $ nova get-vnc-console [server_id] [novnc|xvpvnc] - Specify 'novnc' to get a URL suitable - for pasting into a web browser. - Specify 'xvpvnc' for a URL suitable for - pasting into the Java client. - To request a web browser URL: - $ nova get-vnc-console [server_id] novnc -
- -
- - Access VNC consoles with a Java client - - To enable support for the OpenStack Java VNC client in - compute, run the nova-xvpvncproxy service. - - - xvpvncproxy_port=[port] - - port to bind (defaults to 6081) - - - xvpvncproxy_host=[host] - - host to bind (defaults to 0.0.0.0) - - - As a client, you need a special Java client, which is a - slightly modified version of TightVNC that supports our token - auth: - $ git clone https://github.com/cloudbuilders/nova-xvpvncviewer + Use the VNC console + To interact through the VNC console, you can use a VNC client + directly, a special Java client, or a web browser. For information + about how to configure the console, see . +
+ Get an access URL + Nova enables you to create access_urls through the + os-consoles extension. Support for accessing this URL is + provided by the nova client: + $ nova get-vnc-console [server_id] [novnc|xvpvnc] + Specify 'novnc' to get a URL suitable for + pasting into a web browser. + Specify 'xvpvnc' for a URL suitable for + pasting into the Java client. + To request a web browser URL: + $ nova get-vnc-console [server_id] novnc +
+
+ Access a VNC console with a Java client + To enable support for the OpenStack Java VNC client in + compute, run the nova-xvpvncproxy + service. + + + xvpvncproxy_port=[port] + - port to bind (defaults to 6081) + + + xvpvncproxy_host=[host] + - host to bind (defaults to 0.0.0.0) + + + As a client, you need a special Java client, which is a + slightly modified version of TightVNC that supports our token + auth: + $ git clone https://github.com/cloudbuilders/nova-xvpvncviewer $ cd nova-xvpvncviewer/viewer $ make - To create a session, request an access URL by using - python-novaclient. Then, run the client - as follows. - To get an access URL: - $ nova get-vnc-console [server_id] xvpvnc - To run the client: - $ java -jar VncViewer.jar [access_url] -
+ To create a session, request an access URL by using + python-novaclient. Then, run the client as + follows. + To get an access URL: + $ nova get-vnc-console [server_id] xvpvnc + To run the client: + $ java -jar VncViewer.jar [access_url] +
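If you need the proxy to listen on a non-default port or address, the xvpvncproxy_port and xvpvncproxy_host options described earlier in this section are set in nova.conf; a sketch showing the default values (assumed to go in the DEFAULT section):
[DEFAULT]
xvpvncproxy_port=6081
xvpvncproxy_host=0.0.0.0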
-
- - Access a VNC console through a web browser - - Retrieving an access_url for a web browser is similar to - the flow for the Java client. - To get the access URL, run the following command: +
+ + Access a VNC console with a web browser + + Retrieving an access_url for a web browser is similar to the + flow for the Java client. + To get the access URL, run the following command: - $ nova get-vnc-console [server_id] novnc - Paste the URL into your web browser. + $ nova get-vnc-console [server_id] novnc + Paste the URL into your web browser. - Additionally, you can use the OpenStack dashboard, known - as horizon, to access browser-based VNC consoles for - instances. -
+ Additionally, you can use the OpenStack dashboard, known as + horizon, to access browser-based VNC consoles for + instances. +
diff --git a/doc/common/section_xapi-ami-setup.xml b/doc/common/section_xapi-ami-setup.xml index c95da1f9e8..49efff49d2 100644 --- a/doc/common/section_xapi-ami-setup.xml +++ b/doc/common/section_xapi-ami-setup.xml @@ -3,26 +3,21 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="xapi-ami-setup"> - -Prepare for AMI Type Images - In order to support AMI type images within your OpenStack - installation, a directory /boot/guest needs to be - created inside Dom0. The OpenStack VM will put the kernel and ramdisk - extracted from the AKI and ARI images to this location. - - This directory's content will be maintained by OpenStack, and its - size should not increase during normal operation. However, in case of power - failures or accidental shutdowns, some files might be left over. In order - to prevent these files from filling up Dom0's disk, it is recommended to set up - this directory as a symlink pointing to a subdirectory of the local SR. - - - Execute the following commands in Dom0 to achieve the above mentioned - setup: -# LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal) + + Prepare for AMI type images + To support AMI type images in your OpenStack installation, + you must create a /boot/guest directory + inside Dom0. The OpenStack VM extracts the kernel and ramdisk + from the AKI and ARI images puts them in this location. + OpenStack maintains the contents of this directory and its + size should not increase during normal operation. However, in + case of power failures or accidental shutdowns, some files + might be left over. To prevent these files from filling the + Dom0 disk, set up this directory as a symlink that points to a + subdirectory of the local SR. + Run these commands in Dom0 to achieve this setup: + # LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal) # LOCALPATH="/var/run/sr-mount/$LOCAL_SR/os-guest-kernels" # mkdir -p "$LOCALPATH" -# ln -s "$LOCALPATH" /boot/guest - - +# ln -s "$LOCALPATH" /boot/guest diff --git a/doc/common/section_xapi-install-plugins.xml b/doc/common/section_xapi-install-plugins.xml index 80b8508fbf..6dcd8e7750 100644 --- a/doc/common/section_xapi-install-plugins.xml +++ b/doc/common/section_xapi-install-plugins.xml @@ -3,83 +3,91 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="xapi-install-plugins"> - -Installing the XenAPI Plugins - When using Xen as the hypervisor for OpenStack Compute, you - can install a Python script (usually, but it can be any - executable) on the host side, and then call that through the - XenAPI. These scripts are called plugins. The XenAPI plugins - live in the nova code repository. These plugins have to be - copied to the hypervisor's Dom0, to the appropriate directory, - where xapi can find them. There are several options for the - installation. The important thing is to ensure that the - version of the plugins are in line with the nova installation - by only installing plugins from a matching nova - repository. -
Manual Installation - To manually install - Create temporary files/directories: + + Install the XenAPI plug-ins + When you use Xen as the hypervisor for OpenStack Compute, + you can install a Python script (or any executable) on the + host side, and call that through the XenAPI. These scripts are + called plug-ins. The XenAPI plug-ins live in the nova code + repository. These plug-ins must be copied into the appropriate + directory in the hypervisor's Dom0, where xapi can find them. + There are several options for the installation. The + important thing is to ensure that the version of the plug-ins + is in line with the nova installation, by installing + plug-ins only from a matching nova repository. +
+ Manually install the plug-in + + + Create temporary files/directories: $ NOVA_ZIPBALL=$(mktemp) -$ NOVA_SOURCES=$(mktemp -d) - Get the source from github. The example assumes the master - branch is used. Amend the URL to match the version - being used: - $ wget -qO "$NOVA_ZIPBALL" https://github.com/openstack/nova/archive/master.zip -$ unzip "$NOVA_ZIPBALL" -d "$NOVA_SOURCES"(Alternatively) - Should you wish to use the official Ubuntu +$ NOVA_SOURCES=$(mktemp -d) + + + Get the source from github. The example assumes + the master branch is used. Amend the URL to match + the version being used: + $ wget -qO "$NOVA_ZIPBALL" https://github.com/openstack/nova/archive/master.zip +$ unzip "$NOVA_ZIPBALL" -d "$NOVA_SOURCES" + (Alternatively) To use the official Ubuntu packages, use the following commands to get the - nova code base: - $ ( cd $NOVA_SOURCES && apt-get source python-nova --download-only ) -$ ( cd $NOVA_SOURCES && for ARCHIVE in *.tar.gz; do tar -xzf $ARCHIVE; done ) - Copy the plugins to the hypervisor: + nova code base: + $ ( cd $NOVA_SOURCES && apt-get source python-nova --download-only ) +$ ( cd $NOVA_SOURCES && for ARCHIVE in *.tar.gz; do tar -xzf $ARCHIVE; done ) + + + Copy the plug-ins to the hypervisor: $ PLUGINPATH=$(find $NOVA_SOURCES -path '*/xapi.d/plugins' -type d -print) -$ tar -czf - -C "$PLUGINPATH" ./ | ssh root@xenserver tar -xozf - -C /etc/xapi.d/plugins/ - Remove the temporary files/directories: +$ tar -czf - -C "$PLUGINPATH" ./ | ssh root@xenserver tar -xozf - -C /etc/xapi.d/plugins/ + + + Remove the temporary files/directories: $ rm "$NOVA_ZIPBALL" -$ rm -rf "$NOVA_SOURCES" +$ rm -rf "$NOVA_SOURCES" + -
-
- - Packaged Installation - - Follow these steps to produce a supplemental - pack from the nova sources, and package it as a XenServer +
+
+ Package a XenServer supplemental pack + Follow these steps to produce a supplemental pack from + the nova sources, and package it as a XenServer supplemental pack. - To package a XenServer supplemental pack - - Create RPM packages. Given you have - the nova sources (use one of the methods mentioned - at Manual Installation): - $ cd nova/plugins/xenserver/xenapi/contrib -$ ./build-rpm.shThese - commands leave an .rpm file in - the rpmbuild/RPMS/noarch/ + + + Create RPM packages. Given you have the nova + sources. Use one of the methods in : + $ cd nova/plugins/xenserver/xenapi/contrib +$ ./build-rpm.sh + These commands leave an + .rpm file in the + rpmbuild/RPMS/noarch/ directory. - - - Pack the RPM packages to a - Supplemental Pack, using the XenServer DDK (the - following command should be issued on the - XenServer DDK virtual appliance, after the - produced rpm file has been copied over): - $ /usr/bin/build-supplemental-pack.sh \ + + + Pack the RPM packages to a Supplemental Pack, + using the XenServer DDK (the following command + should be issued on the XenServer DDK virtual + appliance, after the produced rpm file has been + copied over): + $ /usr/bin/build-supplemental-pack.sh \ > --output=output_directory \ > --vendor-code=novaplugin \ > --vendor-name=openstack \ > --label=novaplugins \ > --text="nova plugins" \ > --version=0 \ -> full_path_to_rpmfileThis - command produces an .iso file - in the output directory specified. Copy that file - to the hypervisor. - - - Install the Supplemental Pack. Log - in to the hypervisor, and issue: - # xe-install-supplemental-pack path_to_isofile - - -
+> full_path_to_rpmfile + This command produces an + .iso file in the output + directory specified. Copy that file to the + hypervisor. +
+ + Install the Supplemental Pack. Log in to the + hypervisor, and issue: + # xe-install-supplemental-pack path_to_isofile + +
+
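Whichever installation method you use, a quick check (a suggestion, not part of the official procedure) is to confirm that the plug-in files are present in the directory where xapi looks for them:
# ls /etc/xapi.d/plugins/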
diff --git a/doc/common/section_xapi-resize-setup.xml b/doc/common/section_xapi-resize-setup.xml index a74d00c8df..c71bcc913c 100644 --- a/doc/common/section_xapi-resize-setup.xml +++ b/doc/common/section_xapi-resize-setup.xml @@ -3,17 +3,16 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="xapi-resize-setup"> - -Dom0 Modifications for Resize/Migration Support - To get resize to work with XenServer (and XCP) you need to: + + Modify Dom0 for resize/migration support + To resize servers with XenServer and XCP, you must: - Establish a root trust between all hypervisor nodes of your - deployment: - - You can do so by generating an ssh key-pair (with - ssh-keygen) and then ensuring - that each of your dom0's + Establish a root trust between all hypervisor nodes + of your deployment: + To do so, generate an ssh key-pair with the + ssh-keygen command. Ensure that + each of your dom0's authorized_keys file (located in /root/.ssh/authorized_keys) contains the public key fingerprint (located in @@ -21,22 +20,20 @@ Provide an /images mount point - to your hypervisor's dom0: - - Dom0 space is a premium so creating a directory in - dom0 is kind of dangerous, and almost surely bound to - fail especially when resizing big servers. The least + to the dom0 for your hypervisor: + Dom0 space is at a premium so creating a directory + in dom0 is potentially dangerous and likely to fail + especially when you resize large servers. The least you can do is to symlink /images - to your local storage SR. The instructions below work - for an English-based installation of XenServer (and - XCP) and in the case of ext3 based SR (with which the - resize functionality is known to work + to your local storage SR. The following instructions + work for an English-based installation of XenServer + (and XCP) and in the case of ext3-based SR (with which + the resize functionality is known to work correctly). -# LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal) + # LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal) # IMG_DIR="/var/run/sr-mount/$LOCAL_SR/images" # mkdir -p "$IMG_DIR" -# ln -s "$IMG_DIR" /images - +# ln -s "$IMG_DIR" /images diff --git a/doc/common/section_xen-install.xml b/doc/common/section_xen-install.xml index 7d6474acc4..bbd193984c 100644 --- a/doc/common/section_xen-install.xml +++ b/doc/common/section_xen-install.xml @@ -3,140 +3,138 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="xenapi-install"> - Installing XenServer and + <title xml:id="xenapi-install.title">Install XenServer and XCP Before you can run OpenStack with XCP or XenServer, you must install the software on an appropriate server. + >an appropriate server. Xen is a type 1 hypervisor: When your server starts, Xen is the first software that runs. Consequently, you must install XenServer or XCP before you install the operating - system on which you want to run OpenStack code. The - OpenStack services then run in a virtual machine that you - install on top of XenServer. + system where you want to run OpenStack code. The OpenStack + services then run in a virtual machine that you install on + top of XenServer. - Before you can install your system you must decide if you - want to install Citrix XenServer (either the free edition, or - one of the paid editions) or Xen Cloud Platform from Xen.org. 
- You can download the software from the following locations: - - http://www.citrix.com/XenServer/download - - - - http://www.xen.org/download/xcp/index.html - - - When installing many servers, you may find it - easier to perform Before you can install your system, decide whether to + install a free or paid edition of Citrix XenServer or Xen + Cloud Platform from Xen.org. Download the software from these + locations: + + + http://www.citrix.com/XenServer/download + + + http://www.xen.org/download/xcp/index.html + + + When you install many servers, you might find it easier to + perform PXE boot installations of XenServer or XCP. You - can also package up any post install changes you wish to make - to your XenServer by PXE boot installations of XenServer or XCP. You + can also package any post-installation changes that you want + to make to your XenServer by creating your own XenServer supplemental + >creating your own XenServer supplemental pack. - It is also possible to get XCP by installing the xcp-xenapi package on Debian based - distributions. However, this is not as mature or feature - complete as above distributions. This will modify your boot - loader to first boot Xen, then boot your existing OS on top of - Xen as Dom0. It is in Dom0 that the xapi daemon will run. You - can find more details on the Xen.org wiki: - http://wiki.xen.org/wiki/Project_Kronos - - - Ensure you are using the EXT type of storage - repository (SR). Features that require access to VHD - files (such as copy on write, snapshot and migration) - do not work when using the LVM SR. Storage repository - (SR) is a XenAPI specific term relating to the - physical storage on which virtual disks are - stored. - On the XenServer/XCP installation screen, this is - selected by choosing "XenDesktop Optimized" option. In - case you are using an answer file, make sure you use - srtype="ext" within the - installation tag of the answer - file. - + You can also install the xcp-xenapi + package on Debian-based distributions to get XCP. However, + this is not as mature or feature complete as above + distributions. This modifies your boot loader to first boot + Xen and boot your existing OS on top of Xen as Dom0. The xapi + daemon runs in Dom0. Find more details at http://wiki.xen.org/wiki/Project_Kronos. + + Make sure you use the EXT type of storage repository + (SR). Features that require access to VHD files (such as + copy on write, snapshot and migration) do not work when + you use the LVM SR. Storage repository (SR) is a + XenAPI-specific term relating to the physical storage + where virtual disks are stored. + On the XenServer/XCP installation screen, choose the + XenDesktop Optimized option. If + you use an answer file, make sure you use + srtype="ext" in the + tag of the answer + file. +
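For answer-file installations, the srtype="ext" attribute belongs on the installation element; a minimal sketch of that fragment, with all other attributes and child elements omitted (check the answer-file reference for your XenServer version for the required contents):
<installation srtype="ext">
  ...
</installation>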
- Post install steps - You are now ready to install OpenStack onto your - XenServer system. This process involves the following - steps: - - For resize and migrate functionality, please - perform the changes described in the Configuring Resize section of the - OpenStack Configuration Reference. - - - - Install the VIF isolation rules to help - prevent mac and ip address spoofing. - - - Install the XenAPI plugins - see the next - section. - - - To support AMI type images, you must set up - /boot/guest - symlink/directory in Dom0. For detailed - instructions, see next section. - - - To support resize/migration, set up an ssh - trust relation between your XenServer hosts, - and ensure /images is - properly set up. See next section for more - details. - - - Create a Paravirtualized virtual machine - that can run the OpenStack compute - code. - - - Install and configure the nova-compute - in the above virtual machine. - - For further information on these steps - look at how DevStack performs the last three steps when - doing developer deployments. For more information on - DevStack, take a look at the Post-installation steps + Complete these steps to install OpenStack in your + XenServer system: + + + For resize and migrate functionality, complete + the changes described in the Configure + resize section in the OpenStack Configuration + Reference. + + + Install the VIF isolation rules to help prevent + mac and IP address spoofing. + + + Install the XenAPI plug-ins. See the following + section. + + + To support AMI type images, you must set up + /boot/guest + symlink/directory in Dom0. For detailed + instructions, see next section. + + + To support resize/migration, set up an ssh trust + relation between your XenServer hosts, and ensure + /images is properly set up. + See next section for more details. + + + Create a Paravirtualized virtual machine that + can run the OpenStack compute code. + + + Install and configure the nova-compute in + the above virtual machine. + + + For more information, see how DevStack performs the last + three steps for developer deployments. For more + information about DevStack, see Getting Started + With XenServer and Devstack ( DevStack and XenServer Readme. More - information on the first step can be found in the https://github.com/openstack-dev/devstack/blob/master/tools/xen/README.md). + Find more information about the first step, see + Multi Tenancy Networking Protections in + XenServer ( XenServer mutli-tenancy protection doc. More - information on how to install the XenAPI plugins can be - found in the https://github.com/openstack/nova/blob/master/plugins/xenserver/doc/networking.rst). + For information about how to install the XenAPI plug-ins, + see XenAPI README ( XenAPI plugins Readme. + >https://github.com/openstack/nova/blob/master/plugins/xenserver/xenapi/README).
- Xen Boot from ISO - XenServer, through the XenAPI integration with OpenStack - provides a feature to boot instances from an ISO file. To - activate the "Boot From ISO" feature, you must configure - the SR elements on XenServer host that way. + Xen boot from ISO + XenServer, through the XenAPI integration with + OpenStack, provides a feature to boot instances from an + ISO file. To activate the Boot From ISO feature, you must + configure the SR elements on XenServer host, as + follows: - To Xen boot from ISO Create an ISO-typed SR, such as an NFS ISO library, for instance. For this, using XenCenter @@ -145,32 +143,32 @@ in read-write mode. - On the compute host, find the uuid of this ISO - SR and write it down. - # xe host-list + On the compute host, find and record the uuid of + this ISO SR: + # xe host-list - Locate the uuid of the NFS ISO library: - # xe sr-list content-type=iso + Locate the uuid of the NFS ISO library: + # xe sr-list content-type=iso Set the uuid and configuration. Even if an NFS - mount point isn't local storage, you must specify - "local-storage-iso." + mount point is not local, you must specify + local-storage-iso. # xe sr-param-set uuid=[iso sr uuid] other-config:i18n-key=local-storage-iso - Make sure the host-uuid from "xe pbd-list" - equals the uuid of the host you found - earlier: + Make sure the host-uuid from xe + pbd-list equals the uuid of the host + you found previously: # xe sr-uuid=[iso sr uuid] - You can now add images via the OpenStack Image - Registry, with disk-format=iso, - and boot them in OpenStack Compute. - # glance image-create --name=fedora_iso --disk-format=iso --container-format=bare < Fedora-16-x86_64-netinst.iso - + You can now add images through the OpenStack + Image Service with + disk-format=iso, and boot + them in OpenStack Compute: + # glance image-create --name=fedora_iso --disk-format=iso --container-format=bare < Fedora-16-x86_64-netinst.iso