From 421a8d732ef91eff1dc54927113b1aa7ad037311 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Fri, 2 Aug 2013 18:00:26 +0200 Subject: [PATCH] removed unnecessary spaces ' ' --> '' '. ' --> '.' ' ' --> '' '. ' --> '.' ' ' --> '' '. ' --> '.' Change-Id: Ib30729297f0e05cb85d79ba6780a9d0ff1b8daf7 --- .../src/basic-install_architecture.xml | 10 +- .../src/basic-install_compute-common.xml | 2 +- .../src/basic-install_controller-common.xml | 2 +- .../src/basic-install_controller-keystone.xml | 2 +- .../basic-install/src/basic-install_intro.xml | 4 +- .../src/basic-install_network-common.xml | 2 +- .../src/basic-install_operate.xml | 6 +- .../basic-install/src/bk-basic-install.xml | 2 +- doc/src/docbkx/cli-guide/src/bk-cli-guide.xml | 6 +- .../cli-guide/src/ch_client_overview.xml | 2 +- .../cli-guide/src/cli_troubleshooting.xml | 4 +- .../docbkx/cli-guide/src/glance_cli_howto.xml | 2 +- .../cli-guide/src/neutron_cli_howto.xml | 10 +- doc/src/docbkx/common/about-dashboard.xml | 2 +- doc/src/docbkx/common/adding-images.xml | 88 ++++---- .../docbkx/common/certificates-for-pki.xml | 48 ++--- doc/src/docbkx/common/ch_identity_mgmt.xml | 186 ++++++++-------- doc/src/docbkx/common/ch_image_mgmt.xml | 16 +- doc/src/docbkx/common/ch_installdashboard.xml | 8 +- doc/src/docbkx/common/ch_resources.xml | 2 +- doc/src/docbkx/common/cli_help.xml | 6 +- doc/src/docbkx/common/cli_install.xml | 16 +- doc/src/docbkx/common/cli_overview.xml | 8 +- doc/src/docbkx/common/colocating-services.xml | 4 +- doc/src/docbkx/common/compute-options.xml | 2 +- .../docbkx/common/compute-spice-console.xml | 2 +- doc/src/docbkx/common/config-drive.xml | 4 +- doc/src/docbkx/common/dashboard-configure.xml | 4 +- doc/src/docbkx/common/dashboard-install.xml | 26 +-- .../docbkx/common/dashboard-system-reqs.xml | 10 +- .../docbkx/common/dashboard_customizing.xml | 8 +- doc/src/docbkx/common/dashboard_sessions.xml | 8 +- doc/src/docbkx/common/fibrechannel.xml | 2 +- doc/src/docbkx/common/getstart.xml | 38 ++-- .../docbkx/common/glossary/glossary-terms.xml | 42 ++-- .../common/glossary/openstack-glossary.xml | 2 +- doc/src/docbkx/common/host_aggregates.xml | 2 +- doc/src/docbkx/common/hyper-v.xml | 2 +- doc/src/docbkx/common/identity-configure.xml | 2 +- doc/src/docbkx/common/image-formats.xml | 2 +- doc/src/docbkx/common/introduction-to-xen.xml | 4 +- doc/src/docbkx/common/keystone-concepts.xml | 46 ++-- doc/src/docbkx/common/keystone-ssl-config.xml | 2 +- doc/src/docbkx/common/kvm.xml | 8 +- doc/src/docbkx/common/lxc.xml | 2 +- doc/src/docbkx/common/moosefs.xml | 44 ++-- .../common/nova_cli_access_and_security.xml | 4 +- doc/src/docbkx/common/nova_cli_boot.xml | 28 +-- doc/src/docbkx/common/nova_cli_evacuate.xml | 8 +- .../docbkx/common/nova_cli_fileinjection.xml | 6 +- doc/src/docbkx/common/nova_cli_images.xml | 4 +- doc/src/docbkx/common/nova_cli_secgroups.xml | 32 +-- doc/src/docbkx/common/nova_cli_sshkeys.xml | 4 +- doc/src/docbkx/common/nova_cli_startstop.xml | 6 +- doc/src/docbkx/common/nova_cli_terminate.xml | 4 +- .../common/nova_cli_usage_statistics.xml | 6 +- doc/src/docbkx/common/nova_cli_userdata.xml | 2 +- doc/src/docbkx/common/qemu.xml | 4 +- .../docbkx/common/section_cli_baremetal.xml | 10 +- .../common/section_cli_manage_images.xml | 36 ++-- doc/src/docbkx/common/section_cli_openrc.xml | 10 +- doc/src/docbkx/common/section_cli_reboot.xml | 2 +- .../common/section_dashboard_access.xml | 12 +- .../common/section_dashboard_install.xml | 4 +- ..._dashboard_launch_instances_from_image.xml | 26 +-- 
.../common/section_dashboard_overview.xml | 18 +- doc/src/docbkx/common/storage-concepts.xml | 4 +- doc/src/docbkx/common/support-compute.xml | 8 +- .../docbkx/common/support-object-storage.xml | 6 +- doc/src/docbkx/common/support.xml | 22 +- doc/src/docbkx/common/tables/glance-api.xml | 4 +- .../common/tables/ldap-keystone-conf.xml | 102 ++++----- .../docbkx/common/trusted-compute-pools.xml | 6 +- doc/src/docbkx/common/user-data.xml | 2 +- doc/src/docbkx/common/using-vnc-console.xml | 14 +- .../docbkx/common/xapi-install-plugins.xml | 4 +- doc/src/docbkx/common/xen-install.xml | 2 +- .../docbkx-example/src/docbkx/example.xml | 8 +- .../openstack-admin-user/src/ch_overview.xml | 2 +- .../src/section_cli_floating_ips.xml | 14 +- .../src/section_cli_manage_images.xml | 36 ++-- .../src/section_cli_nova.xml | 24 +-- .../src/section_cli_set_quotas.xml | 24 +-- .../src/section_cli_swift.xml | 24 +-- .../src/section_dashboard_set_quotas.xml | 34 +-- .../add-volume-node.xml | 8 +- .../backup-block-storage-disks.xml | 28 +-- .../bk-block-storage-adminguide.xml | 2 +- .../block-storage-manage-volumes.xml | 6 +- .../block-storage-overview.xml | 4 +- .../drivers/ceph-rbd-volume-driver.xml | 4 +- .../drivers/glusterfs-driver.xml | 6 +- .../drivers/hds-volume-driver.xml | 16 +- .../drivers/hp-lefthand-driver.xml | 2 +- .../drivers/ibm-storwize-svc-driver.xml | 30 +-- .../drivers/netapp-volume-driver.xml | 16 +- .../drivers/nexenta-volume-driver.xml | 2 +- .../drivers/nfs-volume-driver.xml | 2 +- .../drivers/xen-sm-driver.xml | 16 +- .../drivers/xenapi-nfs.xml | 4 +- .../troubleshoot-cinder.xml | 2 +- .../openstack-compute-admin/aboutcompute.xml | 4 +- .../bk-compute-adminguide.xml | 2 +- .../ch_instance_mgmt.xml | 10 +- .../openstack-compute-admin/computeadmin.xml | 102 ++++----- .../computeautomation.xml | 2 +- .../computeconfigure.xml | 16 +- .../computeinstall.xml | 4 +- .../computenetworking.xml | 134 ++++++------ .../computescheduler.xml | 38 ++-- .../computetutorials.xml | 14 +- .../moosefsbackend.xml | 42 ++-- .../openstack-compute-admin/preface.xml | 2 +- .../openstack-compute-admin/rootwrap.xml | 12 +- .../section_dashboard.xml | 8 +- .../ch_blockstorageconfigure.xml | 2 +- .../openstack-config/ch_computecells.xml | 4 +- .../openstack-config/ch_computeconfigure.xml | 14 +- .../ch_computehypervisors.xml | 10 +- .../openstack-config/ch_computescheduler.xml | 52 ++--- .../openstack-config/ch_config-overview.xml | 2 +- .../openstack-config/ch_identityconfigure.xml | 2 +- .../openstack-config/ch_imageservice.xml | 2 +- .../compute-configure-console.xml | 2 +- .../compute-configure-migrations.xml | 14 +- .../compute-configure-quotas.xml | 4 +- .../compute-configure-service-groups.xml | 4 +- .../compute-configure-vnc.xml | 58 ++--- .../docbkx/openstack-image/bk-imageguide.xml | 2 +- .../docbkx/openstack-image/centos-example.xml | 6 +- .../ch_creating_images_automatically.xml | 6 +- .../ch_creating_images_manually.xml | 2 +- .../openstack-image/ch_introduction.xml | 6 +- .../openstack-image/ch_obtaining_images.xml | 8 +- .../openstack-image/ch_openstack_images.xml | 6 +- .../docbkx/openstack-image/ubuntu-example.xml | 6 +- .../openstack-image/windows-example.xml | 2 +- .../ap_configuration_files.xml | 36 ++-- .../bk_openstackinstallguide.xml | 4 +- .../openstack-install/ch_assumptions.xml | 2 +- .../ch_installing-openstack-overview.xml | 8 +- .../ch_instances-running.xml | 2 +- .../openstack-install/ch_terminology.xml | 10 +- .../openstack-install/cinder-install.xml | 2 +- 
.../compute-config-guest-network.xml | 2 +- .../compute-database-mysql.xml | 6 +- .../compute-database-postgresql.xml | 2 +- .../compute-minimum-configuration.xml | 10 +- .../compute-scripted-ubuntu-install.xml | 6 +- .../compute-sys-requirements.xml | 8 +- .../configuring-multiple-compute-nodes.xml | 6 +- .../example-compute-install-arch.xml | 4 +- .../example-object-storage-install-arch.xml | 2 +- .../identity-config-keystone.xml | 4 +- .../identity-install-keystone.xml | 28 +-- .../identity-verify-install.xml | 12 +- .../install-config-glance.xml | 16 +- .../install-config-proxy-node.xml | 2 +- .../install-config-storage-nodes.xml | 2 +- .../openstack-install/install-nova-volume.xml | 2 +- .../installing-additional-compute-nodes.xml | 4 +- .../openstack-install/installing-mysql.xml | 2 +- ...object-storage-howto-install-multinode.xml | 20 +- .../object-storage-install-ubuntu.xml | 2 +- .../object-storage-network-planning.xml | 4 +- .../object-storage-sys-requirements.xml | 4 +- .../app_core.xml | 200 +++++++++--------- .../app_demo_flat.xml | 2 +- .../app_demo_multi_dhcp_agents.xml | 8 +- .../app_demo_single_router.xml | 14 +- .../bk-networking-admin-guide.xml | 2 +- .../ch_adv_config.xml | 54 ++--- .../ch_adv_features.xml | 32 +-- .../ch_adv_operational_features.xml | 14 +- .../ch_auth.xml | 2 +- .../ch_config.xml | 22 +- .../ch_install.xml | 42 ++-- .../ch_limitations.xml | 10 +- .../ch_overview.xml | 20 +- .../ch_preface.xml | 8 +- .../ch_under_the_hood.xml | 16 +- .../ch_using.xml | 30 +-- .../aboutobjectstorage.xml | 10 +- .../bk-objectstorage-adminguide.xml | 2 +- .../objectstorage-config-reference.xml | 16 +- .../objectstorageadmin.xml | 82 +++---- .../objectstoragetutorials.xml | 12 +- .../ch002_why-and-how-we-wrote-this-book.xml | 10 +- .../ch005_security-domains.xml | 8 +- .../ch008_system-roles-types.xml | 4 +- .../ch012_configuration-management.xml | 22 +- .../ch013_node-bootstrapping.xml | 18 +- ...est-practices-for-operator-mode-access.xml | 50 ++--- .../ch015_case-studies-management.xml | 8 +- ...-models-confidence-and-confidentiality.xml | 34 +-- .../ch020_ssl-everywhere.xml | 10 +- .../ch024_authentication.xml | 6 +- .../openstack-security/ch026_compute.xml | 20 +- .../ch032_networking-best-practices.xml | 16 +- .../ch033_securing-neutron-services.xml | 12 +- ...enant-secure-networking-best-practices.xml | 4 +- .../docbkx/openstack-security/ch037_risks.xml | 6 +- .../ch038_transport-security.xml | 16 +- .../ch041_database-backend-considerations.xml | 10 +- .../ch042_database-overview.xml | 4 +- .../ch043_database-transport-security.xml | 10 +- .../ch046_data-residency.xml | 52 ++--- .../ch047_data-encryption.xml | 6 +- .../ch048_key-management.xml | 4 +- .../ch049_case-studies-tenant-data.xml | 20 +- .../openstack-security/ch051_vss-intro.xml | 70 +++--- .../openstack-security/ch052_devices.xml | 8 +- .../ch055_security-services-for-instances.xml | 24 +-- .../ch058_forensicsincident-response.xml | 12 +- .../ch061_compliance-overview.xml | 20 +- .../ch062_audit-guidance.xml | 6 +- ...4_certifications-compliance-statements.xml | 12 +- .../openstack-security/ch065_privacy.xml | 2 +- .../openstack-training/bk000-preface.xml | 2 +- .../bk001-ch003-associate-general.xml | 2 +- .../bk002-ch003-operations-general.xml | 10 +- .../bk002-ch008-operations-assessment.xml | 2 +- .../openstack-training/st-training-guides.xml | 2 +- .../docbkx/openstack-user/src/ch_overview.xml | 2 +- .../src/section_cli_configure_instances.xml | 20 +- .../src/section_cli_floating_ips.xml | 14 +- 
.../openstack-user/src/section_cli_heat.xml | 4 +- .../src/section_cli_launch_instances.xml | 26 +-- .../src/section_cli_manage_volumes.xml | 2 +- .../openstack-user/src/section_cli_nova.xml | 24 +-- .../openstack-user/src/section_cli_swift.xml | 24 +-- .../section_dashboard_access_and_security.xml | 42 ++-- .../section_dashboard_launch_instances.xml | 4 +- ..._dashboard_launch_instances_from_image.xml | 28 +-- ...dashboard_launch_instances_from_volume.xml | 18 +- .../section_dashboard_manage_instances.xml | 12 +- .../src/section_dashboard_manage_volumes.xml | 52 ++--- 237 files changed, 1811 insertions(+), 1811 deletions(-) diff --git a/doc/src/docbkx/basic-install/src/basic-install_architecture.xml b/doc/src/docbkx/basic-install/src/basic-install_architecture.xml index 3d00a5072b..d66f2fd14a 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_architecture.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_architecture.xml @@ -25,14 +25,14 @@ Compute service, such as the API server, the scheduler, conductor, console authenticator, and VNC service. Finally, it hosts the API endpoint for the OpenStack - Network service. + Network service. The Network Controller. Provides the bulk of the OpenStack Network services such as DHCP, layer 2 switching, layer 3 routing, floating IPs (which this - guide does not configure), and metadata connectivity. + guide does not configure), and metadata connectivity. Compute Node. Runs @@ -41,7 +41,7 @@ plugin agent). This server also manages an OpenStack-compatible hypervisor such as KVM or Xen. This server hosts the actual virtual machines - (instances). + (instances). OpenStack provides great flexibility with regard to how its @@ -49,7 +49,7 @@ that run on the Network Controller can easily be installed on the Cloud Controller. As another example, the OpenStack Image service can be installed on its own server (or many servers to - provide a more highly available service). + provide a more highly available service). With regard to cloud networking, a standard OpenStack Network setup can have up to four distinct physical data @@ -88,7 +88,7 @@ possible to create a subnet for the external network that uses IP allocation ranges to use only less than the full range of IP addresses in an IP - block. + block. diff --git a/doc/src/docbkx/basic-install/src/basic-install_compute-common.xml b/doc/src/docbkx/basic-install/src/basic-install_compute-common.xml index fb4317039d..24019a5eb8 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_compute-common.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_compute-common.xml @@ -115,7 +115,7 @@ ONBOOT=yes Install NTP: - + # apt-get install ntp # yum install ntp diff --git a/doc/src/docbkx/basic-install/src/basic-install_controller-common.xml b/doc/src/docbkx/basic-install/src/basic-install_controller-common.xml index 5adb84df1f..d87dc55c4c 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_controller-common.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_controller-common.xml @@ -168,7 +168,7 @@ ONBOOT=yes Install NTP. NTP ensures that the server has the correct time. This is important because if an OpenStack server's time is not correct, it is removed from the rest of the cloud. 
- + # apt-get install ntp # yum install ntp diff --git a/doc/src/docbkx/basic-install/src/basic-install_controller-keystone.xml b/doc/src/docbkx/basic-install/src/basic-install_controller-keystone.xml index 4e4f1c7064..715a976bdc 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_controller-keystone.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_controller-keystone.xml @@ -67,7 +67,7 @@ export OS_SERVICE_TOKEN=password Source the credentials into your environment: source ~/openrc - Configure the Bash shell to load these credentials upon each login: + Configure the Bash shell to load these credentials upon each login: echo "source ~/openrc" >> ~/.bashrc diff --git a/doc/src/docbkx/basic-install/src/basic-install_intro.xml b/doc/src/docbkx/basic-install/src/basic-install_intro.xml index 2190154bff..92d4d9ebaf 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_intro.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_intro.xml @@ -16,7 +16,7 @@ Ringtail) and the most recent LTS (Long Term Support) version which is 12.04 (Precise Pangolin), via the Ubuntu Cloud Archive. At this time, there are not - packages available for 12.10. + packages available for 12.10. We are going to install a three-node setup with one controller, one network and one compute node. @@ -26,7 +26,7 @@ options. These options specify the default setting. You only need to uncomment these lines if you are changing the setting to a non-default value. Additionally, this guide only shows options that are being - modified from their default value. + modified from their default value. Finally, please be aware that the use of password as a password throughout this guide is for simplicity and testing purposes. Please ensure you use proper passwords when diff --git a/doc/src/docbkx/basic-install/src/basic-install_network-common.xml b/doc/src/docbkx/basic-install/src/basic-install_network-common.xml index a2331aca73..fbfd1ff810 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_network-common.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_network-common.xml @@ -157,7 +157,7 @@ ONBOOT=yes Install NTP: - + # apt-get install ntp # yum install ntp diff --git a/doc/src/docbkx/basic-install/src/basic-install_operate.xml b/doc/src/docbkx/basic-install/src/basic-install_operate.xml index ac9d041cf3..f4ea1b54c7 100644 --- a/doc/src/docbkx/basic-install/src/basic-install_operate.xml +++ b/doc/src/docbkx/basic-install/src/basic-install_operate.xml @@ -14,7 +14,7 @@ Create a personal keypair, default_key. If you see an Error: Unable to create keypair: Key pair 'default_key' already exists, it may have been created using the command-line method below. - Modify the permissions on the keypair file, default_key. + Modify the permissions on the keypair file, default_key. # chmod 400 default_key Go to "Instances" and click "Launch Instance" for spawning a new @@ -34,7 +34,7 @@ # nova keypair-add --pub_key ~/.ssh/id_rsa.pub default_key If you see an Error: Unable to create keypair: Key pair 'default_key' already exists, it may have been created using the Dashboard method above. - Modify the permissions on the keypair file, default_key. + Modify the permissions on the keypair file, default_key. # chmod 400 default_key @@ -80,7 +80,7 @@ beginning with qrouter and the other beginning with - qdhcp. + qdhcp. 
Run SSH inside the diff --git a/doc/src/docbkx/basic-install/src/bk-basic-install.xml b/doc/src/docbkx/basic-install/src/bk-basic-install.xml index a34609eedf..1d936666bd 100644 --- a/doc/src/docbkx/basic-install/src/bk-basic-install.xml +++ b/doc/src/docbkx/basic-install/src/bk-basic-install.xml @@ -51,7 +51,7 @@ This document is for administrators who install -OpenStack on various Linux distributions. +OpenStack on various Linux distributions. diff --git a/doc/src/docbkx/cli-guide/src/bk-cli-guide.xml b/doc/src/docbkx/cli-guide/src/bk-cli-guide.xml index 08a13fc406..06350a28cd 100644 --- a/doc/src/docbkx/cli-guide/src/bk-cli-guide.xml +++ b/doc/src/docbkx/cli-guide/src/bk-cli-guide.xml @@ -72,7 +72,7 @@ protocol, including methods, URIs, media types, and response codes. To request OpenStack services, you must first issue an authentication request to the - OpenStack Identity Service v2.0. + OpenStack Identity Service v2.0. @@ -116,11 +116,11 @@ Added front matter to and - reorganized the book. + reorganized the book. Moved authentication to the - installation chapter. + installation chapter. Added the cinder client. diff --git a/doc/src/docbkx/cli-guide/src/ch_client_overview.xml b/doc/src/docbkx/cli-guide/src/ch_client_overview.xml index 59caaa63d7..5aa4ee113a 100644 --- a/doc/src/docbkx/cli-guide/src/ch_client_overview.xml +++ b/doc/src/docbkx/cli-guide/src/ch_client_overview.xml @@ -34,7 +34,7 @@ format="SVG" scale="60"/> To manage your servers, images, volumes, isolated networks, and other cloud resources from the command line, install and - use the OpenStack clients. + use the OpenStack clients. diff --git a/doc/src/docbkx/cli-guide/src/cli_troubleshooting.xml b/doc/src/docbkx/cli-guide/src/cli_troubleshooting.xml index 4b87a18caf..c2ff194d86 100644 --- a/doc/src/docbkx/cli-guide/src/cli_troubleshooting.xml +++ b/doc/src/docbkx/cli-guide/src/cli_troubleshooting.xml @@ -5,7 +5,7 @@ xml:id="troubleshooting_cli"> Troubleshooting The CLI clients are considered to be software in - development. + development. If you cannot run commands successfully, make sure @@ -27,7 +27,7 @@ OS_TENANT_NAME=coolu If you change any environment variables, either log out and back in or source your bash - profile again. + profile again. To override some environment variable diff --git a/doc/src/docbkx/cli-guide/src/glance_cli_howto.xml b/doc/src/docbkx/cli-guide/src/glance_cli_howto.xml index ce38703033..9faba97804 100644 --- a/doc/src/docbkx/cli-guide/src/glance_cli_howto.xml +++ b/doc/src/docbkx/cli-guide/src/glance_cli_howto.xml @@ -7,7 +7,7 @@ xml:id="glance_client"> glance command-line client - Learn how to use the glance client to create and manage images. + Learn how to use the glance client to create and manage images. Positional arguments. Mandatory arguments that - must be specified in a specific order. + must be specified in a specific order. Unknown options. Complement the known - arguments. + arguments. To define an unknown option, use the format: --optionname [type=int|bool|dict...][list=true] [optionvalue]* You can specify multiple option values for an option name. When no option value is specified, the option defaults to bool - with a value of true. + with a value of true. The type is python built-in type, such as int, @@ -216,7 +216,7 @@ CSV Formatter: return. 
A sample of such URLs is: http://localhost:9696/v2.0/networks.json?fields=id&fields=name - Neutron client supports this feature by + Neutron client supports this feature by -F option in known options part and --fields in unknown options part. For example, neutron -F id net-list -- @@ -239,7 +239,7 @@ CSV Formatter: options in unknown option part. For example neutron net-list -- --name test1 test2. Only xx-list - commands support this feature. + commands support this feature. diff --git a/doc/src/docbkx/common/about-dashboard.xml b/doc/src/docbkx/common/about-dashboard.xml index 2e5222d89e..8fdd665f35 100644 --- a/doc/src/docbkx/common/about-dashboard.xml +++ b/doc/src/docbkx/common/about-dashboard.xml @@ -21,7 +21,7 @@ Verify your installation by going to the URL of the - Apache server you configured. + Apache server you configured. diff --git a/doc/src/docbkx/common/adding-images.xml b/doc/src/docbkx/common/adding-images.xml index 846d458890..48ece51f14 100644 --- a/doc/src/docbkx/common/adding-images.xml +++ b/doc/src/docbkx/common/adding-images.xml @@ -5,7 +5,7 @@ xml:id="adding-images"> Adding images with glance image-create To add a virtual machine image to glance, use the - glance image-create command. + glance image-create command. To modify image properties, use the glance image-update command. The image-create command requires that you @@ -30,7 +30,7 @@ If you set the following properties on an image, and the ImagePropertiesFilter scheduler filter is enabled, which is the default, the scheduler only considers compute hosts that satisfy - these properties. + these properties. architecture: The CPU architecture that must be supported by the hypervisor, e.g. @@ -332,7 +332,7 @@ vmware_image_version Currently unused. Set it to - 1. + 1. @@ -344,7 +344,7 @@ instance_uuid For snapshot images, the UUID of the server - used to create this image. + used to create this image. @@ -352,7 +352,7 @@ The ID of image stored in Glance that should be used as the kernel when booting an AMI-style - image. + image. @@ -360,14 +360,14 @@ The ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style - image. + image. os_version The operating system version as specified by - the distributor. + the distributor. 
@@ -387,8 +387,8 @@ arch - This is: Arch Linux - Do not use: + This is: Arch Linux + Do not use: archlinux, or org.archlinux @@ -397,9 +397,9 @@ centos - This is: Community Enterprise + This is: Community Enterprise Operating System - Do not use: + Do not use: org.centos CentOS @@ -408,8 +408,8 @@ debian - This is: Debian - Do not use: + This is: Debian + Do not use: Debian, or org.debian @@ -418,8 +418,8 @@ fedora - This is: Fedora - Do not use: + This is: Fedora + Do not use: Fedora, org.fedora, or org.fedoraproject @@ -429,8 +429,8 @@ freebsd - This is: FreeBSD - Do not use: + This is: FreeBSD + Do not use: org.freebsd, freeBSD, or FreeBSD @@ -440,8 +440,8 @@ gentoo - This is: Gentoo Linux - Do not use: + This is: Gentoo Linux + Do not use: Gentoo, or org.gentoo @@ -450,8 +450,8 @@ mandrake - This is: Mandrakelinux (MandrakeSoft) - Do not use: + This is: Mandrakelinux (MandrakeSoft) + Do not use: mandrakelinux, or MandrakeLinux @@ -460,8 +460,8 @@ mandriva - This is: Mandriva Linux - Do not use: + This is: Mandriva Linux + Do not use: mandrivalinux @@ -469,8 +469,8 @@ mes - This is: Mandriva Enterprise Server - Do not use: + This is: Mandriva Enterprise Server + Do not use: mandrivaent, or mandrivaES @@ -479,17 +479,17 @@ msdos - This is: Microsoft Disc Operating + This is: Microsoft Disc Operating System - Do not use: ms-dos + Do not use: ms-dos netbsd - This is: NetBSD - Do not use: + This is: NetBSD + Do not use: NetBSD, or org.netbsd @@ -498,8 +498,8 @@ netware - This is: Novell NetWare - Do not use: + This is: Novell NetWare + Do not use: novell, or NetWare @@ -508,8 +508,8 @@ openbsd - This is: OpenBSD - Do not use: + This is: OpenBSD + Do not use: OpenBSD, or org.openbsd @@ -518,7 +518,7 @@ opensolaris - Do not use: + Do not use: OpenSolaris,or org.opensolaris @@ -527,8 +527,8 @@ opensuse - This is: openSUSE - Do not use: suse, + This is: openSUSE + Do not use: suse, SuSE, or org.opensuse @@ -537,8 +537,8 @@ rhel - This is: Red Hat Enterprise Linux - Do not use: + This is: Red Hat Enterprise Linux + Do not use: redhat, RedHat, or com.redhat @@ -548,9 +548,9 @@ sled - This is: SUSE Linux Enterprise + This is: SUSE Linux Enterprise Desktop - Do not use: + Do not use: com.suse @@ -558,8 +558,8 @@ ubuntu - This is: Ubuntu - Do not use: + This is: Ubuntu + Do not use: Ubuntu, com.ubuntu, org.ubuntu, or @@ -570,8 +570,8 @@ windows - This is: Microsoft Windows - Do not use: + This is: Microsoft Windows + Do not use: com.microsoft.server, or windoze diff --git a/doc/src/docbkx/common/certificates-for-pki.xml b/doc/src/docbkx/common/certificates-for-pki.xml index 5cffdb56ce..e5cced18ea 100644 --- a/doc/src/docbkx/common/certificates-for-pki.xml +++ b/doc/src/docbkx/common/certificates-for-pki.xml @@ -4,7 +4,7 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="certificates-for-pki"> Certificates for PKI - PKI stands for Public Key Infrastructure. Tokens are + PKI stands for Public Key Infrastructure. Tokens are documents, cryptographically signed using the X509 standard. In order to work correctly token generation requires a public/private key pair. The public key must be signed in an @@ -15,9 +15,9 @@ generated. The files need to be in the locations specified by the top level Keystone configuration file as specified in the above section. Additionally, the private key should only be - readable by the system user that will run Keystone. + readable by the system user that will run Keystone. 
- The certificates can be world readable, but the private key cannot be. The private key should only be readable by the account that is going to sign tokens. When generating files with the keystone-manage pki_setup @@ -105,49 +105,49 @@ nouriuiCgFayIqCssK3SVdhOMINiuJtqv0sE-wBDFiEj-Prcudqlz-n+6q7VgV4mwMPszz39-rwp+P5l SrWY8lF3HrTcJT23sZIleg==
Signing Certificate Issued by External CA - You may use a signing certificate issued by an external CA instead of one generated by keystone-manage. However, a certificate issued by an external CA must satisfy the following conditions: - all certificate and key files must be in Privacy Enhanced Mail (PEM) format - private key files must not be protected by a password - When using a signing certificate issued by an external CA, you do not need to specify key_size, valid_days, and ca_password as they will be ignored. - The basic workflow for using a signing certificate issued by an external CA involves: - Request Signing Certificate from External CA - Convert certificate and private key to PEM if needed - Install External Signing Certificate
Request Signing Certificate from External CA - One way to request a signing certificate from an external CA is to first generate a PKCS #10 Certificate Request Syntax (CRS) using OpenSSL CLI. - First create a certificate request configuration file (e.g. cert_req.conf): [ req ] @@ -167,15 +167,15 @@ organizationalUnitName = Keystone commonName = Keystone Signing emailAddress = keystone@openstack.org - Then generate a CRS with OpenSSL CLI. Then generate a CRS with OpenSSL CLI. Do not encrypt the generated private key. You must use the -nodes option. - For example: openssl req -newkey rsa:1024 -keyout signing_key.pem -keyform PEM -out signing_cert_req.pem -outform PEM -config cert_req.conf -nodes - If everything is successful, you should end up with signing_cert_req.pem and signing_key.pem. Send signing_cert_req.pem to your CA to
Install External Signing Certificate - Assuming you have the following already: + Assuming you have the following already: @@ -204,7 +204,7 @@ openssl req -newkey rsa:1024 -keyout signing_key.pem -keyform PEM -out signing_c certificate chain in PEM format - Copy the above to your certificate directory. For + Copy the above to your certificate directory. For example: mkdir -p /etc/keystone/ssl/certs @@ -214,13 +214,13 @@ cp cacert.pem /etc/keystone/ssl/certs/ chmod -R 700 /etc/keystone/ssl/certs - Make sure the certificate directory is only - accessible by root. + Make sure the certificate directory is only + accessible by root. - If your certificate directory path is different from + If your certificate directory path is different from the default /etc/keystone/ssl/certs, make sure it is reflected in the [signing] section of the - configuration file. + configuration file.
diff --git a/doc/src/docbkx/common/ch_identity_mgmt.xml b/doc/src/docbkx/common/ch_identity_mgmt.xml index eb0a1816b9..4ec3ddbad3 100644 --- a/doc/src/docbkx/common/ch_identity_mgmt.xml +++ b/doc/src/docbkx/common/ch_identity_mgmt.xml @@ -59,13 +59,13 @@ pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body j
Logging - Logging is configured externally to the rest of Identity; the file specifying the logging configuration is in the [DEFAULT] section of the keystone.conf file under log_config. If you wish to route all your logging through syslog, set the use_syslog=true - option in the [DEFAULT] section. + option in the [DEFAULT] section. A sample logging file is available with the project in the directory etc/logging.conf.sample. Like other
Adding Users, Tenants, and Roles with python-keystoneclient - Only users with admin credentials can administer users, tenants and roles. You can configure the python-keystoneclient with admin credentials through either the authentication token, or the username and password method. + Only users with admin credentials can administer users, tenants and roles. You can configure the python-keystoneclient with admin credentials through either the authentication token, or the username and password method.
Token Auth Method - To use the keystone client with token auth, set the following flags:
@@ -229,7 +229,7 @@ keystone-manage db_sync - --username OS_USERNAME. The administrator username. + --username OS_USERNAME. The administrator username. @@ -238,7 +238,7 @@ keystone-manage db_sync - --tenant_name OS_TENANT_NAME. The tenant name. + --tenant_name OS_TENANT_NAME. The tenant name. @@ -261,7 +261,7 @@ keystone-manage db_sync --os-identity-api-version. - Specifies the Identity Service API version. + Specifies the Identity Service API version. For example, the following parameters indicate the use of API v3: --os-url "http://15.253.57.115:35357/v3" --os-identity-api-version 3 @@ -305,19 +305,19 @@ keystone --username=admin --password=secrete --tenant_name=admin tenant-create - tenant owns virtual machines. In swift, a tenant owns containers. Users can be associated with more than one tenant. Each tenant and user pairing can have a role associated with - it. + it.
<literal>tenant-create</literal> - keyword arguments + keyword arguments - name + name - description (optional, defaults to None) + description (optional, defaults to None) - enabled (optional, defaults to True) + enabled (optional, defaults to True) The following command creates a tenant named @@ -326,39 +326,39 @@ keystone --username=admin --password=secrete --tenant_name=admin tenant-create -
<literal>tenant-delete</literal> - arguments + arguments - tenant_id + tenant_id - example: + example: keystone tenant-delete f2b7b39c860840dfa47d9ee4adffa0b3
<literal>tenant-enable</literal> - arguments + arguments - tenant_id + tenant_id - example: + example: keystone tenant-enable f2b7b39c860840dfa47d9ee4adffa0b3
<literal>tenant-disable</literal> - arguments + arguments - tenant_id + tenant_id - example: + example: keystone tenant-disable f2b7b39c860840dfa47d9ee4adffa0b3 @@ -368,25 +368,25 @@ keystone tenant-disable f2b7b39c860840dfa47d9ee4adffa0b3 Users
<literal>user-create</literal> - keyword arguments: + keyword arguments: - name + name - pass + pass - email + email - default_tenant (optional, defaults to None) + default_tenant (optional, defaults to None) - enabled (optional, defaults to True) + enabled (optional, defaults to True) - example: + example: keystone user-create --name=admin \ @@ -396,88 +396,88 @@ keystone user-create
<literal>user-delete</literal> - keyword arguments: + keyword arguments: - user + user - example: + example: keystone user-delete f2b7b39c860840dfa47d9ee4adffa0b3
<literal>user-list</literal> - list users in the system, optionally by a specific tenant + list users in the system, optionally by a specific tenant (identified by tenant_id) - arguments + arguments - tenant_id (optional, defaults to None) + tenant_id (optional, defaults to None) - example: + example: keystone user-list
<literal>user-update --email</literal> - arguments + arguments - user_id + user_id - email + email - example: + example: keystone user-update --email 03c84b51574841ba9a0d8db7882ac645 "someone@somewhere.com"
<literal>user-enable</literal> - arguments + arguments - user_id + user_id - example: + example: keystone user-enable 03c84b51574841ba9a0d8db7882ac645
<literal>user-disable</literal> - arguments + arguments - user_id + user_id - example: + example: keystone user-disable 03c84b51574841ba9a0d8db7882ac645
<literal>user-update --password</literal> - arguments + arguments - user_id + user_id - password + password - example: + example: keystone user-update --password 03c84b51574841ba9a0d8db7882ac645 foo @@ -487,65 +487,65 @@ keystone user-update --password 03c84b51574841ba9a0d8db7882ac645 foo Roles
<literal>role-create</literal> - arguments + arguments - name + name - example: + example: keystone role-create --name=demo
<literal>role-delete</literal> - arguments + arguments - role_id + role_id - example: + example: keystone role-delete 19d1d3344873464d819c45f521ff9890
<literal>role-list</literal> - example: + example: keystone role-list
<literal>role-get</literal> - arguments + arguments - role_id + role_id - example: + example: keystone role-get role=19d1d3344873464d819c45f521ff9890
<literal>add-user-role</literal> - arguments + arguments - role_id + role_id - user_id + user_id - tenant_id + tenant_id - example: + example: keystone add-user-role \ 3a751f78ef4c412b827540b829e2d7dd \ @@ -555,19 +555,19 @@ keystone add-user-role \
<literal>remove-user-role</literal> - arguments + arguments - role_id + role_id - user_id + user_id - tenant_id + tenant_id - example: + example: keystone remove-user-role \ 19d1d3344873464d819c45f521ff9890 \ @@ -580,19 +580,19 @@ keystone remove-user-role \ Services
<literal>service-create</literal> - keyword arguments + keyword arguments - name + name - type + type - description + description - example: + example: keystone service create \ --name=nova \ @@ -602,39 +602,39 @@ keystone service create \
<literal>service-list</literal> - arguments + arguments - service_id + service_id - example: + example: keystone service-list
<literal>service-get</literal> - arguments + arguments - service_id + service_id - example: + example: keystone service-get 08741d8ed88242ca88d1f61484a0fe3b
<literal>service-delete</literal> - arguments + arguments - service_id + service_id - example: + example: keystone service-delete 08741d8ed88242ca88d1f61484a0fe3b @@ -682,11 +682,11 @@ keystone service-delete 08741d8ed88242ca88d1f61484a0fe3b To ensure services that you add to the catalog know about the users, tenants, and roles, you must create an admin token and create service users. These sections walk through those - requirements. + requirements.
Admin Token - For a default installation of Keystone, before you can use the REST API, you need to define an authorization token. This is configured in the keystone.conf file under the [DEFAULT] section. In the @@ -697,10 +697,10 @@ keystone service-delete 08741d8ed88242ca88d1f61484a0fe3b [DEFAULT] admin_token = ADMIN - This configured token is a "shared secret" between keystone and other OpenStack services, and is used by the client to communicate with the API to create tenants, users, roles, etc.
Setting up tenants, users, and roles @@ -790,7 +790,7 @@ keystone user-role-add --tenant_id=[uuid of the service tenant] \ heavily - and this must be configured for the OpenStack Dashboard to properly function. - The endpoints for these services are defined in a + The endpoints for these services are defined in a template, an example of which is in the project as the file etc/default_catalog.templates. When keystone uses a template file backend, then changes made to @@ -838,7 +838,7 @@ keystone service-create --name=swift \
Configuring Nova to use Keystone - When configuring Nova, it is important to create a nova user in the service tenant and + When configuring Nova, it is important to create a nova user in the service tenant and include the nova user's login information in /etc/nova/nova.conf
@@ -1215,7 +1215,7 @@ admin_password = keystone123
Configuring Keystone SSL support - Keystone may be configured to support 2-way SSL out-of-the-box. +Keystone may be configured to support 2-way SSL out-of-the-box. The x509 certificates used by Keystone must be obtained externally and configured for use with Keystone as described in this section. However, a set of sample certificates is provided @@ -1245,8 +1245,8 @@ admin_password = keystone123 Note that you may choose whatever names you want for these certificates, or combine the public/private keys in the same file if you wish. These certificates are just provided - as an example. - To enable SSL with client authentication, modify the + as an example. +To enable SSL with client authentication, modify the etc/keystone.conf file accordingly under the [ssl] section. SSL configuration example using the included sample certificates: diff --git a/doc/src/docbkx/common/ch_image_mgmt.xml b/doc/src/docbkx/common/ch_image_mgmt.xml index ceb8fd794c..28ca8b4de5 100644 --- a/doc/src/docbkx/common/ch_image_mgmt.xml +++ b/doc/src/docbkx/common/ch_image_mgmt.xml @@ -13,7 +13,7 @@ xlink:href="http://docs.openstack.org/cli/quick-start/content/glance_client.html">glance command-line tool, or the Python API - to accomplish the same tasks. + to accomplish the same tasks. VM images made available through OpenStack Image Service can be stored in a variety of locations. The OpenStack Image Service supports the following backend stores: @@ -135,14 +135,14 @@ injecting ssh keys into instances before they are booted. This allows a user to log in to the instances that he or she creates securely. Generally the first thing that a - user does when using the system is create a keypair. + user does when using the system is create a keypair. Keypairs provide secure authentication to your instances. As part of the first boot of a virtual image, the private key of your keypair is added to authorized_keys file of the login account. Nova generates a public and private key pair, and sends the private key to the user. The public key is stored so that it can be - injected into instances. + injected into instances. Run (boot) a test instance: $ nova boot --image cirros-0.3.0-x86_64 --flavor m1.small --key_name test my-first-server @@ -174,7 +174,7 @@ $ nova list - The instance will go from BUILD to ACTIVE in a short + The instance will go from BUILD to ACTIVE in a short time, and you should be able to connect via ssh as 'cirros' user, using the private key you created. If your ssh keypair fails for some reason, you can also log in @@ -222,11 +222,11 @@ Since the release of the API in its 1.1 version, it is possible to pause and suspend instances. - Pausing and Suspending instances only apply to + Pausing and Suspending instances only apply to KVM-based hypervisors and XenServer/XCP Hypervisors. - Pause/ Unpause : Stores the content of the VM in memory + Pause/ Unpause : Stores the content of the VM in memory (RAM). Suspend/ Resume : Stores the content of the VM on disk. @@ -245,7 +245,7 @@
Suspending instance - To suspend an instance : + To suspend an instance : nova suspend $server-id To resume a suspended instance : nova resume $server-id @@ -411,7 +411,7 @@ Options: - Load the contents of a local directory into glance. + Load the contents of a local directory into glance. The dump and load are useful when replicating across two glance servers where a direct diff --git a/doc/src/docbkx/common/ch_installdashboard.xml b/doc/src/docbkx/common/ch_installdashboard.xml index e4919ba10d..e487c98824 100644 --- a/doc/src/docbkx/common/ch_installdashboard.xml +++ b/doc/src/docbkx/common/ch_installdashboard.xml @@ -9,18 +9,18 @@ xlink:href="https://github.com/openstack/horizon/" >horizon, is a Web interface that allows cloud administrators and users to manage various OpenStack resources - and services. + and services. The dashboard enables web-based interactions with the - OpenStack Compute cloud controller through the OpenStack APIs. + OpenStack Compute cloud controller through the OpenStack APIs. The following instructions show an example deployment - configured with an Apache web server. + configured with an Apache web server. After you install and configure the dashboard, you can complete the following tasks: Customize your dashboard. See . + linkend="dashboard-custom-brand"/>. Set up session storage for the dashboard. See Resources For the available OpenStack documentation, see docs.openstack.org. + >docs.openstack.org. For assistance with OpenStack, go to ask.openstack.org. @@ -672,7 +672,7 @@ Defines resources for a cell, including CPU, storage, and networking. Can apply to the specific - services within a cell or a whole cell. + services within a cell or a whole cell. @@ -838,7 +838,7 @@ cloud architect A person who plans, designs, and oversees the - creation of clouds. + creation of clouds. @@ -980,7 +980,7 @@ A node that runs the nova-compute daemon, a VM instance that provides a wide range of services - such as a web services and analytics. + such as a web services and analytics. @@ -1432,7 +1432,7 @@ usa.gov, Harvard.edu, or mail.yahoo.com. A domain is an entity or container of all DNS-related information containing one or more - records. + records. @@ -1461,7 +1461,7 @@ master invokes the slave. DNS servers might also be clustered or replicated such that changes made to one DNS server are automatically propagated to - other active servers. + other active servers. @@ -2189,7 +2189,7 @@ can also create custom images, or snapshots, from servers that you have launched. Custom images can be used for data backups or as "gold" images for - additional servers. + additional servers. @@ -2351,7 +2351,7 @@ Number that is unique to every computer system on the Internet. Two versions of the Internet Protocol (IP) are in use for addresses: IPv4 and - IPv6. + IPv6. @@ -2479,7 +2479,7 @@ large object An object within swift that is larger than 5 - GBs. + GBs. @@ -2540,7 +2540,7 @@ belongs to a cloud account. It is used to distribute workloads between multiple back-end systems or services, based on the criteria defined - as part of its configuration. + as part of its configuration. @@ -2959,7 +2959,7 @@ Opens all objects for an object server and verifies the MD5 hash, size, and metadata for each - object. + object. @@ -3356,7 +3356,7 @@ other and with the public network. All machines must have a public and private network interface. The public network interface is controlled by the - public_interface option. + public_interface option. 
diff --git a/doc/src/docbkx/common/nova_cli_boot.xml b/doc/src/docbkx/common/nova_cli_boot.xml index 61c8e7b7fe..111ab5559b 100644 --- a/doc/src/docbkx/common/nova_cli_boot.xml +++ b/doc/src/docbkx/common/nova_cli_boot.xml @@ -43,7 +43,7 @@ server that can be launched. For more details and a list of default flavors available, see Section 1.5, "Managing Flavors," (↑ User Guide for - Administrators ). + Administrators ). User Data is a special key in @@ -54,7 +54,7 @@ >cloudinit system is an open source package from Ubuntu that handles early initialization of a cloud instance that makes use - of this user data. + of this user data. Access and security credentials, which include @@ -74,7 +74,7 @@ use the keypair for multiple instances that belong to that project. For details, refer to Section 1.5.1, Creating or - Importing Keys. + Importing Keys. A security @@ -125,7 +125,7 @@ | 84 | m1.micro | 128 | 0 | 0 | | 1 | 1.0 | True | +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+ Note the ID of the flavor that you want to use - for your instance. + for your instance. List the available images: @@ -142,14 +142,14 @@ $ nova image-list | grep 'kernel' | df430cc2-3406-4061-b635-a51c16e488ac | cirros-0.3.1-x86_64-uec-kernel | ACTIVE | | Note the ID of the image that you want to boot - your instance from. + your instance from. List the available security groups: If you are an admin user, specify the --all-tenants parameter - to list groups for all tenants. + to list groups for all tenants. $ nova secgroup-list --all-tenants +----+---------+-------------+----------------------------------+ @@ -160,7 +160,7 @@ +----+---------+-------------+----------------------------------+ If you have not created any security groups, you can assign the instance to only the default - security group. + security group. You can also list rules for a specified security group: $ nova secgroup-list-rules default @@ -174,7 +174,7 @@ 80. - List the available keypairs. + List the available keypairs. $ nova keypair-list +------+-------------+ | Name | Fingerprint | @@ -212,9 +212,9 @@ depending on which parameters you provide. A status of BUILD indicates that the instance has started, but is not yet - online. + online. A status of ACTIVE indicates - that your server is active. + that your server is active. +-------------------------------------+--------------------------------------+ | Property | Value | @@ -250,7 +250,7 @@ Copy the server ID value from the id field in the output. You use this ID to get details for or delete your - server. + server. Copy the administrative password value from the adminPass field. You use this value to log into your server. @@ -281,9 +281,9 @@ This command lists all instances of the project you belong to, including their ID, their name, their status, and their private (and if assigned, - their public) IP addresses. + their public) IP addresses. If the status for the instance is ACTIVE, the - instance is online. + instance is online. To view the available options for the nova list command, run the following command: @@ -293,7 +293,7 @@ If you did not provide a keypair, security groups, or rules, you can only access the instance from inside the cloud through VNC. Even pinging - the instance is not possible. + the instance is not possible. 
diff --git a/doc/src/docbkx/common/nova_cli_evacuate.xml b/doc/src/docbkx/common/nova_cli_evacuate.xml index 742a4a4b64..9b690075ea 100644 --- a/doc/src/docbkx/common/nova_cli_evacuate.xml +++ b/doc/src/docbkx/common/nova_cli_evacuate.xml @@ -6,12 +6,12 @@ Evacuate instances If a cloud compute node fails due to a hardware malfunction or another reason, you can evacuate instances to make them - available again. - You can choose evacuation parameters for your use case. + available again. + You can choose evacuation parameters for your use case. To preserve user data on server disk, you must configure shared storage on the target host. Also, you must validate that the current VM host is down. Otherwise the evacuation - fails with an error. + fails with an error. To evacuate your server @@ -44,7 +44,7 @@ filesystem. To configure your system, see Configure migrations guide. In this - example, the password remains unchanged. + example, the password remains unchanged. $ nova evacuate evacuated_server_name host_b --on-shared-storage diff --git a/doc/src/docbkx/common/nova_cli_fileinjection.xml b/doc/src/docbkx/common/nova_cli_fileinjection.xml index 4802573dbe..bcc8b48c2b 100644 --- a/doc/src/docbkx/common/nova_cli_fileinjection.xml +++ b/doc/src/docbkx/common/nova_cli_fileinjection.xml @@ -9,10 +9,10 @@ Inject files into instances You can inject local files into the instance file system when - you launch an instance. + you launch an instance. Use the --file dst-path=src-path - parameter on the nova boot command. - You can inject up to five files. + parameter on the nova boot command. + You can inject up to five files. For example, you might inject the special_authorized_keysfile file into the instance rather than using the regular ssh key injection. diff --git a/doc/src/docbkx/common/nova_cli_images.xml b/doc/src/docbkx/common/nova_cli_images.xml index 6040f2f93f..d9bda812a5 100644 --- a/doc/src/docbkx/common/nova_cli_images.xml +++ b/doc/src/docbkx/common/nova_cli_images.xml @@ -37,7 +37,7 @@ $ nova image-create myCirrosServer myCirrosImageThe command creates a qemu snapshot and automatically uploads the image to your repository. Only the tenant - that creates the image has access to it. + that creates the image has access to it. Get details for your image to check its @@ -79,7 +79,7 @@ After a while, the image status changes from SAVING to ACTIVE. Only the tenant who - creates the image has access to it. + creates the image has access to it. diff --git a/doc/src/docbkx/common/nova_cli_secgroups.xml b/doc/src/docbkx/common/nova_cli_secgroups.xml index 86fa4904f2..986dfaf8a7 100644 --- a/doc/src/docbkx/common/nova_cli_secgroups.xml +++ b/doc/src/docbkx/common/nova_cli_secgroups.xml @@ -11,7 +11,7 @@
Add or delete a security group Security groups can be added with nova - secgroup-create. + secgroup-create. The following example shows the creation of the security group secure1. After the group is created, it can be viewed in the security @@ -51,7 +51,7 @@ You can add extra rules into the default security group for handling the egress - traffic. Rules are ingress only at this time. + traffic. Rules are ingress only at this time. In the following example, the group @@ -83,16 +83,16 @@ types of traffic. The command requires the following arguments for both TCP and UDP rules : - <secgroup> ID of security group. + <secgroup> ID of security group. - <ip_proto> IP protocol (icmp, tcp, udp). + <ip_proto> IP protocol (icmp, tcp, udp). - <from_port> Port at start of range. + <from_port> Port at start of range. - <to_port> Port at end of range. + <to_port> Port at end of range. <cidr> CIDR for address range. @@ -101,23 +101,23 @@ For ICMP rules, instead of specifying a begin and end port, you specify the allowed ICMP code and ICMP type: - <secgroup> ID of security group. + <secgroup> ID of security group. - <ip_proto> IP protocol (with icmp specified). + <ip_proto> IP protocol (with icmp specified). - <ICMP_code> The ICMP code. + <ICMP_code> The ICMP code. - <ICMP_type> The ICMP type. + <ICMP_type> The ICMP type. <cidr> CIDR for the source address range. - Entering "-1" for both code and type + Entering "-1" for both code and type indicates that all ICMP codes and types should be allowed. @@ -170,19 +170,19 @@ In order to delete a rule, you need to specify the exact same arguments you used to create it: - <secgroup> ID of security group. + <secgroup> ID of security group. - <ip_proto> IP protocol (icmp, tcp, udp). + <ip_proto> IP protocol (icmp, tcp, udp). - <from_port> Port at start of range. + <from_port> Port at start of range. - <to_port> Port at end of range. + <to_port> Port at end of range. - <cidr> CIDR for address range. + <cidr> CIDR for address range. $ nova secgroup-delete-rule default tcp 80 80 0.0.0.0/0 diff --git a/doc/src/docbkx/common/nova_cli_sshkeys.xml b/doc/src/docbkx/common/nova_cli_sshkeys.xml index 1378bc8a4d..abbaa7ecb0 100644 --- a/doc/src/docbkx/common/nova_cli_sshkeys.xml +++ b/doc/src/docbkx/common/nova_cli_sshkeys.xml @@ -17,7 +17,7 @@ $ nova keypair-add mykey > mykey.pem Save the mykey.pem file to a secure location. It enables root access to any instances with which - the mykey key is associated. + the mykey key is associated. Import a keypair @@ -28,7 +28,7 @@ $ nova keypair-add --pub-key mykey.pub mykey You must have the matching private key to access instances - that are associated with this key. + that are associated with this key.
diff --git a/doc/src/docbkx/common/nova_cli_startstop.xml b/doc/src/docbkx/common/nova_cli_startstop.xml index 763ee01e7d..1739a19615 100644 --- a/doc/src/docbkx/common/nova_cli_startstop.xml +++ b/doc/src/docbkx/common/nova_cli_startstop.xml @@ -13,17 +13,17 @@ To pause a server, run the following command:$ nova pause SERVER This command stores the state of the VM in RAM. A paused instance continues to run in a frozen - state. + state. To un-pause the server, run the following command:$ nova unpause SERVER
Suspend and resume an instance To suspend and resume a server Administrative users might want to suspend an infrequently used - instance or to perform system maintenance. + instance or to perform system maintenance. When you suspend an instance, its VM state is stored on disk, all memory is written to disk, and the virtual machine is stopped. Suspending an instance is similar to placing a device - in hibernation; memory and vCPUs become available. To initiate a hypervisor-level suspend operation, + in hibernation; memory and vCPUs become available.To initiate a hypervisor-level suspend operation, run the following command:$ nova suspend SERVER To resume a suspended server: diff --git a/doc/src/docbkx/common/nova_cli_terminate.xml b/doc/src/docbkx/common/nova_cli_terminate.xml index 17abef4199..37d5715209 100644 --- a/doc/src/docbkx/common/nova_cli_terminate.xml +++ b/doc/src/docbkx/common/nova_cli_terminate.xml @@ -6,7 +6,7 @@ version="5.0" xml:id="terminating"> Delete an instance - When you no longer need an instance, you can delete it. + When you no longer need an instance, you can delete it. To delete an instance List all instances: $ nova list @@ -22,7 +22,7 @@ newServer instance, which is in ERROR state: $ nova delete newServer -The command does not notify that your server was deleted. +The command does not notify that your server was deleted. Instead, run the nova list command: $ nova list diff --git a/doc/src/docbkx/common/nova_cli_usage_statistics.xml b/doc/src/docbkx/common/nova_cli_usage_statistics.xml index 08d0ba43d2..edcc5f10f8 100644 --- a/doc/src/docbkx/common/nova_cli_usage_statistics.xml +++ b/doc/src/docbkx/common/nova_cli_usage_statistics.xml @@ -37,10 +37,10 @@ | devstack-grizzly | 66265572db174a7aa66eba661f58eb9e | 2 | 4096 | 40 | +------------------+----------------------------------+-----+-----------+---------+ The cpu column shows the sum of - the virtual CPUs for instances running on the host. + the virtual CPUs for instances running on the host. The memory_mb column shows the sum of the memory (in MB) allocated to the instances - that run on the hosts. + that run on the hosts. The disk_gb column shows the sum of the root and ephemeral disk sizes (in GB) of the instances that run on the hosts. @@ -53,7 +53,7 @@ To show instance usage statistics Get CPU, memory, I/O, and network statistics for an - instance. + instance. First, list instances: $ nova list +--------------------------------------+----------------------+--------+------------+-------------+------------------+ diff --git a/doc/src/docbkx/common/nova_cli_userdata.xml b/doc/src/docbkx/common/nova_cli_userdata.xml index 4984c128e5..7ca19754d9 100644 --- a/doc/src/docbkx/common/nova_cli_userdata.xml +++ b/doc/src/docbkx/common/nova_cli_userdata.xml @@ -13,7 +13,7 @@ xlink:href="https://help.ubuntu.com/community/CloudInit" >cloudinit system is an open source package from Ubuntu that handles early initialization of a cloud instance that makes - use of this user data. + use of this user data. This user-data can be put in a file on your local system and then passed in at instance creation with the flag diff --git a/doc/src/docbkx/common/qemu.xml b/doc/src/docbkx/common/qemu.xml index 47487943b7..f04725c010 100644 --- a/doc/src/docbkx/common/qemu.xml +++ b/doc/src/docbkx/common/qemu.xml @@ -52,7 +52,7 @@ libvirt_type=qemu
Tips and fixes for QEMU on RHEL - If you are testing OpenStack in a virtual machine, you need + If you are testing OpenStack in a virtual machine, you need to configure nova to use qemu without KVM and hardware virtualization. The second command relaxes SELinux rules to allow this mode of operation @@ -61,7 +61,7 @@ RHEL 6.4. Note nested virtualization will be the much slower TCG variety, and you should provide lots of memory to the top level guest, as the OpenStack-created guests - default to 2GB RAM with no overcommit. + default to 2GB RAM with no overcommit. The second command, setsebool, may take a while. $ sudo openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_type qemu $ sudo setsebool -P virt_use_execmem on diff --git a/doc/src/docbkx/common/section_cli_baremetal.xml b/doc/src/docbkx/common/section_cli_baremetal.xml index 725ed35aee..c78b586f60 100644 --- a/doc/src/docbkx/common/section_cli_baremetal.xml +++ b/doc/src/docbkx/common/section_cli_baremetal.xml @@ -16,24 +16,24 @@ baremetal-interface-list - Lists network interfaces associated with a bare metal + Lists network interfaces associated with a bare metal node. baremetal-interface-remove - Removes a network interface from a bare metal + Removes a network interface from a bare metal node. baremetal-node-create - Creates a bare metal node. + Creates a bare metal node. baremetal-node-delete - Removes a bare metal node and any associated + Removes a bare metal node and any associated interfaces. @@ -42,7 +42,7 @@ baremetal-node-show - Shows information about a bare metal node. + Shows information about a bare metal node. To manage bare metal nodesCreate a bare metal node. $ nova baremetal-node-create --pm_address=1.2.3.4 --pm_user=ipmi --pm_password=ipmi $(hostname -f) 1 512 10 aa:bb:cc:dd:ee:ff diff --git a/doc/src/docbkx/common/section_cli_manage_images.xml b/doc/src/docbkx/common/section_cli_manage_images.xml index 9454d13850..55052495ab 100644 --- a/doc/src/docbkx/common/section_cli_manage_images.xml +++ b/doc/src/docbkx/common/section_cli_manage_images.xml @@ -7,9 +7,9 @@ During set up of OpenStack cloud, the cloud operator sets user permissions to manage images. Image upload and management might be restricted to only - cloud administrators or cloud operators. + cloud administrators or cloud operators. After you upload an image, it is considered golden and you cannot change it. + role="italic">golden and you cannot change it. You can upload images through the glance client or the Image Service API. You can also use the nova client to list images, set and delete image metadata, delete images, and take a @@ -96,12 +96,12 @@ To update an image by name or ID: $ glance image-update IMAGE - To modify image properties, use the following + To modify image properties, use the following optional arguments: --name NAME. The - name of the image. + name of the image. @@ -109,36 +109,36 @@ DISK_FORMAT. The disk format of the image. Acceptable formats are ami, ari, aki, vhd, vmdk, raw, qcow2, - vdi, and iso. + vdi, and iso. --container-format CONTAINER_FORMAT. The container format of the image. Acceptable - formats are ami, ari, aki, bare, and ovf. + formats are ami, ari, aki, bare, and ovf. --owner TENANT_ID. The tenant who - should own the image. + should own the image. --size SIZE. The - size of image data, in bytes. + size of image data, in bytes. --min-disk DISK_GB. The minimum size of disk needed to boot image, in - gigabytes. + gigabytes. --min-ram DISK_RAM.
The minimum amount of ram needed to boot image, in - megabytes. + megabytes. @@ -147,7 +147,7 @@ the data for this image resides. For example, if the image data is stored in swift, you could specify - swift://account:key@example.com/container/obj. + swift://account:key@example.com/container/obj. @@ -155,12 +155,12 @@ Local file that contains disk image to be uploaded during update. Alternatively, you can pass images to the client through - stdin. + stdin. --checksum CHECKSUM. Hash of image - data to use for verification. + data to use for verification. @@ -170,26 +170,26 @@ usage, but indicates that the Glance server should immediately copy the data and store it in its configured image - store. + store. --is-public [True|False]. Makes an - image accessible to the public. + image accessible to the public. --is-protected [True|False]. Prevents an - image from being deleted. + image from being deleted. --property KEY=VALUE. Arbitrary property to associate with image. Can be - used multiple times. + used multiple times. @@ -197,7 +197,7 @@ Deletes all image properties that are not explicitly set in the update request. Otherwise, those properties not referenced - are preserved. + are preserved. diff --git a/doc/src/docbkx/common/section_cli_openrc.xml b/doc/src/docbkx/common/section_cli_openrc.xml index c167800035..76c27d0e00 100644 --- a/doc/src/docbkx/common/section_cli_openrc.xml +++ b/doc/src/docbkx/common/section_cli_openrc.xml @@ -8,7 +8,7 @@ command-line clients, you must download and source an environment file, openrc.sh. It is project-specific and contains the credentials used by - OpenStack Compute, Image, and Identity services. + OpenStack Compute, Image, and Identity services. When you source the file and enter the password, environment variables are set for that shell. They allow the commands to communicate to the OpenStack services that run in the @@ -18,7 +18,7 @@ To download the OpenStack RC file - Log in to the OpenStack dashboard. + Log in to the OpenStack dashboard. On the Project tab, select the @@ -33,15 +33,15 @@ Copy the openrc.sh file to the - machine from where you want to run OpenStack commands. + machine from where you want to run OpenStack commands. For example, copy the file to the machine from where you want to upload an image with a glance client - command. + command. On any shell from where you want to run OpenStack commands, source the openrc.sh - file for the respective project. + file for the respective project. In this example, we source the demo-openrc.sh file for the demo project: diff --git a/doc/src/docbkx/common/section_cli_reboot.xml b/doc/src/docbkx/common/section_cli_reboot.xml index 2ee3f99565..5f5bc03384 100644 --- a/doc/src/docbkx/common/section_cli_reboot.xml +++ b/doc/src/docbkx/common/section_cli_reboot.xml @@ -6,7 +6,7 @@ You can perform a soft or hard reboot of a running instance. A soft reboot attempts a graceful shutdown and restart of the instance. A hard reboot power cycles the instance. - To reboot a server By default, when you reboot a server, it is a soft reboot. + To reboot a server By default, when you reboot a server, it is a soft reboot. $ nova reboot SERVER To perform a hard reboot, pass the --hard parameter, as follows:$ nova reboot --hard SERVER
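As a quick sketch with an illustrative server name, a default (soft) reboot and a status check look like this:
$ nova reboot myServer
$ nova list
While the instance restarts, its status shows as REBOOT (or HARD_REBOOT for a hard reboot) before returning to ACTIVE. Reserve the --hard parameter for cases where a graceful shutdown no longer responds, because it is the equivalent of power cycling the machine.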
diff --git a/doc/src/docbkx/common/section_dashboard_access.xml b/doc/src/docbkx/common/section_dashboard_access.xml index 2c38646ed2..e76ec8c32a 100644 --- a/doc/src/docbkx/common/section_dashboard_access.xml +++ b/doc/src/docbkx/common/section_dashboard_access.xml @@ -18,12 +18,12 @@ overview of the interface.
Log in to the dashboard @@ -39,11 +39,11 @@ The dashboard is available on the node that has the nova-dashboard - server role. + server role. The user name and password with which - you can log in to the dashboard. + you can log in to the dashboard. @@ -67,13 +67,13 @@ that is not considered trustworthy by default. In this case, verify the certificate. To proceed anyway, you can add an exception in - the browser to bypass the warning. + the browser to bypass the warning. On the dashboard log in page, enter your user name and password and click Sign - In. + In.
diff --git a/doc/src/docbkx/common/section_dashboard_install.xml b/doc/src/docbkx/common/section_dashboard_install.xml index 6ec2f678ae..65f5d3478d 100644 --- a/doc/src/docbkx/common/section_dashboard_install.xml +++ b/doc/src/docbkx/common/section_dashboard_install.xml @@ -6,7 +6,7 @@ Install the dashboard The following instructions show an example dashboard - deployment configured with an Apache web server. + deployment configured with an Apache web server. To install the OpenStack dashboard, complete the following high-level steps: @@ -36,7 +36,7 @@ following tasks: To customize your dashboard, see . + linkend="dashboard-custom-brand"/>. To set up session storage for the dashboard, see diff --git a/doc/src/docbkx/common/section_dashboard_launch_instances_from_image.xml b/doc/src/docbkx/common/section_dashboard_launch_instances_from_image.xml index 1b97526250..229ba5c46d 100644 --- a/doc/src/docbkx/common/section_dashboard_launch_instances_from_image.xml +++ b/doc/src/docbkx/common/section_dashboard_launch_instances_from_image.xml @@ -56,7 +56,7 @@ keypair with an external tool, you can import it into OpenStack. You can use the keypair for multiple instances that belong to that - project. + project. A security @@ -65,7 +65,7 @@ instances. Security groups hold a set of firewall policies, known as security group - rules. + rules. @@ -84,14 +84,14 @@ If you are a member of multiple projects, select a project from the drop-down list at the top of the - Project tab. + Project tab. Click the Images & Snapshot - category. + category. The dashboard shows the images that have been uploaded to OpenStack Image Service and are available - for this project. + for this project. Select an image and click @@ -114,24 +114,24 @@ Enter an instance name to assign to the - virtual machine. + virtual machine. From the Flavor drop-down list, select the size of the virtual - machine to launch. + machine to launch. - Optionally, select a keypair. + Optionally, select a keypair. In case an image uses a static root password or a static key set (neither is recommended), you do not need to provide a keypair on - starting the instance. + starting the instance. In Instance Count, enter the number of virtual machines to launch - from this image. + from this image. Assign the instance to the default security @@ -142,14 +142,14 @@ If you want to boot from volume, click the respective entry to expand its options. Set the options as described in Launching - Instances from a Volume. + Instances from a Volume. --> Click Launch Instance. The instance is launched on any of the compute nodes in - the cloud. + the cloud.
After you have launched an instance, switch to the @@ -168,5 +168,5 @@ If you did not provide a keypair on starting and have not touched security groups or rules so far, by default the instance can only be accessed from inside the cloud through - VNC at this point. Even pinging the instance is not possible. + VNC at this point. Even pinging the instance is not possible.
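If you want to reach such an instance over the network later, you can open ICMP (ping) and SSH in the default security group with the nova client, for example (the any-source CIDR 0.0.0.0/0 is only an illustration; narrow it for real deployments):
$ nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
$ nova secgroup-add-rule default tcp 22 22 0.0.0.0/0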
diff --git a/doc/src/docbkx/common/section_dashboard_overview.xml b/doc/src/docbkx/common/section_dashboard_overview.xml index ae20d2f622..1e04964ef2 100644 --- a/doc/src/docbkx/common/section_dashboard_overview.xml +++ b/doc/src/docbkx/common/section_dashboard_overview.xml @@ -27,15 +27,15 @@ The top-level row shows the user name that you logged in with. You can also access Settings or Sign - Out of the Web interface. + Out of the Web interface. The visible tabs and functions in the dashboard depend on the access permissions of the user that - is logged in. They are defined by roles. + is logged in. They are defined by roles. If you are logged in as an end user rather than an admin user, the main screen shows only the - Project tab. + Project tab. This tab shows details for the projects, or tenants, of which you are a member. Select a project from the drop-down list on the @@ -53,7 +53,7 @@ Instances Lists instances and volumes created by - users of the project. + users of the project. From here, you can stop, pause, or reboot any instances or connect to them through virtual network computing (VNC). @@ -64,9 +64,9 @@ Volumes Lists volumes created by users of the - project. + project. From here, you can create or delete - volumes. + volumes. @@ -89,13 +89,13 @@ On the Security Groups tab, you can list, create, and delete security groups and - edit rules for security groups. + edit rules for security groups. On the Keypairs tab, you can list, create, and import - keypairs, and delete keypairs. + keypairs, and delete keypairs. On the Floating IPs tab, you can allocate an IP address to or - release it from a project. + release it from a project. On the API Access tab, you can list the API endpoints. diff --git a/doc/src/docbkx/common/storage-concepts.xml b/doc/src/docbkx/common/storage-concepts.xml index 03326dfdec..561376a364 100644 --- a/doc/src/docbkx/common/storage-concepts.xml +++ b/doc/src/docbkx/common/storage-concepts.xml @@ -55,7 +55,7 @@ - Other points of note include: + Other points of note include: OpenStack Object Storage is not used like a traditional hard drive. Object storage is all @@ -64,7 +64,7 @@ http). This is a good idea as if you don't have to provide atomic operations (that is, you can rely on eventual consistency), you can much more easily scale a storage - system and avoid a central point of failure. + system and avoid a central point of failure. The OpenStack Image Service is used to manage diff --git a/doc/src/docbkx/common/support-compute.xml b/doc/src/docbkx/common/support-compute.xml index ebdf55acd8..10f154bc7e 100644 --- a/doc/src/docbkx/common/support-compute.xml +++ b/doc/src/docbkx/common/support-compute.xml @@ -39,7 +39,7 @@ If it gets started out of order, you may not be able to create your zip file. Once your CA information is available, you should be able to go back to nova-manage to - create your zipfile. + create your zipfile. You may also need to check your proxy settings to see if they are causing problems with the novarc creation. Instance errors @@ -79,10 +79,10 @@ Check the file sizes to see if they are reasonable. If any are missing/zero/very small then nova-compute has somehow not completed download of the images from - objectstore. + objectstore. Also check nova-compute.log for exceptions. Sometimes - they don't show up in the console output. - Next, check the /var/log/libvirt/qemu/i-ze0bnh1q.log + they don't show up in the console output. 
+ Next, check the /var/log/libvirt/qemu/i-ze0bnh1q.log file to see if it exists and has any useful error messages in it. diff --git a/doc/src/docbkx/common/support-object-storage.xml b/doc/src/docbkx/common/support-object-storage.xml index 6f231f41f4..2143b32751 100644 --- a/doc/src/docbkx/common/support-object-storage.xml +++ b/doc/src/docbkx/common/support-object-storage.xml @@ -6,7 +6,7 @@ For OpenStack Object Storage, everything is logged in /var/log/syslog (or messages on some distros). Several settings enable further customization of logging, such as log_name, log_facility, and log_level, within the object server configuration files.
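As a sketch, such logging customization might look like the following excerpt from the [DEFAULT] section of an object server configuration file (the values shown are the usual defaults; adjust them to your syslog setup):
[DEFAULT]
log_name = object-server
log_facility = LOG_LOCAL0
log_level = INFO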
Handling Drive Failure - In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up. + In the event that a drive has failed, the first step is to make sure the drive is unmounted. This will make it easier for OpenStack Object Storage to work around the failure until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up. If the drive can’t be replaced immediately, then it is best to leave it unmounted, and remove the drive from the ring. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive is replaced. Once the drive is replaced, it can be re-added to the ring. Rackspace has seen hints at drive failures by looking at error messages in /var/log/kern.log - do consider checking this in your monitoring @@ -41,7 +41,7 @@
Emergency Recovery of Ring Builder Files - You should always keep a backup of Swift ring builder files. + You should always keep a backup of Swift ring builder files. However, if an emergency occurs, this procedure may assist in returning your cluster to an operational state. Using existing Swift tools, there is no way to recover a builder @@ -50,7 +50,7 @@ the one you have lost. The following is what you will need to do. Warning This procedure is a last-resort for emergency circumstances - it - requires knowledge of the swift python code and may not succeed. + requires knowledge of the swift python code and may not succeed. First, load the ring and a new ringbuilder object in a Python REPL: >>> from swift.common.ring import RingData, RingBuilder diff --git a/doc/src/docbkx/common/support.xml b/doc/src/docbkx/common/support.xml index f573029853..e1f9dc9cbd 100644 --- a/doc/src/docbkx/common/support.xml +++ b/doc/src/docbkx/common/support.xml @@ -22,7 +22,7 @@ question. Be sure you give a clear, concise summary in the title and provide as much detail as possible in the description. Paste in your command output or stack - traces, link to screenshots, and so on. + traces, link to screenshots, and so on. OpenStack mailing lists Posting your question or scenario to the OpenStack @@ -47,7 +47,7 @@ you are searching for specific information, say about "networking" or "api" for nova, you can find lots of content using the search feature. More is being added all the time, so be sure to check back often. You can find the search box in the upper right hand corner of any OpenStack wiki - page. + page. The Launchpad Bugs area So you think you've found a bug. That's great! Seriously, it is. The OpenStack community values your setup and testing efforts and wants your feedback. To log a bug you must @@ -56,36 +56,36 @@ Launchpad Bugs area. It is suggested that you first use the search facility to see if the bug you found has already been reported (or even better, already fixed). If it still seems like your bug is new or unreported then it is time to fill out a bug - report. + report. Some tips: Give a clear, concise summary! Provide as much detail as possible in the description. Paste in your command output or stack traces, link to - screenshots, etc. + screenshots, etc. Be sure to include what version of the software you are using. This is especially critical if you are using a development branch eg. "Grizzly - release" vs git commit bc79c3ecc55929bac585d04a03475b72e06a3208. + release" vs git commit bc79c3ecc55929bac585d04a03475b72e06a3208. Any deployment specific info is helpful as well, such as Ubuntu 12.04, multi-node install. 
The Launchpad Bugs areas are available here - : - OpenStack Compute: OpenStack Compute: https://bugs.launchpad.net/nova - OpenStack Object Storage: OpenStack Object Storage: https://bugs.launchpad.net/swift - OpenStack Image Delivery and Registration: OpenStack Image Delivery and Registration: https://bugs.launchpad.net/glance - OpenStack Identity: OpenStack Identity: https://bugs.launchpad.net/keystone - OpenStack Dashboard: OpenStack Dashboard: https://bugs.launchpad.net/horizon - OpenStack Network Connectivity: OpenStack Network Connectivity: https://bugs.launchpad.net/neutron diff --git a/doc/src/docbkx/common/tables/glance-api.xml b/doc/src/docbkx/common/tables/glance-api.xml index c48b441452..ce50995ecc 100644 --- a/doc/src/docbkx/common/tables/glance-api.xml +++ b/doc/src/docbkx/common/tables/glance-api.xml @@ -29,11 +29,11 @@ enable_v1_api=True - (BoolOpt)Deploy the v1 OpenStack Images API. + (BoolOpt)Deploy the v1 OpenStack Images API. enable_v2_api=True - (BoolOpt)Deploy the v2 OpenStack Images API. + (BoolOpt)Deploy the v2 OpenStack Images API. image_size_cap=1099511627776 diff --git a/doc/src/docbkx/common/tables/ldap-keystone-conf.xml b/doc/src/docbkx/common/tables/ldap-keystone-conf.xml index 2c31fda4a3..acef1771a1 100644 --- a/doc/src/docbkx/common/tables/ldap-keystone-conf.xml +++ b/doc/src/docbkx/common/tables/ldap-keystone-conf.xml @@ -19,28 +19,28 @@ user = dc=Manager,dc=example,dc=com - (StrOpt) User for the LDAP server to use as default. + (StrOpt) User for the LDAP server to use as default. - password = None - (StrOpt) Password for LDAP server to connect to. + password = None + (StrOpt) Password for LDAP server to connect to. suffix = cn=example,cn=com - (StrOpt) Default suffix for your LDAP server. + (StrOpt) Default suffix for your LDAP server. use_dumb_member = False (Bool) Indicates whether dumb_member settings are in use. allow_subtree_delete = False - (Bool) Determine whether to delete LDAP subtrees. + (Bool) Determine whether to delete LDAP subtrees. dumb_member = cn=dumb,dc=example,dc=com - Mockup member as placeholder, for testing purposes. + Mockup member as placeholder, for testing purposes. query_scope = one @@ -48,48 +48,48 @@ user_tree_dn = ou=Users,dc=example,dc=com - + user_filter = - + user_objectclass = inetOrgPerson - + user_id_attribute = cn - + user_name_attribute = sn - + user_mail_attribute = email - + user_pass_attribute = userPassword - + user_enabled_attribute = enabled Example, userAccountControl. Combines with user_enabled_mask and user_enabled_default settings below to extract the value from an integer - attribute like in Active Directory. + attribute like in Active Directory. 
user_enabled_mask = 0 - + user_enabled_default = True - + user_attribute_ignore = tenant_id,tenants - + user_allow_create = True @@ -97,16 +97,16 @@ user_allow_update = True - + user_allow_delete = True - + tenant_tree_dn = ou=Groups,dc=example,dc=com - + tenant_filter = @@ -115,149 +115,149 @@ tenant_objectclass = groupOfNames - + tenant_id_attribute = cn - + tenant_member_attribute = member - + tenant_name_attribute = ou - + tenant_desc_attribute = desc - + tenant_enabled_attribute = enabled - + tenant_attribute_ignore = - + tenant_allow_create = True - + tenant_allow_update = True - + tenant_allow_delete = True - + role_tree_dn = ou=Roles,dc=example,dc=com - + role_filter = - + role_objectclass = organizationalRole - + role_id_attribute = cn - + role_name_attribute = ou - + role_member_attribute = roleOccupant - + role_attribute_ignore = - + role_allow_create = True - + role_allow_update = True - + role_allow_delete = True - + group_tree_dn = - + group_filter = - + group_objectclass = groupOfNames - + group_id_attribute = cn - + group_name_attribute = ou - + group_member_attribute = member - + group_desc_attribute = desc - + group_attribute_ignore = - + group_allow_create = True - + group_allow_update = True - + group_allow_delete = True - + diff --git a/doc/src/docbkx/common/trusted-compute-pools.xml b/doc/src/docbkx/common/trusted-compute-pools.xml index 3ed4c6b7a1..7d964775c0 100644 --- a/doc/src/docbkx/common/trusted-compute-pools.xml +++ b/doc/src/docbkx/common/trusted-compute-pools.xml @@ -14,10 +14,10 @@ external standalone web-based remote attestation server, cloud providers can ensure that the compute node is running software with verified measurements, thus they can establish the foundation for the secure cloud stack. Through the Trusted Computing Pools, cloud - subscribers can request services to be run on verified compute nodes. + subscribers can request services to be run on verified compute nodes. The remote attestation server performs node verification through the following steps: - Compute nodes boot with Intel TXT technology enabled. + Compute nodes boot with Intel TXT technology enabled. The compute node's BIOS, hypervisor and OS are measured. @@ -27,7 +27,7 @@ attestation server. - The attestation server verifies those measurements against good/known + The attestation server verifies those measurements against good/known database to determine nodes' trustworthiness. diff --git a/doc/src/docbkx/common/user-data.xml b/doc/src/docbkx/common/user-data.xml index 863173b7fa..9eef03c395 100644 --- a/doc/src/docbkx/common/user-data.xml +++ b/doc/src/docbkx/common/user-data.xml @@ -44,7 +44,7 @@ configured to run a service on boot that retrieves the user data from the metadata service and take some action based on the contents of the data. The cloud-init package was designed to do exactly this. In particular, cloud-init is compatible with the - Compute metadata service as well as the Compute config drive. + Compute metadata service as well as the Compute config drive. Note that cloud-init is not an OpenStack technology. Rather, it is a package that is designed to support multiple cloud providers, so that the same virtual machine image can be used in different clouds without modification. 
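From inside a running instance, that retrieval amounts to an HTTP GET against the metadata service, which is essentially what cloud-init performs at boot. For example, over the EC2-compatible path (169.254.169.254 is the well-known link-local address of the Compute metadata service):
$ curl http://169.254.169.254/latest/user-data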
Cloud-init is an open source project, diff --git a/doc/src/docbkx/common/using-vnc-console.xml b/doc/src/docbkx/common/using-vnc-console.xml index 247675eae5..1dea9720cd 100644 --- a/doc/src/docbkx/common/using-vnc-console.xml +++ b/doc/src/docbkx/common/using-vnc-console.xml @@ -4,7 +4,7 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="using-vnc-console"> Using VNC Console - There are several methods to interact with the VNC console, +There are several methods to interact with the VNC console, using a VNC client directly, a special Java client, or through the web browser. For information about configuring the console, please refer here. @@ -20,9 +20,9 @@ provided by the nova client: $ nova get-vnc-console [server_id] [novnc|xvpvnc] Specify 'novnc' to get a URL suitable - for pasting into a web browser. + for pasting into a web browser. Specify 'xvpvnc' for a URL suitable for - pasting into the Java client. + pasting into the Java client. To request a web browser URL: $ nova get-vnc-console [server_id] novnc
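The command prints the console type and a URL; the output is shaped roughly like this (address and token are illustrative):
+-------+-----------------------------------------------------------+
| Type  | Url                                                       |
+-------+-----------------------------------------------------------+
| novnc | http://192.168.5.96:6080/vnc_auto.html?token=c83ae3a3-... |
+-------+-----------------------------------------------------------+
Paste the URL into a web browser to open the console.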
@@ -32,7 +32,7 @@ Access VNC consoles with a Java client To enable support for the OpenStack Java VNC client in - compute, run the nova-xvpvncproxy service. + compute, run the nova-xvpvncproxy service. xvpvncproxy_port=[port] @@ -51,7 +51,7 @@ $ make To create a session, request an access URL by using python-novaclient. Then, run the client - as follows. + as follows. To get an access URL: $ nova get-vnc-console [server_id] xvpvnc To run the client: @@ -63,7 +63,7 @@ Access a VNC console through a web browser Retrieving an access_url for a web browser is similar to - the flow for the Java client. + the flow for the Java client. To get the access URL, run the following command: $ nova get-vnc-console [server_id] novnc @@ -71,6 +71,6 @@ Additionally, you can use the OpenStack dashboard, known as horizon, to access browser-based VNC consoles for - instances. + instances.
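Putting the xvpvnc steps above together, and assuming the viewer jar produced by the make step is named VncViewer.jar (the exact jar name may differ in your build), a session looks like:
$ nova get-vnc-console myServer xvpvnc
$ java -jar VncViewer.jar <access_url>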
diff --git a/doc/src/docbkx/common/xapi-install-plugins.xml b/doc/src/docbkx/common/xapi-install-plugins.xml index 297b887b51..9af5dedf82 100644 --- a/doc/src/docbkx/common/xapi-install-plugins.xml +++ b/doc/src/docbkx/common/xapi-install-plugins.xml @@ -55,7 +55,7 @@ $ ./build-rpm.sh
These commands leave an .rpm file in the rpmbuild/RPMS/noarch/ - directory. Pack the RPM packages to a @@ -73,7 +73,7 @@ > full_path_to_rpmfileThis command produces an .iso file in the output directory specified. Copy that file - to the hypervisor. + to the hypervisor. Install the Supplemental Pack. Log diff --git a/doc/src/docbkx/common/xen-install.xml b/doc/src/docbkx/common/xen-install.xml index 341e4230cb..b20b1abcfe 100644 --- a/doc/src/docbkx/common/xen-install.xml +++ b/doc/src/docbkx/common/xen-install.xml @@ -8,7 +8,7 @@ an appropriate server. Xen is a type 1 hypervisor: When your server starts, Xen is the first software that runs. Consequently, you must install XenServer or XCP before you install the operating system on which you want to run OpenStack code. The OpenStack services then run in a virtual machine that you install on top of XenServer. - Before you can install your system you must decide if you want +Before you can install your system you must decide if you want to install Citrix XenServer (either the free edition, or one of the paid editions) or Xen Cloud Platform from Xen.org. You can download the software from the following diff --git a/doc/src/docbkx/docbkx-example/src/docbkx/example.xml b/doc/src/docbkx/docbkx-example/src/docbkx/example.xml index 7824b470a1..f3012d3c81 100644 --- a/doc/src/docbkx/docbkx-example/src/docbkx/example.xml +++ b/doc/src/docbkx/docbkx-example/src/docbkx/example.xml @@ -30,7 +30,7 @@ - This document is intended for individuals who wish to produce documentation using Maven and having + This document is intended for individuals who wish to produce documentation using Maven and having the same "feel" as the documentation that is produced by the mainline OpenStack projects. @@ -88,7 +88,7 @@ Additional Resources - + Openstack - Cloud Software @@ -124,7 +124,7 @@ Notes and including images So I want a note and an image in this section ... - This is an example of a note. + This is an example of a note. Here's a sample figure in svg format. The build will convert it to png: @@ -164,7 +164,7 @@ then follow the link. - For the pom.xml file that was included in this distribution we will + For the pom.xml file that was included in this distribution we will parse the individual lines and explain the meaning. diff --git a/doc/src/docbkx/openstack-admin-user/src/ch_overview.xml b/doc/src/docbkx/openstack-admin-user/src/ch_overview.xml index f7e4a3cbdf..2b20c460d3 100644 --- a/doc/src/docbkx/openstack-admin-user/src/ch_overview.xml +++ b/doc/src/docbkx/openstack-admin-user/src/ch_overview.xml @@ -17,7 +17,7 @@ command-line clients let you run simple commands to create and manage resources in a cloud and automate tasks by using scripts. Each of the core OpenStack projects has its own - command-line client. + command-line client. You can modify these examples for your specific use cases. In addition to these ways of interacting with a cloud, you diff --git a/doc/src/docbkx/openstack-admin-user/src/section_cli_floating_ips.xml b/doc/src/docbkx/openstack-admin-user/src/section_cli_floating_ips.xml index 78210bc069..4af084c9cd 100644 --- a/doc/src/docbkx/openstack-admin-user/src/section_cli_floating_ips.xml +++ b/doc/src/docbkx/openstack-admin-user/src/section_cli_floating_ips.xml @@ -5,27 +5,27 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> Manage IP addresses Each instance can have a private, or fixed, IP address and a - public, or floating, one.
Private IP addresses are used for communication between instances, and public ones are used for communication with the - outside world. + outside world. When you launch an instance, it is automatically assigned a private IP address that stays the same until you explicitly terminate the instance. Rebooting an instance has no effect on the private IP address. A pool of floating IPs, configured by the cloud operator, is - available in OpenStack Compute. + available in OpenStack Compute. You can allocate a certain number of these to a project: The maximum number of floating IP addresses per project is defined - by the quota. + by the quota. You can add a floating IP address from this set to an instance of the project. Floating IP addresses can be dynamically disassociated and associated with other instances - of the same project at any time. + of the same project at any time. Before you can assign a floating IP address to an instance, you first must allocate floating IPs to a project. After floating IP addresses have been allocated to the current - project, you can assign them to running instances. + project, you can assign them to running instances. One floating IP address can be assigned to only one instance at a time. Floating IP addresses can be managed with the nova *floating-ip-* commands, provided @@ -84,7 +84,7 @@ with:$ nova floating-ip-listIn addition, you must know the instance's name (or ID). To look up the instances that belong to the current - project, use the nova list command. + project, use the nova list command. $ nova add-floating-ip INSTANCE_NAME_OR_ID FLOATING_IP After you assign the IP with nova add-floating-ip and configure security group rules for the instance, the instance is diff --git a/doc/src/docbkx/openstack-admin-user/src/section_cli_manage_images.xml b/doc/src/docbkx/openstack-admin-user/src/section_cli_manage_images.xml index d0a52fda3f..503174cbf5 100644 --- a/doc/src/docbkx/openstack-admin-user/src/section_cli_manage_images.xml +++ b/doc/src/docbkx/openstack-admin-user/src/section_cli_manage_images.xml @@ -7,9 +7,9 @@ During set up of OpenStack cloud, the cloud operator sets user permissions to manage images. Image upload and management might be restricted to only - cloud administrators or cloud operators. + cloud administrators or cloud operators. After you upload an image, it is considered golden and you cannot change it. + role="italic">golden and you cannot change it. You can upload images through the glance client or the Image Service API. You can also use the nova client to list images, set and delete image metadata, delete images, and take a @@ -96,12 +96,12 @@ To update an image by name or ID: $ glance image-update IMAGE - To modify image properties, use the following + To modify image properties, use the following optional arguments: --name NAME. The - name of the image. + name of the image. @@ -109,36 +109,36 @@ DISK_FORMAT. The disk format of the image. Acceptable formats are ami, ari, aki, vhd, vmdk, raw, qcow2, - vdi, and iso. + vdi, and iso. --container-format CONTAINER_FORMAT. The container format of the image. Acceptable - formats are ami, ari, aki, bare, and ovf. + formats are ami, ari, aki, bare, and ovf. --owner TENANT_ID. The tenant who - should own the image. + should own the image. --size SIZE. The - size of image data, in bytes. + size of image data, in bytes. --min-disk DISK_GB. The minimum size of disk needed to boot image, in - gigabytes. + gigabytes. --min-ram DISK_RAM. The minimum amount of ram needed to boot image, in - megabytes. 
+ megabytes. @@ -147,7 +147,7 @@ the data for this image resides. For example, if the image data is stored in swift, you could specify - swift://account:key@example.com/container/obj. + swift://account:key@example.com/container/obj. @@ -155,12 +155,12 @@ Local file that contains disk image to be uploaded during update. Alternatively, you can pass images to the client through - stdin. + stdin. --checksum CHECKSUM. Hash of image - data to use for verification. + data to use for verification. @@ -170,26 +170,26 @@ usage, but indicates that the Glance server should immediately copy the data and store it in its configured image - store. + store. --is-public [True|False]. Makes an - image accessible to the public. + image accessible to the public. --is-protected [True|False]. Prevents an - image from being deleted. + image from being deleted. --property KEY=VALUE. Arbitrary property to associate with image. Can be - used multiple times. + used multiple times. @@ -197,7 +197,7 @@ Deletes all image properties that are not explicitly set in the update request. Otherwise, those properties not referenced - are preserved. + are preserved. diff --git a/doc/src/docbkx/openstack-admin-user/src/section_cli_nova.xml b/doc/src/docbkx/openstack-admin-user/src/section_cli_nova.xml index d800a6bea3..d8a6fa201a 100644 --- a/doc/src/docbkx/openstack-admin-user/src/section_cli_nova.xml +++ b/doc/src/docbkx/openstack-admin-user/src/section_cli_nova.xml @@ -143,7 +143,7 @@ $ nova reboot <server> --hard Where server is the - server ID or name. + server ID or name. Omit the option to perform a soft reboot. @@ -171,7 +171,7 @@ Where server is the server ID or name and flavor is the ID or - name of the new flavor. + name of the new flavor. Specify the optional option to block while the instance resizes so that progress can be reported. @@ -497,7 +497,7 @@ . Optional. Opts out of attaching - PublicNet to your server. + PublicNet to your server. RackConnect and Managed Cloud customers will receive an error if @@ -540,7 +540,7 @@ is written to /meta.js on the new server. Can be specified - multiple times. + multiple times. @@ -565,19 +565,19 @@ user-data. User data file, which is exposed by the - metadata server. + metadata server. availability-zone. The availability zone for instance - placement. + placement. security_groups. A comma-separated list of security - group names. + group names. @@ -602,15 +602,15 @@ NIC with a specified UUID to a network, specify the =private-net-id - parameter. + parameter. Optionally, specify the ip-addr parameter to specify an IPv4 fixed - address for NIC. + address for NIC. If you do not specify any networks on the parameter, the Internet and ServiceNet - are attached to your server. + are attached to your server. ServiceNet is labeled as private and the @@ -641,12 +641,12 @@ value. Enables - a configuration drive. + a configuration drive. . Blocks while the instance builds so progress - can be reported. + can be reported. For example, you might issue the following diff --git a/doc/src/docbkx/openstack-admin-user/src/section_cli_set_quotas.xml b/doc/src/docbkx/openstack-admin-user/src/section_cli_set_quotas.xml index bf98817972..601727c75e 100644 --- a/doc/src/docbkx/openstack-admin-user/src/section_cli_set_quotas.xml +++ b/doc/src/docbkx/openstack-admin-user/src/section_cli_set_quotas.xml @@ -20,58 +20,58 @@ Metadata Items - Number of metadata items per instance. + Number of metadata items per instance. Injected Files - Number of injected files. + Number of injected files. 
Injected File Content Bytes - Number of bytes per injected file. + Number of bytes per injected file. VCPUs Number of virtual CPUs that can be allocated in - total. + total. Instances - Total number of instances. + Total number of instances. Volumes - Total number of volumes. + Total number of volumes. Gigabytes Total size of all volumes, measured in - gigabytes. + gigabytes. RAM (in MB) Total RAM size of all instances, measured in - megabytes. + megabytes. Floating IPs - Total number of floating IP addresses. + Total number of floating IP addresses. @@ -84,13 +84,13 @@ Security Groups - Number of security groups. + Number of security groups. You can manage quotas with the nova quota-* commands, which are provided by - the python-novaclient package. + the python-novaclient package. To show default quota values @@ -115,7 +115,7 @@ To set quota values for a project Set the quota value for the instances parameter to - 2. + 2. $ nova quota-update --instances 2 PROJECT_ID  To view a list of options for the quota-update command, diff --git a/doc/src/docbkx/openstack-admin-user/src/section_cli_swift.xml b/doc/src/docbkx/openstack-admin-user/src/section_cli_swift.xml index 76000a4ef5..df69200c42 100644 --- a/doc/src/docbkx/openstack-admin-user/src/section_cli_swift.xml +++ b/doc/src/docbkx/openstack-admin-user/src/section_cli_swift.xml @@ -150,7 +150,7 @@ $ swift reboot <server> --hard Where server is the - server ID or name. + server ID or name. Omit the option to perform a soft reboot. @@ -178,7 +178,7 @@ Where server is the server ID or name and flavor is the ID or - name of the new flavor. + name of the new flavor. Specify the optional option to block while the instance resizes so that progress can be reported. @@ -504,7 +504,7 @@ . Optional. Opts out of attaching - PublicNet to your server. + PublicNet to your server. RackConnect and Managed Cloud customers will receive an error if @@ -547,7 +547,7 @@ is written to /meta.js on the new server. Can be specified - multiple times. + multiple times. @@ -572,19 +572,19 @@ user-data. User data file, which is exposed by the - metadata server. + metadata server. availability-zone. The availability zone for instance - placement. + placement. security_groups. A comma-separated list of security - group names. + group names. @@ -609,15 +609,15 @@ NIC with a specified UUID to a network, specify the =private-net-id - parameter. + parameter. Optionally, specify the ip-addr parameter to specify an IPv4 fixed - address for NIC. + address for NIC. If you do not specify any networks on the parameter, the Internet and ServiceNet - are attached to your server. + are attached to your server. ServiceNet is labeled as private and the @@ -648,12 +648,12 @@ value. Enables - a configuration drive. + a configuration drive. . Blocks while the instance builds so progress - can be reported. + can be reported. For example, you might issue the following diff --git a/doc/src/docbkx/openstack-admin-user/src/section_dashboard_set_quotas.xml b/doc/src/docbkx/openstack-admin-user/src/section_dashboard_set_quotas.xml index 1af9e9e863..1d84299a1e 100644 --- a/doc/src/docbkx/openstack-admin-user/src/section_dashboard_set_quotas.xml +++ b/doc/src/docbkx/openstack-admin-user/src/section_dashboard_set_quotas.xml @@ -28,58 +28,58 @@ Metadata Items - Number of metadata items per instance. + Number of metadata items per instance. Injected Files - Number of injected files. + Number of injected files. Injected File Content Bytes - Number of bytes per injected file. 
+ Number of bytes per injected file. VCPUs Number of virtual CPUs that can be allocated in - total. + total. Instances - Total number of instances. + Total number of instances. Volumes - Total number of volumes. + Total number of volumes. Gigabytes Total size of all volumes, measured in - gigabytes. + gigabytes. RAM (in MB) Total RAM size of all instances, measured in - megabytes. + megabytes. Floating IPs - Total number of floating IP addresses. + Total number of floating IP addresses. @@ -92,7 +92,7 @@ Security Groups - Number of security groups. + Number of security groups. @@ -100,28 +100,28 @@ The Quota category shows you the global default quota values that are hard-coded in OpenStack Nova. However, you cannot set quota values in the - Quota category. + Quota category.
Set quotas for a project To set quotas for a project On the Admin tab, select the - Projects category. + Projects category. Select the project for which to set or change - quota values. + quota values. From the Actions drop-down - list, select Modify Quota. + list, select Modify Quota. A window shows the default quota values for each - project, which are hard-coded in OpenStack Nova. + project, which are hard-coded in OpenStack Nova. Change the values for the quota parameters as - desired. + desired. - Confirm your changes. + Confirm your changes.
diff --git a/doc/src/docbkx/openstack-block-storage-admin/add-volume-node.xml b/doc/src/docbkx/openstack-block-storage-admin/add-volume-node.xml index bf371e69fd..22db83cbcc 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/add-volume-node.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/add-volume-node.xml @@ -3,12 +3,12 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0"> Adding Block Storage Nodes - To offer more storage to your tenant's VMs, add another volume node running cinder services by following these steps. + To offer more storage to your tenant's VMs, add another volume node running cinder services by following these steps. Install the required packages for cinder. Create a volume group called cinder-volumes (configurable using the - cinder_volume parameter in cinder.conf). + cinder_volume parameter in cinder.conf). Configure tgtd with its targets.conf file and start the tgtd service. @@ -18,7 +18,7 @@ Make sure the iscsi_ip_address setting in cinder.conf matches the public IP of the node you're installing, then restart - the cinder services. + the cinder services. - When you issue a cinder-manage host list command you should see the new volume node listed. If not, look at the logs in /var/log/cinder/volume.log for issues. + When you issue a cinder-manage host list command you should see the new volume node listed. If not, look at the logs in /var/log/cinder/volume.log for issues.
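The listing is a simple two-column host/zone table; illustrative output might look like this (host names are placeholders):
$ cinder-manage host list
host                            zone
controller                      nova
newvolumenode                   nova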
diff --git a/doc/src/docbkx/openstack-block-storage-admin/backup-block-storage-disks.xml b/doc/src/docbkx/openstack-block-storage-admin/backup-block-storage-disks.xml index e1e7f2ac87..ea8d90f9ed 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/backup-block-storage-disks.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/backup-block-storage-disks.xml @@ -16,11 +16,11 @@ lvm2, directly - manipulates the volumes. + manipulates the volumes. kpartx discovers the - partition table created inside the instance. + partition table created inside the instance. tar creates a @@ -44,7 +44,7 @@ volume itself. Remember the volumes created through a nova volume-create - exist in an LVM's logical volume. + exist in an LVM's logical volume. Before creating the snapshot, ensure that you have enough space to save it. As a precaution, you @@ -65,7 +65,7 @@ end of the section, we will present a script that you could use to create scheduled backups. The script itself exploits what we - discuss here. + discuss here. First, create the snapshot; this can be achieved while the volume is attached to an instance: @@ -89,7 +89,7 @@ the snapshot volume. As a precaution, the size should be the same as that of the original volume, even if we know the whole space is not - currently used by the snapshot. + currently used by the snapshot. We now have a full snapshot, and it only took a few seconds! Run lvdisplay again to verify the snapshot. You should now see your @@ -142,14 +142,14 @@ If we want to exploit that snapshot with the tar program, we first - need to mount our partition on the Block Storage server. + need to mount our partition on the Block Storage server. kpartx is a small utility which performs partition table discovery, and maps the partitions. It can be used to view partitions created inside the instance. Without using the partitions created inside instances, we won't be able to see its content and create - efficient backups. + efficient backups. $ kpartx -av /dev/nova-volumes/volume-00000001-snapshot @@ -173,7 +173,7 @@ several partitions; for example, nova--volumes-volume--00000001--snapshot2, nova--volumes-volume--00000001--snapshot3 - and so forth. + and so forth. We can now mount our partition: @@ -190,24 +190,24 @@ filesystem) there could be two causes: - You didn't allocate enough + You didn't allocate enough space for the snapshot kpartx was unable to discover the partition - table. + table. Allocate more space to the - snapshot and try the process again. + snapshot and try the process again. 3- Use tar in order to create archives - Now that the volume has been mounted, + Now that the volume has been mounted, you can create a backup of it: @@ -228,7 +228,7 @@ You should always have the checksum for your backup files. The checksum is a - unique identifier for a file. + unique identifier for a file. When you transfer that same file over the network, you can run another checksum calculation. If the checksums are @@ -246,7 +246,7 @@ sha1sum should be used carefully, since the required time for the calculation is directly - proportional to the file's size. + proportional to the file's size. For files larger than ~4-6 gigabytes, and depending on your CPU, the process may take a long time.
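Pulling the steps of this section together, one complete pass of the backup flow can be sketched as follows (the volume, snapshot, and mount-point names are the illustrative ones used above; the snapshot size depends on your volume):
$ lvcreate --size 10G --snapshot --name volume-00000001-snapshot /dev/nova-volumes/volume-00000001
$ kpartx -av /dev/nova-volumes/volume-00000001-snapshot
$ mount /dev/mapper/nova--volumes-volume--00000001--snapshot1 /mnt
$ tar --exclude="lost+found" -czf volume-00000001.tar.gz -C /mnt .
$ sha1sum volume-00000001.tar.gz > volume-00000001.checksum
$ umount /mnt
$ kpartx -dv /dev/nova-volumes/volume-00000001-snapshot
$ lvremove -f /dev/nova-volumes/volume-00000001-snapshot
The last three commands clean up by unmounting and removing the snapshot once the archive and its checksum exist.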
diff --git a/doc/src/docbkx/openstack-block-storage-admin/bk-block-storage-adminguide.xml b/doc/src/docbkx/openstack-block-storage-admin/bk-block-storage-adminguide.xml index 12f84fe3f5..9acdfe5119 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/bk-block-storage-adminguide.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/bk-block-storage-adminguide.xml @@ -32,7 +32,7 @@ This document is for system administrators of the - OpenStack Block Storage Service. + OpenStack Block Storage Service. diff --git a/doc/src/docbkx/openstack-block-storage-admin/block-storage-manage-volumes.xml b/doc/src/docbkx/openstack-block-storage-admin/block-storage-manage-volumes.xml index 97aeedb4a4..2269dc36d5 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/block-storage-manage-volumes.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/block-storage-manage-volumes.xml @@ -16,14 +16,14 @@ storage solution like SAN or NFS, where you can attach a volume to multiple servers. With the OpenStack Block Storage service, you can attach a volume to only one - instance at a time. + instance at a time. The OpenStack Block Storage service also provides drivers that enable you to use several vendors' back-end storage devices, in addition to or instead of the base LVM implementation. The following high-level procedure shows you how to create - and attach a volume to a server instance. + and attach a volume to a server instance. To create and attach a volume to a server instance: @@ -65,7 +65,7 @@ and labels it as LVM. The network uses FlatManager is the NetworkManager setting for - OpenStack Compute (Nova). + OpenStack Compute (Nova). Please note that the network mode doesn't interfere at all with the way cinder works, but networking must be set up for cinder to work. Please refer to cinder-backup Provides a means to back up a Cinder Volume to - OpenStack Object Store (SWIFT). + OpenStack Object Store (SWIFT). The OpenStack Block Storage service contains the following @@ -76,7 +76,7 @@ password are assigned per user. Key pairs granting access to a volume are enabled per user, but quotas to control resource consumption across available hardware - resources are per tenant. + resources are per tenant. For tenants, quota controls are available to limit: diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/ceph-rbd-volume-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/ceph-rbd-volume-driver.xml index 6f021c65df..43635ed91e 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/ceph-rbd-volume-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/ceph-rbd-volume-driver.xml @@ -78,7 +78,7 @@ consistency of the data. In an ideal setup you need to run at least 3 ceph-mon daemons, on - separate servers. + separate servers. Ceph developers recommend you use btrfs as a @@ -134,7 +134,7 @@ For detailed installation instructions and benchmarking information, see http://www.sebastien-han.fr/blog/2012/06/10/introducing-ceph-to-openstack/. + >http://www.sebastien-han.fr/blog/2012/06/10/introducing-ceph-to-openstack/.
diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/glusterfs-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/glusterfs-driver.xml index 1a844190a8..781cddeb00 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/glusterfs-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/glusterfs-driver.xml @@ -11,13 +11,13 @@ homepage. This driver enables use of GlusterFS in a similar fashion as the NFS driver. It supports basic volume operations, and like - NFS, does not support snapshot/clone. + NFS, does not support snapshot/clone. You must use a Linux kernel of version 3.4 or greater (or version 2.6.32 or greater in RHEL/CentOS 6.3+) when working with Gluster-based volumes. See Bug 1177103 for more information. + >Bug 1177103 for more information. To use Cinder with GlusterFS, first set the volume_driver in @@ -104,7 +104,7 @@ volume_driver=cinder.volume.drivers.glusterfs.GlusterfsDriver "dd" or a similar command to create the full-sized file, so volume creation takes a greater amount of - time. + time. diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/hds-volume-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/hds-volume-driver.xml index 2a082562ad..dbb082b9ac 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/hds-volume-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/hds-volume-driver.xml @@ -51,7 +51,7 @@ Configuration HDS driver supports the concept of differentiated services, - Not to be confused with + Not to be confused with Cinder volume service where volume type can be associated with the fine tuned performance characteristics of HDP -- the @@ -67,10 +67,10 @@ Configuration is read from an xml format file. Its sample is shown below, for single backend and for multi-backend cases. - HUS configuration file is + HUS configuration file is read at the start of cinder-volume service. Any configuration changes after that will require a service restart. - It is not recommended to + It is not recommended to manage a HUS array simultaneously from multiple cinder instances or servers. It is okay to run manage multiple HUS arrays using multiple cinder @@ -259,12 +259,12 @@ HDS volume driver. Four differentiated service labels are predefined: svc_0, svc_1, svc_2, svc_3 There is no relative precedence + xml:id='hds-no-weight'>There is no relative precedence or weight amongst these four labels.. Each such service label in turn associates with the following parameters/tags: - volume-types: A + volume-types: A create_volume call with a certain volume type shall be matched up with this tag. default is special in that @@ -275,9 +275,9 @@ occur in volume creation. - HDP, the pool ID + HDP, the pool ID associated with the service. - + An iSCSI port dedicated to the service. @@ -286,7 +286,7 @@ svc_1, svc_2, svc_3) associated with it. But any mix of these four service labels can be used in the same instance - get_volume_stats() shall always provide the + get_volume_stats() shall always provide the available capacity based on the combined sum of all the HDPs used in these services labels.. 
diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/hp-lefthand-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/hp-lefthand-driver.xml index 4120c8e70e..87dc2b1f82 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/hp-lefthand-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/hp-lefthand-driver.xml @@ -48,7 +48,7 @@ Configuring the VSA In addition to configuring the cinder-volume service some pre configuration - has to happen on the VSA for proper functioning in an Openstack environment. + has to happen on the VSA for proper functioning in an Openstack environment. diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/ibm-storwize-svc-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/ibm-storwize-svc-driver.xml index e9dcd2f386..04b9f239c6 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/ibm-storwize-svc-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/ibm-storwize-svc-driver.xml @@ -30,7 +30,7 @@ If using iSCSI, ensure that the compute nodes have iSCSI network access to the Storwize family - or SVC system. + or SVC system. OpenStack Nova's Grizzly version supports iSCSI @@ -56,7 +56,7 @@ If using FC, ensure that the compute nodes have FC connectivity to the Storwize family - or SVC system. + or SVC system. @@ -68,13 +68,13 @@ iSCSI connections. CHAP secrets are not added to existing - hosts. + hosts. CHAP secrets are passed from Cinder to Nova in clear text. This communication should be secured to ensure that CHAP secrets are not - discovered. + discovered. @@ -89,7 +89,7 @@ Details about the configuration flags and how to provide the flags to the driver appear in the - next section. + next section. Configuring user authentication for the driver @@ -105,12 +105,12 @@ provided by the <literal>san_ssh_port</literal> flag. By default, the port value is configured to - be port 22 (SSH). </para> + be port 22 (SSH).</para> <note> <para>Make sure the compute node running the nova-volume management driver has SSH network access to - the storage system. </para> + the storage system.</para> </note> <para>To allow the driver to communicate with the Storwize family or SVC system, @@ -125,12 +125,12 @@ storage and security administrator regarding the preferred authentication method and how passwords or SSH keys should be stored in a - secure manner. </para> + secure manner.</para> <note> <para>When creating a new user on the Storwize or SVC system, make sure the user belongs to the Administrator group or to another group - that has an Administrator role. </para> + that has an Administrator role.</para> </note> <para>If using password authentication, assign a password to the user on the Storwize or SVC @@ -154,7 +154,7 @@ The private key should be provided to the driver using the <literal>san_private_key</literal> - configuration flag. </para> + configuration flag.</para> </simplesect> <simplesect> <title>Creating a SSH key pair using OpenSSH @@ -183,7 +183,7 @@ ssh-keygen -t rsa configuration flag. The public key should be uploaded to the Storwize family or SVC system using the storage management GUI or command - line interface. + line interface. Ensure that Cinder has read permissions on the private key file. @@ -209,7 +209,7 @@ volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver The following options specify default values for all volumes. Some can be over-ridden using volume types, which - are described below. + are described below. 
@@ -426,7 +426,7 @@ volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver or SVC system to prepare a new FlashCopy mapping. The driver accepts a maximum wait time of 600 - seconds (10 minutes). + seconds (10 minutes). (seconds) @@ -481,7 +481,7 @@ enabled_backends = v7k1,v7k2 common options are placed under [DEFAULT], while options specific to a back-end are placed in the appropriate - section. + section.By default, volumes will be allocated between back-ends to balance allocated space. @@ -493,7 +493,7 @@ enabled_backends = v7k1,v7k2 and can be managed using the cinder client, using the type-create, type-delete, type-key, and type-list - arguments. + arguments.The extra specs keys which have the "capabilities" prefix (called "scope") are interpreted by the diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/netapp-volume-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/netapp-volume-driver.xml index fc58bef2c1..687c9e96ca 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/netapp-volume-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/netapp-volume-driver.xml @@ -17,7 +17,7 @@ clustered Data ONTAP systems provide OpenStack compute instances with access to NetApp 7-Mode storage controllers and clustered Data ONTAP - storage systems. + storage systems.
NetApp iSCSI driver for 7-Mode storage controller The NetApp iSCSI driver for 7-Mode is a @@ -30,7 +30,7 @@ additional NetApp management software, namely OnCommand™, installed and configured for using 7-Mode storage controllers before configuring - the 7-Mode driver on OpenStack. + the 7-Mode driver on OpenStack. Configuration options available for the 7-Mode system driver @@ -139,7 +139,7 @@ using vFiler to host OpenStack volumes. MultiStore® must be enabled before using vFiler for - provisioning. + provisioning.
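A hedged cinder.conf sketch for the 7-Mode driver follows. netapp_vfiler is the vFiler flag discussed above; the driver path and the remaining flag names are assumptions based on the Grizzly-era NetApp driver and should be checked against the option tables, and all values are placeholders:
volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver
netapp_wsdl_url = http://oncommand.example.com:8080/dfm.wsdl
netapp_server_hostname = oncommand.example.com
netapp_login = dfmuser
netapp_password = secrete
netapp_vfiler = openstack_vfiler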
@@ -148,7 +148,7 @@ Make sure that at least one of the flags netapp_storage_service or netapp_storage_service_prefix is - specified in configuration. + specified in configuration.Refer to + using iSCSI protocol.The NetApp iSCSI driver for clustered Data ONTAP requires additional NetApp management software namely OnCommand, WFA and the NetApp Cloud Web Service application to be installed and configured for using clustered Data ONTAP systems before configuring ONTAP cluster - driver on OpenStack. + driver on OpenStack. Configuration options for the clustered Data ONTAP driver @@ -225,7 +225,7 @@ @@ -244,7 +244,7 @@ diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/nexenta-volume-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/nexenta-volume-driver.xml index 1b081aa3e9..e76d73a56e 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/nexenta-volume-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/nexenta-volume-driver.xml @@ -6,7 +6,7 @@ NexentaNexentaStor Appliance is NAS/SAN software platform designed for building reliable and fast network storage arrays. The the OpenSolaris and uses ZFS as a disk management system. NexentaStor can serve as a storage node for the OpenStack and - for the virtual servers via iSCSI protocol. + for the virtual servers via iSCSI protocol.The Nexenta driver allows you to use Nexenta SA to store Nova volumes. Every Nova volume is represented by a single zvol in a predefined Nexenta volume. For diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/nfs-volume-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/nfs-volume-driver.xml index 30c93a5039..eda5f3ea4b 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/nfs-volume-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/nfs-volume-driver.xml @@ -76,7 +76,7 @@ volume_driver=cinder.volume.drivers.nfs.NfsDriver + allocated to the volume destination. diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/xen-sm-driver.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/xen-sm-driver.xml index 1c085958cb..2363e45737 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/xen-sm-driver.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/xen-sm-driver.xml @@ -66,14 +66,14 @@ Backend: A term for a particular storage backend. This could - be iSCSI, NFS, Netapp etc. + be iSCSI, NFS, Netapp etc. Backend-config: All the parameters required to connect to a specific backend. For e.g. For NFS, - this would be the server, path, etc. + this would be the server, path, etc. + the one that is used. Operation The admin uses the nova-manage command - detailed below to add flavors and backends. + detailed below to add flavors and backends. One or more cinder-volume service instances will be deployed per availability zone. When an instance is started, it will create storage @@ -110,7 +110,7 @@ completely symmetric and hence should be able to service any create_volume request - within the zone. + within the zone. On XenServer, PV guests required @@ -171,7 +171,7 @@ Note: SR type and config connection parameters are in keeping with the XenAPI Co $ nova-manage sm backend_delete <backend-id> - Example: For the NFS storage manager plugin, the steps + Example: For the NFS storage manager plugin, the steps below may be used. 
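The steps themselves are largely not shown above. As a sketch only, assuming the sm subcommands mirror the backend_delete form shown earlier (the flavor label, server address, and connection parameters are illustrative):
$ nova-manage sm flavor_create nfs "NFS backend"
$ nova-manage sm backend_add nfs nfs server=192.168.2.10 serverpath=/nfs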
@@ -197,11 +197,11 @@ Note: SR type and config connection parameters are in keeping with the XenAPI Co Currently, the flavors have not been tied to the volume types API. As a result, we simply end up creating volumes in a "first fit" order - on the given backends. + on the given backends. The standard euca-* or OpenStack API commands (such as volume extensions) should be used for creating, destroying, attaching, or - detaching volumes. + detaching volumes. diff --git a/doc/src/docbkx/openstack-block-storage-admin/drivers/xenapi-nfs.xml b/doc/src/docbkx/openstack-block-storage-admin/drivers/xenapi-nfs.xml index 06d45c8e1e..6f25f74126 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/drivers/xenapi-nfs.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/drivers/xenapi-nfs.xml @@ -4,9 +4,9 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> XenAPINFS - XenAPINFS is a Block Storage (Cinder) driver which is using an +XenAPINFS is a Block Storage (Cinder) driver which is using an NFS share through XenAPI's Storage Manager to store virtual - disk images and exposing those virtual disks as volumes. + disk images and exposing those virtual disks as volumes. This driver is not accessing the NFS share directly, it is only accessing the diff --git a/doc/src/docbkx/openstack-block-storage-admin/troubleshoot-cinder.xml b/doc/src/docbkx/openstack-block-storage-admin/troubleshoot-cinder.xml index 6da0cfaab0..378ae2d77a 100644 --- a/doc/src/docbkx/openstack-block-storage-admin/troubleshoot-cinder.xml +++ b/doc/src/docbkx/openstack-block-storage-admin/troubleshoot-cinder.xml @@ -3,7 +3,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0"> Troubleshoot your cinder installation - This section is intended to help solve some basic and common errors that are encountered + This section is intended to help solve some basic and common errors that are encountered during setup and configuration of Cinder. The focus here is on failed creation of volumes. The most important thing to know is where to look in case of a failure. There are two log files that are especially helpful in the case of a volume creation failure. The first is the diff --git a/doc/src/docbkx/openstack-compute-admin/aboutcompute.xml b/doc/src/docbkx/openstack-compute-admin/aboutcompute.xml index 47ab5fbf92..017f46f962 100644 --- a/doc/src/docbkx/openstack-compute-admin/aboutcompute.xml +++ b/doc/src/docbkx/openstack-compute-admin/aboutcompute.xml @@ -84,7 +84,7 @@ keys, and users. A user can specify which tenant he or she wishes to be known as by appending :project_id to his or her access key. If no tenant is specified in the API request, Compute attempts to use a tenant with the same ID as the - user. + user. For tenants, quota controls are available to limit the: Number of volumes which may be created @@ -249,7 +249,7 @@ authentication service. A volume controller provides fast and permanent block-level storage for the compute servers. A network controller provides virtual networks to enable compute servers to interact with each other and with the public network. A - scheduler selects the most suitable compute controller to host an instance. OpenStack Compute is built on a shared-nothing, messaging-based architecture. You can run all + scheduler selects the most suitable compute controller to host an instance.OpenStack Compute is built on a shared-nothing, messaging-based architecture. 
You can run all of the major components on multiple servers including a compute controller, volume controller, network controller, and object store (or image service). A cloud controller communicates with the internal object store via HTTP (Hyper Text Transfer Protocol), but diff --git a/doc/src/docbkx/openstack-compute-admin/bk-compute-adminguide.xml b/doc/src/docbkx/openstack-compute-admin/bk-compute-adminguide.xml index ed9895407a..16f577dab5 100644 --- a/doc/src/docbkx/openstack-compute-admin/bk-compute-adminguide.xml +++ b/doc/src/docbkx/openstack-compute-admin/bk-compute-adminguide.xml @@ -44,7 +44,7 @@ cloud administration and management for any organization. This guide describes how to install, manage, and understand the software that runs - OpenStack Compute. + OpenStack Compute. diff --git a/doc/src/docbkx/openstack-compute-admin/ch_instance_mgmt.xml b/doc/src/docbkx/openstack-compute-admin/ch_instance_mgmt.xml index 3d42a1f293..faf8131c39 100644 --- a/doc/src/docbkx/openstack-compute-admin/ch_instance_mgmt.xml +++ b/doc/src/docbkx/openstack-compute-admin/ch_instance_mgmt.xml @@ -6,7 +6,7 @@ Instance Management Instances are the running virtual machines within an - OpenStack cloud. + OpenStack cloud. Migration provides a scheme to migrate running instances from one OpenStack Compute - server to another OpenStack Compute server. This feature can be used as described below. + server to another OpenStack Compute server. This feature can be used as described below. @@ -377,7 +377,7 @@ from above. ... +-------------------------------------+----------------------------------------------------------+ ]]> - In this example, vm1 is running on HostB. + In this example, vm1 is running on HostB. Third, select the server to migrate instances to. @@ -388,7 +388,7 @@ HostA nova-network enabled :-) None HostB nova-compute enabled :-) None HostC nova-compute enabled :-) None ]]> - In this example, HostC can be picked up because nova-compute is running on it. + In this example, HostC can be picked up because nova-compute is running on it. Third, ensure that HostC has enough resource for migration. @@ -446,7 +446,7 @@ HostC p2 5 10240 150
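With the checks above complete and HostC confirmed to have enough capacity, the migration itself can be requested through the nova client; a minimal sketch using the server and host names from the example above:
$ nova live-migration vm1 HostC
Re-running nova show vm1 afterwards should report the new host once the migration completes.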
Recovering from a failed compute node If you have deployed OpenStack Compute with a shared filesystem, - you can quickly recover from a failed compute node. + you can quickly recover from a failed compute node. @@ -500,7 +500,7 @@ is achieved using this database command: UPDATE instances SET host = 'np-rcc46' WHERE uuid = '3f57699a-e773-4650-a443-b4b37eed5a06'; - Next, if using a hypervisor that relies on libvirt (such as KVM) +Next, if using a hypervisor that relies on libvirt (such as KVM) it is a good idea to update the libvirt.xml file (found in /var/lib/nova/instances/[instance ID]). The important changes to make are to change the DHCPSERVER @@ -528,8 +528,8 @@ nova services or updating the vm_state and an automated configuration tool, you could encounter a situation where some files on your compute node are using the wrong UID or GID. This causes a raft of errors, such as being unable to live - migrate, or start virtual machines. - The following is a basic procedure run on nova-compute hosts, based on the KVM hypervisor, + migrate, or start virtual machines. + The following is a basic procedure run on nova-compute hosts, based on the KVM hypervisor, that could help to restore the situation: First,make sure you don't use numbers that are already used for some other user/group. @@ -556,15 +556,15 @@ nova services or updating the vm_state and
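The remaining steps of that procedure might look like the following sketch, where 112 and 120 are illustrative UID/GID values and the old IDs are placeholders; stop the nova services first and verify ownership afterwards:
# usermod -u 112 nova
# groupmod -g 120 nova
# find / -uid <old-uid> -exec chown nova {} \;
# find / -gid <old-gid> -exec chgrp nova {} \;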
Nova Disaster Recovery Process Sometimes, things just don't go right. An incident is - never planned, by its definition. + never planned, by its definition. In this section, we will review managing your cloud after a disaster, and how to easily backup the persistent storage volumes, which is another approach when you face a disaster. Even apart from the disaster scenario, backup - ARE mandatory. + ARE mandatory. For reference, you can find a DRP definition here : http://en.wikipedia.org/wiki/Disaster_Recovery_Plan. + >http://en.wikipedia.org/wiki/Disaster_Recovery_Plan. A- The disaster Recovery Process presentation A disaster could happen to several components of your @@ -572,13 +572,13 @@ nova services or updating the vm_state and cut, etc. In this example, we suppose the following setup : - A cloud controller (nova-api, nova-objecstore, nova-network) + A cloud controller (nova-api, nova-objecstore, nova-network) - A compute node (nova-compute) + A compute node (nova-compute) - A Storage Area Network used by cinder-volumes (aka SAN) + A Storage Area Network used by cinder-volumes (aka SAN) The example disaster will be the worst one : a power loss. That power loss applies to the @@ -587,22 +587,22 @@ nova services or updating the vm_state and crash : From the SAN to the cloud controller, we have an active iscsi session - (used for the "cinder-volumes" LVM's VG). + (used for the "cinder-volumes" LVM's VG). From the cloud controller to the compute node we also have active - iscsi sessions (managed by cinder-volume). + iscsi sessions (managed by cinder-volume). For every volume an iscsi session is made (so 14 ebs volumes equals 14 - sessions). + sessions). From the cloud controller to the compute node, we also have iptables/ ebtables rules which allows the access from the - cloud controller to the running instance. + cloud controller to the running instance. And at least, from the cloud controller @@ -619,7 +619,7 @@ nova services or updating the vm_state and From the SAN to the cloud, the ISCSI - session no longer exists. + session no longer exists. From the cloud controller to the compute @@ -630,7 +630,7 @@ nova services or updating the vm_state and From the cloud controller to the compute node, the iptables and ebtables are recreated, since, at boot, nova-network - reapply the configurations. + reapply the configurations. From the cloud controller, instances @@ -640,7 +640,7 @@ nova services or updating the vm_state and Into the database, data was not updated at all, since nova could not have guessed - the crash. + the crash. Before going further, and in order to prevent the admin to make fatal mistakes,vm_state and >destroy" or "terminate" command had been invoked, so the files for the instances remain on the compute - node. + node. The plan is to perform the following tasks, in that exact order. Any extra step would be dangerous at this stage @@ -662,19 +662,19 @@ nova services or updating the vm_state and We need to update the database in order to clean the stalled state. - (After that, we won't be able to perform the first step). + (After that, we won't be able to perform the first step). We need to restart the instances (so go from a "shutdown" to a - "running" state). + "running" state). After the restart, we can reattach the volumes to their respective - instances. + instances. - That step, which is not a mandatory one, exists in an SSH into the - instances in order to reboot them. 
+ That step, which is not a mandatory one, exists in an SSH into the + instances in order to reboot them. @@ -709,7 +709,7 @@ nova services or updating the vm_state and mysql> update volumes set instance_id=0; Now, when running nova volume-list all volumes should be - available. + available. Instances Restart @@ -723,7 +723,7 @@ nova services or updating the vm_state and At that stage, depending on your image, some instances will completely reboot and become reachable, while others will stop - on the "plymouth" stage. + on the "plymouth" stage. DO NOT reboot a second time the ones which are stopped at that stage (vm_state and >help.ubuntu.com) But remember that the idea of that stage is only to ask nova to reboot every instance, so the - stored state is preserved. + stored state is preserved. Volume Attachment @@ -772,7 +772,7 @@ done < $volumes_tmp_file role="italic">plymouth) will automatically continue their boot, and restart normally, while the ones which - booted will see the volume. + booted will see the volume. SSH into @@ -790,22 +790,22 @@ done < $volumes_tmp_file Voila! You successfully recovered your - cloud after that. + cloud after that. Here are some suggestions : - Use the parameter + Use the parameter errors=remount in the fstab file, which will prevent data corruption. - The system would lock any write to the disk if it detects an I/O + The system would lock any write to the disk if it detects an I/O error. This configuration option should be added into the cinder-volume server (the one which performs the ISCSI connection to the SAN), but also into the instances' fstab file. Do not add the entry for the SAN's disks to the cinder-volume's - fstab file. + fstab file. Some systems will hang on that step, which means you could lose access to your cloud-controller. In order to re-run the @@ -817,18 +817,18 @@ done < $volumes_tmp_file - For your instances, if you have the + For your instances, if you have the whole /home/ directory on the disk, instead of emptying the /home directory and map the disk on it, leave a user's directory with the user's bash files and the authorized_keys - file. + file. This will allow you to connect to the instance, even without the volume attached, if you allow only connections - via public keys. + via public keys. @@ -846,7 +846,7 @@ done < $volumes_tmp_file session. Do not dettach the volume via nova volume-detach, but - instead manually close the iscsi session. + instead manually close the iscsi session. In the following example, the iscsi session is number 15 for that instance : diff --git a/doc/src/docbkx/openstack-compute-admin/computeautomation.xml b/doc/src/docbkx/openstack-compute-admin/computeautomation.xml index 2f7a0dab69..d1a01d88b0 100644 --- a/doc/src/docbkx/openstack-compute-admin/computeautomation.xml +++ b/doc/src/docbkx/openstack-compute-admin/computeautomation.xml @@ -243,7 +243,7 @@ format="SVG" scale="60"/> - Start servers. + Start servers. Execute the following command on the dodai-deploy server to start the web server and job server. 
$ sudo $home/script/start-servers production diff --git a/doc/src/docbkx/openstack-compute-admin/computeconfigure.xml b/doc/src/docbkx/openstack-compute-admin/computeconfigure.xml index b21a4c6eaf..a47ff38f61 100644 --- a/doc/src/docbkx/openstack-compute-admin/computeconfigure.xml +++ b/doc/src/docbkx/openstack-compute-admin/computeconfigure.xml @@ -90,10 +90,10 @@ The credentials you will use to launch instances, bundle images, and all the other assorted API functions can be sourced in a single file, such as - creating one called /creds/openrc. + creating one called /creds/openrc. Here's an example openrc file you can download from the Dashboard in Settings > Project Settings > - Download RC File. + Download RC File. #!/bin/bash @@ -146,7 +146,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set Next, add these credentials to your environment prior to running any nova client commands or nova - commands. + commands. $ cat /root/creds/openrc >> ~/.bashrc source ~/.bashrc
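For comparison, a stripped-down openrc using the standard OS_* variables accepted by the nova client (all values are placeholders for your own deployment):
export OS_USERNAME=admin
export OS_TENANT_NAME=demo
export OS_PASSWORD=secrete
export OS_AUTH_URL=http://127.0.0.1:5000/v2.0/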
@@ -174,7 +174,7 @@ source ~/.bashrc the /16 range that was set in fixed-range in nova.conf. Currently, there can only be one network, and this set up would use the max IPs available in a /24. You can choose values that let - you use any valid amount that you would like.
OpenStack Compute assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range @@ -239,7 +239,7 @@ source ~/.bashrc nova.conf and copy it to additional compute nodes. Ensure each nova.conf file points to the correct IP addresses for the respective - services. + services. By default, Nova sets the bridge device based on the setting in flat_network_bridge. Now you can @@ -398,7 +398,7 @@ $ sudo service nova-compute restart can use the state_path configuration option to indicate a top-level directory for storing data related to the state of Compute including images if you are using the Compute object - store. + store.
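For example, the conventional default in nova.conf is:
state_path=/var/lib/nova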
@@ -499,7 +499,7 @@ xenapi_remap_vbd_dev=true There are different methods of authentication for the OpenStack Compute project, including no authentication. The preferred system is the OpenStack Identity Service, code-named Keystone. Refer to - Identity Management for additional information. + Identity Management for additional information. To customize authorization settings for Compute, see these configuration settings in nova.conf. @@ -559,7 +559,7 @@ xenapi_remap_vbd_dev=true Configuring Compute API password handling - The OpenStack Compute API allows the user to specify an admin + The OpenStack Compute API allows the user to specify an admin password when creating (or rebuilding) a server instance. If no password is specified, a randomly generated password is used. The password is returned in the API response. diff --git a/doc/src/docbkx/openstack-compute-admin/computeinstall.xml b/doc/src/docbkx/openstack-compute-admin/computeinstall.xml index e0b087b543..10fb40d79e 100644 --- a/doc/src/docbkx/openstack-compute-admin/computeinstall.xml +++ b/doc/src/docbkx/openstack-compute-admin/computeinstall.xml @@ -59,7 +59,7 @@ URI: http://download.opensuse.org/repositories/Cloud:/OpenStack:/Grizzly/SLE_11_ - You can list all available packages for OpenStack with + You can list all available packages for OpenStack with zypper se openstack. You can install packages with zypper in PACKAGE. @@ -96,7 +96,7 @@ GPG check: Yes URI: http://download.opensuse.org/repositories/Cloud:/OpenStack:/Grizzly/openSUSE_12.3/ - You can list all available packages for OpenStack with + You can list all available packages for OpenStack with zypper se openstack. You can install packages with zypper in PACKAGE. diff --git a/doc/src/docbkx/openstack-compute-admin/computenetworking.xml b/doc/src/docbkx/openstack-compute-admin/computenetworking.xml index 2ce6221891..c149d6861c 100644 --- a/doc/src/docbkx/openstack-compute-admin/computenetworking.xml +++ b/doc/src/docbkx/openstack-compute-admin/computenetworking.xml @@ -52,7 +52,7 @@ role="italic">internal network interface (controlled by the options: public_interface for the public interface, and flat_interface and vlan_interface for the - internal interface with flat / VLAN managers). + internal interface with flat / VLAN managers). The internal network interface is used for communication with VMs, it shouldn't have an IP address attached to it before OpenStack installation (it serves merely as a fabric where the actual endpoints are VMs and dnsmasq). Also, the internal network @@ -67,7 +67,7 @@ libvirt's network filtering facilities. The driver isn't tied to any particular network manager; all network managers use the same driver. The driver usually initializes (creates bridges - etc.) only when the first VM lands on this host node. + etc.) only when the first VM lands on this host node. All network managers operate in either single-host or multi-host mode. This choice greatly influences the network configuration. In single-host mode, there is just 1 instance of @@ -83,7 +83,7 @@ instance is explicitly terminated. By contrast, floating IPs are addresses that can be dynamically associated with an instance. A floating IP address can be disassociated and associated with another instance at any time. A user can reserve a floating IP for their - project. + project. In Flat Mode, a network administrator specifies a subnet. The IP addresses for VM instances are grabbed from the subnet, and then injected into the image on launch. 
Each instance receives a fixed IP address from the pool of @@ -103,7 +103,7 @@ In Flat DHCP Mode, OpenStack starts a DHCP server (dnsmasq) to pass out IP addresses to VM instances from the specified subnet in addition to manually configuring the networking bridge. IP addresses for VM instances are grabbed - from a subnet specified by the network administrator. + from a subnet specified by the network administrator. Like Flat Mode, all instances are attached to a single bridge on the compute node. In addition a DHCP server is running to configure instances (depending on single-/multi-host mode, alongside each nova-network). In this mode, @@ -140,7 +140,7 @@ compatibility, Grizzly supports the fixed_range option and if set will perform the default logic from Folsom and - earlier releases. + earlier releases. In single-host Flat DHCP mode you will be able @@ -157,7 +157,7 @@ and key for the user to access the VPN and starts the VPN automatically. It provides a private network segment for each project's instances that can be accessed via a dedicated VPN connection from the Internet. In this mode, each project gets its own - VLAN, Linux networking bridge, and subnet. + VLAN, Linux networking bridge, and subnet. The subnets are specified by the network administrator, and are assigned dynamically to a project when required. A DHCP Server is started for each VLAN to pass out IP @@ -219,7 +219,7 @@ instances to retrieve instance-specific data. Instances access the metadata service at http://169.254.169.254. The metadata service supports two sets of APIs: an OpenStack metadata API and an EC2-compatible API. Each of the APIs - is versioned by date. + is versioned by date. To retrieve a list of supported versions for the OpenStack metadata API, make a GET request to http://169.254.169.254/openstackFor example: @@ -394,7 +394,7 @@ echo 'Extra user data here' Mode.
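To illustrate the metadata service described earlier, the supported versions and an attribute can be fetched from inside an instance; the first path is the OpenStack metadata API and the second the EC2-compatible API (output varies by deployment):
$ curl http://169.254.169.254/openstack
$ curl http://169.254.169.254/latest/meta-data/instance-id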
- Integrate the bridge with your network. + Integrate the bridge with your network. By default, Compute uses the VLAN Network Mode. You @@ -447,7 +447,7 @@ echo 'Extra user data here' into the instance via the file system (or passed in via a guest agent). Metadata forwarding must be configured manually on the gateway if it is required - within your network. + within your network. To configure flat networking, ensure that your nova.conf file contains the following line: @@ -489,7 +489,7 @@ network_manager=nova.network.manager.FlatManager talking to any other nodes that are hosting VMs. With either of the Flat Networking options, the default gateway for the virtual machines is set to - the host which is running nova-network. + the host which is running nova-network. Set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces configuration to look @@ -604,7 +604,7 @@ iface br100 inet dhcp FlatDHCP doesn't create VLANs, it creates a bridge. This bridge works just fine on a single host, but when there are multiple hosts, traffic needs a way to get - out of the bridge onto a physical interface. + out of the bridge onto a physical interface.
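Filling out the interface edit mentioned above, a minimal Debian-style /etc/network/interfaces stanza that puts the host address on the bridge (interface names are assumptions matching the br100 and eth0 examples in this section; bridge-utils must be installed):
auto br100
iface br100 inet dhcp
    bridge_ports eth0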
Libvirt Flat DHCP Networking @@ -632,7 +632,7 @@ iface br100 inet dhcp or 1,048,576 IP addresses. That said, it will take a very long time for you to create your initial network, as an entry for each IP will be created in the - database. + database. If you have an unused interface on your hosts (eg eth2) that has connectivity with no IP address, you can simply tell FlatDHCP to bridge into the @@ -655,7 +655,7 @@ flat_injected=False public_interface=eth0 You can also add the unused interface to br100 - manually and not set flat_interface. + manually and not set flat_interface. Integrate your network interfaces to match this configuration.
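Pulling together the options discussed in this section, a nova.conf sketch for FlatDHCP bridging into a dedicated interface (eth2 as in the example above; all values are illustrative):
network_manager=nova.network.manager.FlatDHCPManager
flat_network_bridge=br100
flat_interface=eth2
flat_injected=False
public_interface=eth0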
@@ -728,7 +728,7 @@ firewall_driver=nova.virt.xenapi.firewall.Dom0IptablesFirewallDriver Outbound Traffic Flow with Any Flat Networking - In any set up with FlatNetworking, the host with + In any set up with FlatNetworking, the host with nova-network on it is responsible for forwarding traffic from the private network dynamically determined by Compute with the @@ -739,10 +739,10 @@ firewall_driver=nova.virt.xenapi.firewall.Dom0IptablesFirewallDriver - When a virtual machine sends traffic out to the + running nova-network. + When a virtual machine sends traffic out to the public networks, it sends it first to its default - gateway, which is where nova-network is configured. + gateway, which is where nova-network is configured.
Single adaptor hosts, first route @@ -792,7 +792,7 @@ firewall_driver=nova.virt.xenapi.firewall.Dom0IptablesFirewallDriversubnet (e.g., 172.16.20.0/24) that are on the same VLAN (layer 2 - network). + network). Running in VLAN mode is more complex than the other network modes. In particular: @@ -817,10 +817,10 @@ firewall_driver=nova.virt.xenapi.firewall.Dom0IptablesFirewallDriver The network_manager=nova.network.manager.VlanManager option specifies VLAN - mode, which happens to be the default networking mode. + mode, which happens to be the default networking mode. The bridges that are created by the network manager will be attached to the interface specified by vlan_interface, the example above uses the - eth0 interface, which is the default. + eth0 interface, which is the default. The fixed_range option deprecated in Grizzly and should be set to fixed_range='' so that Nova @@ -1145,7 +1145,7 @@ firewall_driver=nova.virt.xenapi.firewall.Dom0IptablesFirewallDriverhas integrated a fix for the issue in dnsmaq version 2.61. + >has integrated a fix for the issue in dnsmaq version 2.61. If upgrading dnsmasq is out of the question, a possible workaround is to minimize lease renewals with something like the following combination of config options. @@ -1160,23 +1160,23 @@ fixed_ip_disassociate_timeout=1209600
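Only the fixed_ip_disassociate_timeout fragment of that combination survives above; a fuller sketch might pair it with a long DHCP lease (dhcp_lease_time is a real nova option, and both values here are illustrative):
dhcp_lease_time=86400
fixed_ip_disassociate_timeout=1209600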
Cloudpipe — Per Project Vpns - Cloudpipe is a method for connecting end users to - their project instances in VLAN networking mode. - The support code for cloudpipe implements admin + Cloudpipe is a method for connecting end users to + their project instances in VLAN networking mode. + The support code for cloudpipe implements admin commands (via an extension) to automatically create a VM for a project that allows users to VPN into the private network of their project. Access to this VPN is provided through a public port on the network host for the project. This allows users to have free access to the virtual machines in their project without - exposing those machines to the public internet. - The cloudpipe image is basically just a Linux + exposing those machines to the public internet. + The cloudpipe image is basically just a Linux instance with openvpn installed. It needs a simple script to grab user data from the metadata server, b64 decode it into a zip file, and run the autorun.sh script from inside the zip. The autorun script will configure and run openvpn to run using the data from - nova. + nova. It is also useful to have a cron script that will periodically redownload the metadata and copy the new Certificate Revocation List (CRL). This list is contained within the payload file and will keeps revoked users from connecting and will @@ -1193,12 +1193,12 @@ fixed_ip_disassociate_timeout=1209600 Installing the required packages - We start by installing the required packages on our instance : + We start by installing the required packages on our instance : # apt-get update && apt-get upgrade && apt-get install openvpn bridge-utils unzip -y - Creating the server configuration template - Create a configuration for Openvpn, and save it under + Creating the server configuration template + Create a configuration for Openvpn, and save it under /etc/openvpn/server.conf : port 1194 @@ -1240,7 +1240,7 @@ mute 20 Create the network scripts - The next step is to create both scripts that will be used when the + The next step is to create both scripts that will be used when the network components will start up and shut down. The scripts will be respectively saved under /etc/openvpn/up.sh and /etc/openvpn/down.sh : @@ -1268,7 +1268,7 @@ DEV=$2 Edit the network interface configuration file - Update the /etc/network/interfaces accordingly + Update the /etc/network/interfaces accordingly (We tear down the main interface and enable the bridged interface) : # This file describes the network interfaces available on your system @@ -1290,7 +1290,7 @@ bridge_ports eth0 Edit the rc.local file - The next step consists in updating the + The next step consists in updating the /etc/rc.local file. We will ask our image to retrive the payload, decrypt it, and use both key and CRL for our Openvpn service : /etc/rc.local @@ -1343,7 +1343,7 @@ exit 0
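The up.sh and down.sh bodies are mostly not shown above (only the DEV=$2 assignment survives). A sketch of what such bridge helpers typically look like, not the original contents; the bridge name comes from the argument given in server.conf and openvpn supplies the tap device name:
#!/bin/sh
# /etc/openvpn/up.sh: attach the VPN tap device to the bridge
BR=$1
DEV=$2
/sbin/ifconfig $DEV 0.0.0.0 promisc up
/usr/sbin/brctl addif $BR $DEV

#!/bin/sh
# /etc/openvpn/down.sh: detach the tap device and bring it down
BR=$1
DEV=$2
/usr/sbin/brctl delif $BR $DEV
/sbin/ifconfig $DEV down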
Upload your instance to Glance - We will make use of the nova + We will make use of the nova snapshot feature in order to create an image from our running instance. We start by retrieving the instance ID : @@ -1356,7 +1356,7 @@ exit 0 | 739079ab-0f8e-404a-ae6e-a91f4fe99c94 | cloud-pipe | ACTIVE | vlan1=192.168.22.43 | +--------------------------------------+------------+--------+---------------------+ - We create an image with, using the instance ID : + We create an image with, using the instance ID : $ nova image-create 739079a-b-0f8e-404a-ae6e-a91f4fe99c94 Make sure the instance has been upload to the Glance repository : $ nova image-list @@ -1387,7 +1387,7 @@ cnt_vpn_clients=5
Power-up your instance - Use the nova cloudpipe feature the + Use the nova cloudpipe feature the following way : $ nova cloud-pipe create $tenant_id Retrive all the tenants : @@ -1421,7 +1421,7 @@ ALLOW -1:-1 from 0.0.0.0/0
VPN Access - In VLAN networking mode, the second IP in each + In VLAN networking mode, the second IP in each private network is reserved for the cloudpipe instance. This gives a consistent IP to the instance so that nova-network can create @@ -1429,8 +1429,8 @@ ALLOW -1:-1 from 0.0.0.0/0 world. The network for each project is given a specific high-numbered port on the public IP of the network host. This port is automatically - forwarded to 1194 on the VPN instance. - If specific high numbered ports do not work for + forwarded to 1194 on the VPN instance. + If specific high numbered ports do not work for your users, you can always allocate and associate a public IP to the instance, and then change the vpn_public_ip and @@ -1456,11 +1456,11 @@ ALLOW -1:-1 from 0.0.0.0/0 to sign the certificate for the vpn, and is also passed to the user for bundling images. When a certificate is revoked using nova-manage, a new Certificate Revocation List (crl) is generated. As long as cloudpipe has an updated crl, it - will block revoked users from connecting to the vpn. - The userdata for cloudpipe isn't currently + will block revoked users from connecting to the vpn. + The userdata for cloudpipe isn't currently updated when certs are revoked, so it is necessary to restart the cloudpipe instance if a user's - credentials are revoked. + credentials are revoked.
Remote access to your cloudpipe instance from an OpenVPN client - Now your cloudpipe instance is running, you can use your favorite OpenVPN + Now your cloudpipe instance is running, you can use your favorite OpenVPN client in order to access your instances within their private network cloudpipe is connected to. In these sections we will present both ways of using cloudpipe, the first using a configuration file for clients without interfaces, and for - clients using an interface. + clients using an interface. Connect to your cloudpipe instance without an interface (CLI) Generate your certificates - Start by generating a private key and a certificate for your project: + Start by generating a private key and a certificate for your project: $ nova x509-create-cert Create the openvpn configuration file - The following template, which can be found under + The following template, which can be found under nova/cloudpipe/client.ovpn.template contains the necessary instructions for establishing a connection : # NOVA user connection @@ -1542,7 +1542,7 @@ keepalive 10 120 ping-timer-rem persist-tun persist-key - Update the file accordingly. In order to get the public IP and port + Update the file accordingly. In order to get the public IP and port of your cloudpipe instance, you can run the following command : $ nova cloudpipe-list +----------------------------------+------------+-------------+---------------+ @@ -1557,7 +1557,7 @@ persist-key Start your OpenVPN client - Depending on the client you are using, make sure to save the + Depending on the client you are using, make sure to save the configuration file under the directory it should be, so the certificate file and the private key. Usually, the file is saved under /etc/openvpn/clientconf/client.conf @@ -1565,11 +1565,11 @@ persist-key Connect to your cloudpipe instance using an interface - Download an OpenVPN client - In order to connect to the project's network, you will need an + Download an OpenVPN client + In order to connect to the project's network, you will need an OpenVPN client for your computer. Here are several clients - For Ubuntu : + For Ubuntu : OpenVPN @@ -1583,7 +1583,7 @@ persist-key gopenvpn - For Mac OsX : + For Mac OsX : OpenVPN (Official Client) @@ -1599,7 +1599,7 @@ persist-key - For Windows : + For Windows : OpenVPN (Official Client) @@ -1608,8 +1608,8 @@ persist-key - Configure your client - In this example we will use Viscosity, but the same settings + Configure your client + In this example we will use Viscosity, but the same settings apply to any client. Start by filling the public ip and the public port of the cloudpipe instance. This information can be found by running a @@ -1642,7 +1642,7 @@ persist-key - Certificate : The generated certificate + Certificate : The generated certificate Key : The private key @@ -1691,7 +1691,7 @@ persist-key Troubleshoot your cloudpipe instance - A periodic task disassociates the fixed ip address for the cloudpipe + A periodic task disassociates the fixed ip address for the cloudpipe instance. Into /var/log/nova/nova-network.log, the following line should appear : Running periodic task VlanManager._disassociate_stale_fixed_ips from (pid=21578) periodic_tasks /usr/lib/python2.7/dist-packages/nova/manager.py:152 @@ -1723,8 +1723,8 @@ persist-key Cloudpipe-related files Nova stores cloudpipe keys into - /var/lib/nova/keys. - Certificates are stored into /var/lib/nova/CA. + /var/lib/nova/keys. + Certificates are stored into /var/lib/nova/CA. 
Credentials are stored into /var/lib/nova/CA/projects/ @@ -1755,7 +1755,7 @@ persist-key put under /root/.bashrc. If the EC2 credentials have been put into another user's .bashrc file, then, it is - necessary to run these commands as the user. + necessary to run these commands as the user. Using the nova command-line tool: @@ -1792,7 +1792,7 @@ persist-key public) that can be dynamically added to a running virtual instance. OpenStack Compute uses Network Address Translation (NAT) to assign floating IPs to - virtual instances. + virtual instances. If you plan to use this feature, you must add the following to your nova.conf file to specify which interface the nova-network service will bind public IP @@ -1828,7 +1828,7 @@ public_interface=vlan100 service. - you can check if the forwarding is enabled by running the following command: $ cat /proc/sys/net/ipv4/ip_forward + you can check if the forwarding is enabled by running the following command: $ cat /proc/sys/net/ipv4/ip_forward 0 Or using sysctl$ sysctl net.ipv4.ip_forward net.ipv4.ip_forward = 0 @@ -1915,7 +1915,7 @@ auto_assign_floating_ip=True Removing a Network from a Project You will find that you cannot remove a network that has already been associated to a project by simply deleting - it. + it. To determine the project ID you must have admin rights. You can disassociate the project from the network with a scrub command and the project ID as the final parameter: @@ -1927,7 +1927,7 @@ auto_assign_floating_ip=True Using multiple interfaces for your instances (multinic) - The multi-nic feature allows you to plug more than one + The multi-nic feature allows you to plug more than one interface to your instances, making it possible to make several use cases available : @@ -1973,7 +1973,7 @@ auto_assign_floating_ip=True
Using the multinic feature - In order to use the multinic feature, first create two networks, and attach them + In order to use the multinic feature, first create two networks, and attach them to your project : $ nova network-create first-net --fixed-range-v4=20.20.0.0/24 --project-id=$your-project $ nova network-create second-net --fixed-range-v4=20.20.10.0/24 --project-id=$your-project @@ -2087,7 +2087,7 @@ iface eth1 inet dhcp multi_host=True send_arp_for_ha=true - The send_arp_for_ha option + The send_arp_for_ha option facilitates sending of gratuitous arp messages to ensure the arp caches on compute hosts are up to date. @@ -2274,7 +2274,7 @@ valid_lft forever preferred_lft forever Note that you cannot SSH to an instance with a public IP from within the same server as the routing - configuration won't allow it. + configuration won't allow it. You can use tcpdump to identify if packets are being routed to the inbound interface on the compute host. If the packets are reaching the compute hosts but the connection is failing, the issue may be that the packet is being diff --git a/doc/src/docbkx/openstack-compute-admin/computescheduler.xml b/doc/src/docbkx/openstack-compute-admin/computescheduler.xml index 9cf2d188c9..d72323b854 100644 --- a/doc/src/docbkx/openstack-compute-admin/computescheduler.xml +++ b/doc/src/docbkx/openstack-compute-admin/computescheduler.xml @@ -22,14 +22,14 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter scheduler_weight_classes=nova.scheduler.weights.all_weighers ram_weight_multiplier=1.0 - Compute is configured by default to use the Multi + Compute is configured by default to use the Multi Scheduler, which allows the admin to specify different scheduling behavior for compute requests versus volume - requests. + requests. The volume scheduler is configured by default as a Chance Scheduler, which picks a host at random that has the cinder-volume service running. - The compute scheduler is configured by default as a Filter + The compute scheduler is configured by default as a Filter Scheduler, described in detail in the next section. In the default configuration, this scheduler will only consider hosts that are in the requested availability zone @@ -47,7 +47,7 @@ ram_weight_multiplier=1.0 created. This Scheduler can only be used for scheduling compute requests, not volume requests, i.e. it can only be used with the compute_scheduler_driver - configuration option. + configuration option.
@@ -101,7 +101,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter AggregateCoreFilter Implements blueprint per-aggregate-resource-ratio. AggregateCoreFilter supports per-aggregate cpu_allocation_ratio. If the per-aggregate - value is not found, the value falls back to the global setting. + value is not found, the value falls back to the global setting.
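For instance, a per-aggregate ratio can be set as aggregate metadata through the nova client (the aggregate ID and ratio value are illustrative):
$ nova aggregate-set-metadata 1 cpu_allocation_ratio=2.0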
@@ -120,7 +120,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter filter_tenant_id it will only create instances from that tenant (or list of tenants). A host can be in different aggregates. If a host does not belong to an aggregate with the metadata key, it can create instances from all - tenants. + tenants.
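Similarly, tenant isolation is driven by aggregate metadata (the aggregate ID is illustrative and the tenant ID is a placeholder):
$ nova aggregate-set-metadata 2 filter_tenant_id=<tenant-id>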
AggregateRamFilter @@ -138,7 +138,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
AvailabilityZoneFilter Filters hosts by availability zone. This filter must be enabled for the scheduler - to respect availability zones in requests. + to respect availability zones in requests.
@@ -157,18 +157,18 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
ComputeFilter - Passes all hosts that are operational and enabled. + Passes all hosts that are operational and enabled. In general, this filter should always be enabled.
CoreFilter - Only schedule instances on hosts if there are + Only schedule instances on hosts if there are sufficient CPU cores available. If this filter is not set, the scheduler may over provision a host based on cores (i.e., the virtual cores running on an instance - may exceed the physical cores). + may exceed the physical cores). This filter can be configured to allow a fixed amount of vCPU overcommitment by using the cpu_allocation_ratio @@ -180,7 +180,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter With this setting, if there are 8 vCPUs on a node, the scheduler will allow instances up to 128 vCPU to be - run on that node. + run on that node. To disallow vCPU overcommitment set: cpu_allocation_ratio=1.0 @@ -221,8 +221,8 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
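The CoreFilter value referred to above is not shown, but the arithmetic pins it down: allowing up to 128 vCPUs on a node with 8 physical cores implies
cpu_allocation_ratio=16.0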
DiskFilter - Only schedule instances on hosts if there is - sufficient disk space available for root and ephemeral storage. + Only schedule instances on hosts if there is + sufficient disk space available for root and ephemeral storage. This filter can be configured to allow a fixed amount of disk overcommitment by using the @@ -244,7 +244,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
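The DiskFilter flag name is likewise cut off above; it is disk_allocation_ratio, and disallowing disk overcommitment would be expressed as:
disk_allocation_ratio=1.0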
GroupAntiAffinityFilter - The GroupAntiAffinityFilter ensures that each + The GroupAntiAffinityFilter ensures that each instance in a group is on a different host. To take advantage of this filter, the requester must pass a scheduler hint, using group as the @@ -269,7 +269,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter these properties using glance image-update img-uuid --property architecture=arm --property hypervisor_type=qemu - The image properties that + The image properties that the filter checks for are: @@ -298,12 +298,12 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
IsolatedHostsFilter - Allows the admin to define a special (isolated) set + Allows the admin to define a special (isolated) set of images and a special (isolated) set of hosts, such that the isolated images can only run on the isolated hosts, and the isolated hosts can only run isolated - images. - The admin must specify the isolated set of images + images. + The admin must specify the isolated set of images and hosts in the nova.conf file using the isolated_hosts and isolated_images configuration @@ -402,7 +402,7 @@ ram_allocation_ratio=1.5 With this setting, if there is 1GB of free RAM, the scheduler will allow instances up to size 1.5GB to be - run on that instance. + run on that instance.
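Returning to the IsolatedHostsFilter described above, the two options might be set in nova.conf as follows (the host names and image ID are placeholders):
isolated_hosts=server1,server2
isolated_images=<image-uuid>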
diff --git a/doc/src/docbkx/openstack-compute-admin/computetutorials.xml b/doc/src/docbkx/openstack-compute-admin/computetutorials.xml index 4d53f96e16..1b7cc1d6b9 100644 --- a/doc/src/docbkx/openstack-compute-admin/computetutorials.xml +++ b/doc/src/docbkx/openstack-compute-admin/computetutorials.xml @@ -13,7 +13,7 @@ The tutorial assumes you have obtained a TryStack account at http://trystack.org. It has a working installation of OpenStack - Compute, or you can install your own using the installation guides. + Compute, or you can install your own using the installation guides. We'll go through this tutorial in parts: @@ -32,10 +32,10 @@ Go to the TryStack Facebook account at https://www.facebook.com/groups/269238013145112/ - and request to join the group. + and request to join the group. Once you've joined the group, go to the TryStack dashboard and click Login using - Facebook. + Facebook. Enter your Facebook login information to receive your username and password that you can use with the Compute API. @@ -61,7 +61,7 @@ export NOVA_VERSION=1.1 from https://trystack.org/dash/api_info/ after - logging in with Facebook. + logging in with Facebook. Okay, you've created the basic scaffolding for your cloud user so that you can get some images and run instances on TryStack with your starter @@ -79,7 +79,7 @@ export NOVA_VERSION=1.1 information about which operating system to run, the user login and password, files stored on the system, and so on. Fortunately, TryStack provides images for - your use. + your use. Basically, run: @@ -236,7 +236,7 @@ export NOVA_VERSION=1.1 $ tar -xzvf latest.tar.gz The WordPress package will extract into a folder called wordpress in the same - directory that you downloaded latest.tar.gz. + directory that you downloaded latest.tar.gz. Next, enter "exit" and disconnect from this SSH session. @@ -256,6 +256,6 @@ export NOVA_VERSION=1.1 Configure the Wordpress Memcache pluginFrom a web browser, point to the IP address of your Wordpress server. Download and install the Memcache Plugin. Enter the IP address of your Memcache server.
Running a Blog in the Cloud: That's it! You're now running your blog on a cloud server in OpenStack Compute, and you've scaled it horizontally using additional virtual images to run the database and Memcache. Now if your blog gets a big boost of comments, you'll be ready for the extra reads-and-writes to the database.
diff --git a/doc/src/docbkx/openstack-compute-admin/moosefsbackend.xml b/doc/src/docbkx/openstack-compute-admin/moosefsbackend.xml index 9b6e3b8d67..c470695f97 100644 --- a/doc/src/docbkx/openstack-compute-admin/moosefsbackend.xml +++ b/doc/src/docbkx/openstack-compute-admin/moosefsbackend.xml @@ -5,15 +5,15 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> Installing MooseFS as shared storage for the instances directory - In the sections about Block Storage you see a convenient way to deploy a shared storage using - NFS. For better transactions performance, you could deploy MooseFS instead. + In the sections about Block Storage you see a convenient way to deploy a shared storage using + NFS. For better transactions performance, you could deploy MooseFS instead. MooseFS (Moose File System) is a shared file system ; it implements the same rough - concepts of shared storage solutions - such as Ceph, Lustre or even GlusterFS. + concepts of shared storage solutions - such as Ceph, Lustre or even GlusterFS. Main concepts - A metadata server (MDS), also called master server, which manages the file + A metadata server (MDS), also called master server, which manages the file repartition, their access and the namespace. @@ -35,13 +35,13 @@ - Two compute nodes running both MooseFS chunkserver and client services. + Two compute nodes running both MooseFS chunkserver and client services. - One MooseFS master server, running the metadata service. + One MooseFS master server, running the metadata service. - One MooseFS slave server, running the metalogger service. + One MooseFS slave server, running the metalogger service. For that particular walkthrough, we will use the following network schema : @@ -78,10 +78,10 @@
Installing the MooseFS metadata and metalogger servers You can run these components anywhere as long as the MooseFS chunkservers can reach - the MooseFS master server. + the MooseFS master server. In our deployment, both MooseFS master and slave run their services inside a virtual machine ; you just need to make sure to allocate enough memory to the MooseFS metadata - server, all the metadata being stored in RAM when the service runs. + server, all the metadata being stored in RAM when the service runs. @@ -100,13 +100,13 @@ User and group creation - Create the adequate user and group : + Create the adequate user and group : $ groupadd mfs && useradd -g mfs mfs Download the sources - Go to the MooseFS download page + Go to the MooseFS download page and fill the download form in order to obtain your URL for the package. @@ -122,7 +122,7 @@ Create configuration files - We will keep the default settings, for tuning performance, you can read the We will keep the default settings, for tuning performance, you can read the MooseFS official FAQ $ cd /etc/moosefs @@ -151,7 +151,7 @@ Power up the MooseFS metalogger service - + $ /usr/sbin/mfsmetalogger start @@ -161,8 +161,8 @@
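For the "Create configuration files" step above, keeping the defaults usually amounts to copying the .dist templates shipped in the tarball and then starting the master; the file names here assume the standard MooseFS layout:
$ cd /etc/moosefs
$ cp mfsmaster.cfg.dist mfsmaster.cfg
$ cp mfsexports.cfg.dist mfsexports.cfg
$ /usr/sbin/mfsmaster start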
Installing the MooseFS chunk and client services - In the first part, we will install the last version of FUSE, and proceed to the - installation of the MooseFS chunk and client in the second part. + In the first part, we will install the last version of FUSE, and proceed to the + installation of the MooseFS chunk and client in the second part. Installing FUSE @@ -175,7 +175,7 @@ Download the sources and configure them - For that setup we will retrieve the last version of fuse to make sure every + For that setup we will retrieve the last version of fuse to make sure every function will be available : $ wget http://downloads.sourceforge.net/project/fuse/fuse-2.X/2.9.1/fuse-2.9.1.tar.gz && tar -zxvf fuse-2.9.1.tar.gz && cd fuse-2.9.1$ ./configure && make && make install @@ -183,10 +183,10 @@ Installing the MooseFS chunk and client services - For installing both services, you can follow the same steps that were presented before + For installing both services, you can follow the same steps that were presented before (Steps 1 to 4) : - Hosts entry configuration + Hosts entry configuration Required packages @@ -208,7 +208,7 @@ Create configuration files - The chunk servers configuration is relatively easy to setup. You only need to + The chunk servers configuration is relatively easy to setup. You only need to create on every server directories that will be used for storing the datas of your cluster. $ cd /etc/moosefs @@ -239,7 +239,7 @@
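Continuing that step, a sketch of the chunkserver side (the storage path is an illustrative assumption; the .dist template names follow the standard MooseFS layout):
$ cp mfschunkserver.cfg.dist mfschunkserver.cfg
$ cp mfshdd.cfg.dist mfshdd.cfg
$ mkdir -p /mnt/mfschunks
$ chown -R mfs:mfs /mnt/mfschunks
$ echo "/mnt/mfschunks" >> mfshdd.cfg
$ /usr/sbin/mfschunkserver start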
Access to your cluster storage - You can now access your cluster space from the compute node, (both acting as + You can now access your cluster space from the compute node, (both acting as chunkservers) : $ mfsmount /var/lib/nova/instances -H mfsmaster mfsmaster accepted connection with parameters: read-write,restricted_ip ; root mapped to root:root @@ -261,7 +261,7 @@ none on /var/lib/ureadahead/debugfs type debugfs (rw,relatime) can interact with it the way you would interact with a classical mount, using build-in linux commands (cp, rm, etc...). - The MooseFS client has several tools for managing the objects within the cluster (set + The MooseFS client has several tools for managing the objects within the cluster (set replication goals, etc..). You can see the list of the available tools by running $ mfs <TAB> <TAB> mfsappendchunks mfschunkserver mfsfileinfo mfsgetgoal mfsmount mfsrsetgoal mfssetgoal mfstools diff --git a/doc/src/docbkx/openstack-compute-admin/preface.xml b/doc/src/docbkx/openstack-compute-admin/preface.xml index 584e2d498b..2a07eb57ae 100644 --- a/doc/src/docbkx/openstack-compute-admin/preface.xml +++ b/doc/src/docbkx/openstack-compute-admin/preface.xml @@ -6,7 +6,7 @@ Preface OpenStack™ Compute offers open source software for cloud administration and management for any organization. This - guide describes how to install, manage, and understand the software that runs OpenStack Compute. + guide describes how to install, manage, and understand the software that runs OpenStack Compute.
Document Change History The most recent changes are described in the table diff --git a/doc/src/docbkx/openstack-compute-admin/rootwrap.xml b/doc/src/docbkx/openstack-compute-admin/rootwrap.xml index a611c24823..c285d23eb9 100644 --- a/doc/src/docbkx/openstack-compute-admin/rootwrap.xml +++ b/doc/src/docbkx/openstack-compute-admin/rootwrap.xml @@ -15,7 +15,7 @@ root. However this was difficult to maintain (the sudoers file was in packaging), and did not allow for complex filtering of parameters (advanced filters). The rootwrap was designed to - solve those issues. + solve those issues. How rootwrap works: Instead of just calling sudo make me a sandwich, Compute services starting with nova- call sudo nova-rootwrap /etc/nova/rootwrap.conf make me a sandwich. @@ -24,7 +24,7 @@ in its configuration file, and loads command filters from them. Then it checks if the command requested by Compute matches one of those filters, in which case it executes the command - (as root). If no filter matches, it denies the request. + (as root). If no filter matches, it denies the request. Security model @@ -38,7 +38,7 @@ filter definition directories, which contain root-owned filters definition files. This chain ensures that the nova user itself is not in control of the configuration or - modules used by the nova-rootwrap executable. + modules used by the nova-rootwrap executable. Details of rootwrap.conf @@ -63,12 +63,12 @@ [DEFAULT] filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap -
List of configuration flags for Storwize storage and SVC driver
The host name/IP address of NetApp Cloud Web Service installation.
Login user name for NetApp Cloud Web Service installation.
Optional 0.95 (FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.
nfs_oversub_ratio
(ListOpt) Comma-separated list of directories containing filter definition files. Defines where filters for root wrap are stored. Directories defined on this line should all exist, be owned and writeable only by the root user.
@@ -94,7 +94,7 @@ [Filters] filter_name=kpartx: CommandFilter, /sbin/kpartx, root - (ListOpt) Comma-separated list containing first the Filter class to use, followed by that Filter arguments (which vary depending on the Filter class selected). . + (ListOpt) Comma-separated list containing first the Filter class to use, followed by that Filter arguments (which vary depending on the Filter class selected). . diff --git a/doc/src/docbkx/openstack-compute-admin/section_dashboard.xml b/doc/src/docbkx/openstack-compute-admin/section_dashboard.xml index 3504b37672..c10164d681 100644 --- a/doc/src/docbkx/openstack-compute-admin/section_dashboard.xml +++ b/doc/src/docbkx/openstack-compute-admin/section_dashboard.xml @@ -9,18 +9,18 @@ xlink:href="https://github.com/openstack/horizon/" >horizon, is a Web interface that allows cloud administrators and users to manage various OpenStack resources - and services. + and services. The dashboard enables web-based interactions with the - OpenStack Compute cloud controller through the OpenStack APIs. + OpenStack Compute cloud controller through the OpenStack APIs. The following instructions show an example deployment - configured with an Apache web server. + configured with an Apache web server. After you install and configure the dashboard, you can complete the following tasks: Customize your dashboard. See . + linkend="dashboard-custom-brand"/>. Set up session storage for the dashboard. See Setting Configuration Options in the cinder.conf File The configuration file cinder.conf is installed in /etc/cinder by default. A default set of options are already configured - in cinder.conf when you install manually. + in cinder.conf when you install manually. Here is a simple example cinder.conf file. diff --git a/doc/src/docbkx/openstack-config/ch_computecells.xml b/doc/src/docbkx/openstack-config/ch_computecells.xml index afae387dd8..d5ab8e17a7 100644 --- a/doc/src/docbkx/openstack-config/ch_computecells.xml +++ b/doc/src/docbkx/openstack-config/ch_computecells.xml @@ -44,7 +44,7 @@ name - Name of the current cell. This must be unique for each cell. + Name of the current cell. This must be unique for each cell. @@ -54,7 +54,7 @@ key=value pairs defining capabilities of the current cell. These are sent to parent cells, but aren't used in scheduling until later filter/weight - support is added. + support is added. diff --git a/doc/src/docbkx/openstack-config/ch_computeconfigure.xml b/doc/src/docbkx/openstack-config/ch_computeconfigure.xml index 171aa9ca68..a4e0dbdf49 100644 --- a/doc/src/docbkx/openstack-config/ch_computeconfigure.xml +++ b/doc/src/docbkx/openstack-config/ch_computeconfigure.xml @@ -91,10 +91,10 @@ The credentials you will use to launch instances, bundle images, and all the other assorted API functions can be sourced in a single file, such as - creating one called /creds/openrc. + creating one called /creds/openrc. Here's an example openrc file you can download from the Dashboard in Settings > Project Settings > - Download RC File. + Download RC File. #!/bin/bash @@ -147,7 +147,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set Next, add these credentials to your environment prior to running any nova client commands or nova - commands. + commands. $ cat /root/creds/openrc >> ~/.bashrc source ~/.bashrc
@@ -178,7 +178,7 @@ source ~/.bashrc the /16 range that was set in fixed-range in nova.conf. Currently, there can only be one network, and this set up would use the max IPs available in a /24. You can choose values that let - you use any valid amount that you would like. + you use any valid amount that you would like. OpenStack Compute assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range @@ -245,7 +245,7 @@ source ~/.bashrc nova.conf and copy it to additional compute nodes. Ensure each nova.conf file points to the correct IP addresses for the respective - services. + services. By default, Nova sets the bridge device based on the setting in flat_network_bridge. Now you can @@ -407,7 +407,7 @@ $ sudo service nova-compute restart can use the state_path configuration option to indicate a top-level directory for storing data related to the state of Compute including images if you are using the Compute object - store. + store.
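A minimal nova.conf sketch of the networking and state options discussed above; the path, range, and bridge name are illustrative (the range and bridge match the examples used elsewhere in this guide):

state_path=/var/lib/nova
fixed_range=192.168.100.0/24
flat_network_bridge=br100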
@@ -551,7 +551,7 @@ xenapi_remap_vbd_dev=true Configuring Compute API password handling - The OpenStack Compute API allows the user to specify an admin + The OpenStack Compute API allows the user to specify an admin password when creating (or rebuilding) a server instance. If no password is specified, a randomly generated password is used. The password is returned in the API response. diff --git a/doc/src/docbkx/openstack-config/ch_computehypervisors.xml b/doc/src/docbkx/openstack-config/ch_computehypervisors.xml index 737bb99c5f..27e69b7120 100644 --- a/doc/src/docbkx/openstack-config/ch_computehypervisors.xml +++ b/doc/src/docbkx/openstack-config/ch_computehypervisors.xml @@ -14,20 +14,20 @@ information for choosing a hypervisor. Refer to http://wiki.openstack.org/HypervisorSupportMatrix for a detailed list of - features and support across the hypervisors. + features and support across the hypervisors.
Here is a list of the supported hypervisors with links to a relevant web site for configuration and use: KVM - Kernel-based Virtual Machine. The virtual disk formats that it supports are inherited from QEMU, since it uses a modified QEMU program to launch the virtual machine. The supported - formats include raw images, qcow2, and VMware formats. + formats include raw images, qcow2, and VMware formats. LXC - Linux Containers (through libvirt), used to run Linux-based virtual machines. QEMU - Quick EMUlator, generally only used for development purposes. UML - User Mode Linux, - generally only used for development purposes. + generally only used for development purposes. VMware vSphere 4.1 update 1 and newer runs VMware-based Linux and Windows images through a connection with a vCenter server or directly with an ESXi host. @@ -49,10 +49,10 @@
Hypervisor Configuration Basics The node where the nova-compute service is installed and running is the machine that - runs all the virtual machines, referred to as the compute node in this guide. + runs all the virtual machines, referred to as the compute node in this guide. By default, the selected hypervisor is KVM. To change to another hypervisor, change the libvirt_type option in nova.conf and restart - the nova-compute service. + the nova-compute service. Here are the general nova.conf options that are used to configure the compute node's hypervisor. Specific options for particular hypervisors can be found in the following sections. diff --git a/doc/src/docbkx/openstack-config/ch_computescheduler.xml b/doc/src/docbkx/openstack-config/ch_computescheduler.xml index 91b902763e..247ef6dc70 100644 --- a/doc/src/docbkx/openstack-config/ch_computescheduler.xml +++ b/doc/src/docbkx/openstack-config/ch_computescheduler.xml @@ -21,11 +21,11 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn compute_fill_first_cost_fn_weight=-1.0 - Compute is configured by default to use the Multi + Compute is configured by default to use the Multi Scheduler, which allows the admin to specify different scheduling behavior for compute requests versus volume - requests. - The compute scheduler is configured by default as a Filter + requests. + The compute scheduler is configured by default as a Filter Scheduler, described in detail in the next section. In the default configuration, this scheduler will only consider hosts that are in the requested availability zone @@ -33,7 +33,7 @@ compute_fill_first_cost_fn_weight=-1.0 sufficient RAM available (RamFilter), and that are actually capable of servicing the request (ComputeFilter). - From the resulting filtered list of eligible hosts, the + From the resulting filtered list of eligible hosts, the scheduler will assign a cost to each host based on the amount of free RAM (nova.scheduler.least_cost.compute_fill_first_cost_fn), @@ -41,10 +41,10 @@ compute_fill_first_cost_fn_weight=-1.0 (compute_fill_first_cost_fn_weight), and will select the host with the minimum cost. This is equivalent to selecting the host with the maximum amount of - RAM available. + RAM available. For information on the volume scheduler, refer to the OpenStack Block Storage - Admin Manual. + Admin Manual.
Filter Scheduler The Filter Scheduler @@ -55,7 +55,7 @@ compute_fill_first_cost_fn_weight=-1.0 created. This Scheduler can only be used for scheduling compute requests, not volume requests, i.e. it can only be used with the compute_scheduler_driver - configuration option. + configuration option.
@@ -128,7 +128,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter only create instances from that tenant (or list of tenants). A host can be in different aggregates. If a host does not belong to an aggregate with the metadata key, it can create instances - from all tenants. + from all tenants.
@@ -139,9 +139,9 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
AvailabilityZoneFilter - Filters hosts by availability zone. This filter + Filters hosts by availability zone. This filter must be enabled for the scheduler to respect - availability zones in requests. + availability zones in requests.
@@ -160,18 +160,18 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
ComputeFilter - Passes all hosts that are operational and enabled. + Passes all hosts that are operational and enabled. In general, this filter should always be enabled.
CoreFilter - Only schedule instances on hosts if there are + Only schedule instances on hosts if there are sufficient CPU cores available. If this filter is not set, the scheduler may over provision a host based on cores (i.e., the virtual cores running on an instance - may exceed the physical cores). + may exceed the physical cores). This filter can be configured to allow a fixed amount of vCPU overcommitment by using the cpu_allocation_ratio @@ -183,7 +183,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter With this setting, if there are 8 vCPUs on a node, the scheduler will allow instances up to 128 vCPU to be - run on that node. + run on that node. To disallow vCPU overcommitment set: cpu_allocation_ratio=1.0 @@ -223,7 +223,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
DiskFilter - Only schedule instances on hosts if there is sufficient disk + Only schedule instances on hosts if there is sufficient disk space available for root and ephemeral storage. This filter can be configured to allow a fixed amount of disk overcommitment by using the disk_allocation_ratio @@ -240,7 +240,7 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
GroupAntiAffinityFilter - The GroupAntiAffinityFilter ensures that each instance in a + The GroupAntiAffinityFilter ensures that each instance in a group is on a different host. To take advantage of this filter, the requester must pass a scheduler hint, using group as the key and a list of instance @@ -294,12 +294,12 @@ scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
IsolatedHostsFilter - Allows the admin to define a special (isolated) set of images and a special (isolated) set of hosts, such that the isolated images can only run on the isolated hosts, and the isolated hosts can only run isolated - images. - The admin must specify the isolated set of images + images. + The admin must specify the isolated set of images and hosts in the nova.conf file using the isolated_hosts and isolated_images configuration @@ -398,7 +398,7 @@ ram_allocation_ratio=1.5 With this setting, if there is 1GB of free RAM, the scheduler will allow instances up to size 1.5GB to be - run on that host. + run on that host.
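A sketch of the isolated_hosts and isolated_images options that the IsolatedHostsFilter description above refers to; the host names and image UUIDs here are invented placeholders:

isolated_hosts=server1,server2
isolated_images=342b492c-128f-4a42-8d3a-c5088cf27d13,ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09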
@@ -509,7 +509,7 @@ ram_allocation_ratio=1.5 - The Filter Scheduler takes the hosts that remain after + The Filter Scheduler takes the hosts that remain after the filters have been applied and applies one or more cost functions to each host to get numerical scores for each host. Each cost score is multiplied by a weighting @@ -523,7 +523,7 @@ ram_allocation_ratio=1.5 least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn compute_fill_first_cost_fn_weight=-1.0 - Multiple cost functions can be specified in the + Multiple cost functions can be specified in the least_cost_functions configuration option, separated by commas. For example: @@ -531,10 +531,10 @@ least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn,nova.s compute_fill_first_cost_fn_weight=-1.0 noop_cost_fn_weight=1.0 - If there are multiple cost functions, then the weighted + If there are multiple cost functions, then the weighted cost scores are added together. The scheduler selects the host that has the minimum weighted cost. - The Compute service comes with three cost functions: + The Compute service comes with three cost functions:
nova.scheduler.least_cost.compute_fill_first_cost_fn This cost function calculates the amount of free @@ -560,8 +560,8 @@ compute_fill_first_cost_fn_weight=-1.0 nova.scheduler.least_cost.retry_host_cost_fn This cost function adds additional cost for retrying scheduling a host that was already used for a previous - scheduling attempt. - The normal method of using this function is to set + scheduling attempt. + The normal method of using this function is to set retry_host_cost_fn_weight to a positive value, so that hosts which consistently encounter build failures will be used less diff --git a/doc/src/docbkx/openstack-config/ch_config-overview.xml b/doc/src/docbkx/openstack-config/ch_config-overview.xml index 59e09ed053..953c5c52cb 100644 --- a/doc/src/docbkx/openstack-config/ch_config-overview.xml +++ b/doc/src/docbkx/openstack-config/ch_config-overview.xml @@ -8,7 +8,7 @@ setting up cloud services. Each project uses similar configuration techniques and a common framework for INI file options. This guide pulls together multiple references for - each type of configuration. + each type of configuration. .conf files .ini files Compute diff --git a/doc/src/docbkx/openstack-config/ch_identityconfigure.xml b/doc/src/docbkx/openstack-config/ch_identityconfigure.xml index 2a590d7f99..d05f211256 100644 --- a/doc/src/docbkx/openstack-config/ch_identityconfigure.xml +++ b/doc/src/docbkx/openstack-config/ch_identityconfigure.xml @@ -17,7 +17,7 @@ The configuration file keystone.conf is installed in /etc/keystone by default. A default set of options is already configured in - keystone.conf when you install manually. + keystone.conf when you install manually. Here is a simple example keystone.conf file. diff --git a/doc/src/docbkx/openstack-config/ch_imageservice.xml b/doc/src/docbkx/openstack-config/ch_imageservice.xml index ccd907362c..1131e60b79 100644 --- a/doc/src/docbkx/openstack-config/ch_imageservice.xml +++ b/doc/src/docbkx/openstack-config/ch_imageservice.xml @@ -21,7 +21,7 @@ If your installation requires euca2ools to register new images, you must run the nova-objectstore service. This service provides an Amazon S3 front-end for Glance, - which is required by euca2ools. + which is required by euca2ools. diff --git a/doc/src/docbkx/openstack-config/compute-configure-console.xml b/doc/src/docbkx/openstack-config/compute-configure-console.xml index e43343eca1..cd53fd918f 100644 --- a/doc/src/docbkx/openstack-config/compute-configure-console.xml +++ b/doc/src/docbkx/openstack-config/compute-configure-console.xml @@ -27,7 +27,7 @@ streaming and more. SPICE is a new protocol which aims to address all the limitations in VNC, to provide good remote desktop support. - SPICE support in OpenStack Compute shares a similar + SPICE support in OpenStack Compute shares a similar architecture to the VNC implementation. The OpenStack Dashboard uses a SPICE-HTML5 widget in its console tab that communicates with the nova-spicehtml5proxy service using diff --git a/doc/src/docbkx/openstack-config/compute-configure-migrations.xml b/doc/src/docbkx/openstack-config/compute-configure-migrations.xml index 25407b6bbf..dbc607d68c 100644 --- a/doc/src/docbkx/openstack-config/compute-configure-migrations.xml +++ b/doc/src/docbkx/openstack-config/compute-configure-migrations.xml @@ -17,7 +17,7 @@ Migration allows an administrator to move a virtual machine instance from one compute host to another. This feature is useful when a compute host requires maintenance.
Migration can also - be useful to redistribute the load when many VM instances are running on a specific physical machine. + be useful to redistribute the load when many VM instances are running on a specific physical machine. There are two types of migration: @@ -80,14 +80,14 @@ KVM and Libvirt in the OpenStack Compute Administration Guide for more details. - This guide assumes the default value for instances_path in your + This guide assumes the default value for instances_path in your nova.conf (NOVA-INST-DIR/instances). If you have changed the state_path or instances_path - variables, please modify accordingly. + variables, please modify accordingly. You must specify vncserver_listen=0.0.0.0 or live migration will - not work correctly. + not work correctly. @@ -141,7 +141,7 @@ the Ubuntu NFS HowTo to set up an NFS server on HostA, and NFS Clients on HostB and HostC. - Our aim is to export NOVA-INST-DIR/instances from HostA, + Our aim is to export NOVA-INST-DIR/instances from HostA, and have it readable and writable by the nova user on HostB and HostC. @@ -254,7 +254,7 @@ root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l - Configure your firewall to allow libvirt to communicate between nodes. + Configure your firewall to allow libvirt to communicate between nodes. Information about ports used with libvirt can be found at the libvirt documentation. By default, libvirt listens on TCP port 16509 and an ephemeral TCP range from 49152 to 49261 is used for the KVM communications. As this guide has disabled libvirt auth, you @@ -276,7 +276,7 @@ root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVEThe Compute service does not use libvirt's live migration by default because there is a risk that the migration process will never terminate. This can happen if the guest operating system - dirties blocks on the disk faster than they can be migrated. + dirties blocks on the disk faster than they can be migrated.
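A sketch of the /etc/exports entry on HostA for the shared-storage setup described above, assuming the default NOVA-INST-DIR of /var/lib/nova and that HostB and HostC resolve by name; adjust to your deployment:

/var/lib/nova/instances HostB(rw,sync,fsid=0,no_root_squash)
/var/lib/nova/instances HostC(rw,sync,fsid=0,no_root_squash)

Run exportfs -a afterwards to publish the export.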
diff --git a/doc/src/docbkx/openstack-config/compute-configure-quotas.xml b/doc/src/docbkx/openstack-config/compute-configure-quotas.xml index d1885513b2..a8cdec299e 100644 --- a/doc/src/docbkx/openstack-config/compute-configure-quotas.xml +++ b/doc/src/docbkx/openstack-config/compute-configure-quotas.xml @@ -47,14 +47,14 @@ parenthesis): Number of rules per security group (security_group_rules=20)
- The defaults may be modified by setting the variable in + The defaults may be modified by setting the variable in nova.conf, then restarting the nova-api service. To modify a value for a specific project, the nova-manage command should be used. For example: $ nova-manage project quota --project=1113f5f266f3477ac03da4e4f82d0568 --key=cores --value=40 Alternatively, quota settings are available through the OpenStack Dashboard in the "Edit - Project" page. + Project" page.
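The quota defaults listed above correspond to nova.conf options following the quota_<resource> naming pattern; a sketch with illustrative values:

quota_instances=10
quota_cores=20
quota_security_group_rules=20

Restart the nova-api service after changing them, as noted above.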
diff --git a/doc/src/docbkx/openstack-config/compute-configure-service-groups.xml b/doc/src/docbkx/openstack-config/compute-configure-service-groups.xml index ef52f306fd..502f5afc63 100644 --- a/doc/src/docbkx/openstack-config/compute-configure-service-groups.xml +++ b/doc/src/docbkx/openstack-config/compute-configure-service-groups.xml @@ -8,7 +8,7 @@ xmlns:ns="http://docbook.org/ns/docbook" version="5.0"> Configuring Compute Service Groups - To effectively manage and utilize compute nodes, Nova needs + to know their status. For example, when a user launches a new VM, the Nova scheduler should send the request to a live node (with enough capacity too, of course). From the Grizzly release @@ -35,7 +35,7 @@ timeout (service_down_time) is used to determine if a node is dead. - The driver has two limitations, which may or may not be an + The driver has two limitations, which may or may not be an issue for you, depending on your setup. First, the more compute worker nodes you have, the more pressure you put on the database. Second, the timeout is by default 60 seconds. So, it might take a diff --git a/doc/src/docbkx/openstack-config/compute-configure-vnc.xml b/doc/src/docbkx/openstack-config/compute-configure-vnc.xml index 4b9ffdb5f3..2f26c133a5 100644 --- a/doc/src/docbkx/openstack-config/compute-configure-vnc.xml +++ b/doc/src/docbkx/openstack-config/compute-configure-vnc.xml @@ -5,7 +5,7 @@ xml:id="getting-started-with-vnc-proxy"> VNC Console Proxy The VNC proxy is an OpenStack component that enables compute - service users to access their instances through VNC clients. + service users to access their instances through VNC clients. The VNC console connection works as follows: @@ -16,10 +16,10 @@ The user pastes the URL in a browser or as a client - parameter. + parameter. - The browser or client connects to the proxy. + The browser or client connects to the proxy. The proxy talks to nova-consoleauth to @@ -31,11 +31,11 @@ nova.conf file option, . In this way, the VNC proxy works as a bridge between the public network and - the private host network. + the private host network. The proxy initiates the connection to the VNC server, and - continues to proxy until the session ends. + continues to proxy until the session ends. The proxy also tunnels the VNC protocol over WebSockets so @@ -44,10 +44,10 @@ Bridges between the public network, where the clients - live, and the private network, where vncservers live. + live, and the private network, where vncservers live. - Mediates token authentication. + Mediates token authentication. Transparently deals with hypervisor-specific connection @@ -74,11 +74,11 @@ service must be running for either proxy to work. Many proxies of either type can be run against a single nova-consoleauth service in a cluster - configuration. + configuration. Do not confuse the nova-consoleauth shared service with nova-console, which is a XenAPI-specific service that most recent VNC proxy architectures - do not use. + do not use.
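A consolidated sketch of the compute-host VNC options that the following sections walk through individually; the addresses and URLs are placeholders for your deployment:

vnc_enabled=true
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=192.168.1.2
novncproxy_base_url=http://PUBLIC_IP:6080/vnc_auto.html
xvpvncproxy_base_url=http://PUBLIC_IP:6081/console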
@@ -88,25 +88,25 @@ A nova-consoleauth process. Typically - runs on the controller host. + runs on the controller host. One or more nova-novncproxy services. Supports browser-based noVNC clients. For simple deployments, this service typically runs on the same machine as nova-api because it proxies between the public network - and the private compute host network. + and the private compute host network. One or more nova-xvpvncproxy services. Supports the special Java client discussed here. For simple deployments, this service typically runs on the same machine as nova-api because it proxies between the - public network and the private compute host network. + public network and the private compute host network. One or more compute hosts. These compute hosts must have - correctly configured options, as follows. + correctly configured options, as follows.
@@ -125,11 +125,11 @@ The vncserver_proxyclient_address defaults to 127.0.0.1, which is the address of the compute host that nova instructs proxies to - use when connecting to instance servers. + use when connecting to instance servers. For all-in-one XenServer domU deployments, set this to - 169.254.0.1. + 169.254.0.1. For multi-host XenServer domU deployments, set to a dom0 - management IP on the same network as the proxies. + management IP on the same network as the proxies. For multi-host libvirt deployments, set to a host management IP on the same network as the proxies. @@ -139,7 +139,7 @@ nova-novncproxy (noVNC) You must install the noVNC package, which contains the - nova-novncproxy service. + nova-novncproxy service. As root, run the following command: # apt-get install novnc The service starts automatically on installation. @@ -147,9 +147,9 @@ # service novnc restart The configuration option parameter should point to your nova.conf file, which includes the - message queue server address and credentials. + message queue server address and credentials. By default, nova-novncproxy binds on - 0.0.0.0:6080. + 0.0.0.0:6080. To connect the service to your nova deployment, add the following configuration options to your nova.conf file: @@ -161,12 +161,12 @@ Specifies the address on which the VNC service should bind. Make sure it is assigned one of the compute node interfaces. This address is the one used by your domain - file. + file. <graphics type="vnc" autoport="yes" keymap="en-us" listen="0.0.0.0"/> To use live migration, make sure to use the - 0.0.0.0 address. + 0.0.0.0 address. @@ -194,14 +194,14 @@ A: nova-xvpvncproxy, which ships with nova, is a proxy that supports a simple Java client. nova-novncproxy uses noVNC to provide - VNC support through a web browser. + VNC support through a web browser. Q: I want VNC support in the Dashboard. What services do I need? - A: You need nova-novncproxy, + A: You need nova-novncproxy, nova-consoleauth, and correctly - configured compute hosts. + configured compute hosts. Q: When I use nova @@ -242,7 +242,7 @@ vncserver_listen=192.168.1.2 network. Your PROXYSERVER must be able to reach vncserver_proxyclient_address, because that is the address over which the VNC connection - is proxied. + is proxied. @@ -250,21 +250,21 @@ vncserver_listen=192.168.1.2 Q: My noVNC does not work with recent versions of web browsers. Why? - A: Make sure you have python-numpy + A: Make sure you have python-numpy installed, which is required to support a newer version of - the WebSocket protocol (HyBi-07+). + the WebSocket protocol (HyBi-07+). Q: How do I adjust the dimensions of the VNC window image in the OpenStack dashboard? - A: These values are hard-coded in a Django HTML + A: These values are hard-coded in a Django HTML template. To alter them, edit the _detail_vnc.html template file. The location of this file varies based on Linux distribution. On Ubuntu 12.04, the file is at - /usr/share/pyshared/horizon/dashboards/nova/instances/templates/instances/_detail_vnc.html. + /usr/share/pyshared/horizon/dashboards/nova/instances/templates/instances/_detail_vnc.html.
Modify the width and height parameters, as follows: <iframe src="{{ vnc_url }}" width="720" height="430"></iframe> diff --git a/doc/src/docbkx/openstack-image/bk-imageguide.xml b/doc/src/docbkx/openstack-image/bk-imageguide.xml index 0ab4216410..8184aa1782 100644 --- a/doc/src/docbkx/openstack-image/bk-imageguide.xml +++ b/doc/src/docbkx/openstack-image/bk-imageguide.xml @@ -36,7 +36,7 @@ This guide describes how to obtain, create, and modify virtual machine images that are compatible with - OpenStack. + OpenStack. diff --git a/doc/src/docbkx/openstack-image/centos-example.xml b/doc/src/docbkx/openstack-image/centos-example.xml index 05e9d28ec2..945bc3f8d4 100644 --- a/doc/src/docbkx/openstack-image/centos-example.xml +++ b/doc/src/docbkx/openstack-image/centos-example.xml @@ -75,7 +75,7 @@ Configure TCP/IP The default TCP/IP settings are fine. In particular, ensure - that Enable IPv4 support is enabled with DHCP, which is the default. + that Enable IPv4 support is enabled with DHCP, which is the default. @@ -297,7 +297,7 @@ kernel ... console=tty0 console=ttyS0,115200n8 /etc/sysconfig/network-scripts/ifcfg-eth0 and /etc/udev/rules.d/70-persistent-net.rules during the installation process. However, each time the image boots up, the virtual ethernet card will have a - different MAC address, so this information must be deleted from the configuration file. + different MAC address, so this information must be deleted from the configuration file. There is a utility called virt-sysprep that performs various cleanup tasks such as removing the MAC address references. It will clean up a virtual machine image in @@ -316,6 +316,6 @@ Image is complete The underlying image file you created with qemu-img create (e.g. /tmp/centos-6.4.qcow2) is now ready for uploading to the OpenStack - Image service. + Image service.
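Once the image file is ready, it can be uploaded with the glance client; a sketch assuming your environment is already authenticated and using the example file name above:

$ glance image-create --name="centos-6.4" --disk-format=qcow2 --container-format=bare < /tmp/centos-6.4.qcow2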
diff --git a/doc/src/docbkx/openstack-image/ch_creating_images_automatically.xml b/doc/src/docbkx/openstack-image/ch_creating_images_automatically.xml index e0426623f0..9a2b1e51b6 100644 --- a/doc/src/docbkx/openstack-image/ch_creating_images_automatically.xml +++ b/doc/src/docbkx/openstack-image/ch_creating_images_automatically.xml @@ -61,7 +61,7 @@ echo -n > /lib/udev/rules.d/75-persistent-net-generator.rules >RHEL6.auto. It adds EPEL as a repository and installs the epel-release, cloud-utils, and cloud-init packages, as specified in the - packages section of the file. + packages section of the file. After Oz does the initial OS install using the kickstart file, it will customize the image by doing an update and removing any reference to the eth0 device that libvirt created while Oz was doing the customizing, as specified in the @@ -87,7 +87,7 @@ echo -n > /lib/udev/rules.d/75-persistent-net-generator.rules example: # oz-customize -d3 centos64.tdl centos64-libvirt.xml Oz will invoke libvirt to boot the image inside of KVM, then Oz will ssh into the - instance and perform the customizations. + instance and perform the customizations.
VMBuilder @@ -107,7 +107,7 @@ echo -n > /lib/udev/rules.d/75-persistent-net-generator.rules BoxGrinder  is another tool for creating virtual machine images, which it calls appliances. BoxGrinder can create Fedora, Red Hat Enterprise Linux, or CentOS images. BoxGrinder is currently only - supported on Fedora. + supported on Fedora.
VeeWee diff --git a/doc/src/docbkx/openstack-image/ch_creating_images_manually.xml b/doc/src/docbkx/openstack-image/ch_creating_images_manually.xml index bce2358e90..7b49b2a201 100644 --- a/doc/src/docbkx/openstack-image/ch_creating_images_manually.xml +++ b/doc/src/docbkx/openstack-image/ch_creating_images_manually.xml @@ -21,7 +21,7 @@ installable as the virt-manager package on both Fedora-based and Debian-based systems. This GUI has an embedded VNC client that lets you view and interact with the guest's graphical console. - If you are building the image on a headless server, and you have an X server on your + If you are building the image on a headless server, and you have an X server on your local machine, you can launch virt-manager  using ssh X11 forwarding to access the GUI. Since virt-manager interacts directly with libvirt, you typically need to be root to access it. If you can ssh directly in as root (or with a user that has permissions diff --git a/doc/src/docbkx/openstack-image/ch_introduction.xml b/doc/src/docbkx/openstack-image/ch_introduction.xml index 8b8fecb3bc..241c868ca3 100644 --- a/doc/src/docbkx/openstack-image/ch_introduction.xml +++ b/doc/src/docbkx/openstack-image/ch_introduction.xml @@ -12,7 +12,7 @@ What is a virtual machine image? A virtual machine image is a single file which contains a virtual disk that has a - bootable operating system installed on it. + bootable operating system installed on it. Virtual machine images come in different formats, some of which are described below. In a later chapter, we'll describe how to convert between formats. @@ -43,7 +43,7 @@ Because qcow2 is sparse, it's often faster to convert a raw image to qcow2 and upload - it than to upload the raw file. + it than to upload the raw file. Because raw images don't support snapshots, OpenStack Compute will @@ -99,7 +99,7 @@ VMDK VMware's ESXi hypervisor uses the VMDK (Virtual Machine Disk) format for images. + >VMDK (Virtual Machine Disk) format for images. VDI diff --git a/doc/src/docbkx/openstack-image/ch_obtaining_images.xml b/doc/src/docbkx/openstack-image/ch_obtaining_images.xml index 85e552d9be..e05ce05312 100644 --- a/doc/src/docbkx/openstack-image/ch_obtaining_images.xml +++ b/doc/src/docbkx/openstack-image/ch_obtaining_images.xml @@ -6,14 +6,14 @@ Obtaining images The simplest way to obtain a virtual machine image that works with OpenStack is to - download one that someone else has already created.
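The raw-to-qcow2 conversion recommended in the format discussion above can be done with qemu-img; a sketch with placeholder file names:

$ qemu-img convert -f raw -O qcow2 image.img image.qcow2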
CirrOS (test) images CirrOS is a minimal Linux distribution that was designed for use as a test image on clouds such as OpenStack Compute. You can download a CirrOS image in various formats from the CirrOS Launchpad download page. - If your deployment uses QEMU or KVM, we recommend using the images in qcow2 + If your deployment uses QEMU or KVM, we recommend using the images in qcow2 format. The most recent 64-bit qcow2 image as of this writing is cirros-0.3.0-x86_64-disk.img @@ -32,7 +32,7 @@ Ubuntu 12.04 "Precise Pangolin" is http://cloud-images.ubuntu.com/precise/current/. Scroll to the bottom of the - page for links to images that can be downloaded directly. + page for links to images that can be downloaded directly. If your deployment uses QEMU or KVM, we recommend using the images in qcow2 format. The most recent version of the 64-bit QCOW2 image for Ubuntu 12.04 is SUSE Studio - that you can use to build openSUSE and SLES images. + that you can use to build openSUSE and SLES images. For example, Christian Berendt used openSUSE to create Disk partitions and resize root partition on boot (cloud-init) When you create a new Linux image, the first decision you will need to make is how to partition the disks. The choice of partition method can affect the resizing - functionality, as described below. + functionality, as described below. The size of the disk in a virtual machine image is determined when you initially create the image. However, OpenStack lets you launch instances with different size drives by specifying different flavors. For example, if your image was created with a 5 @@ -121,7 +121,7 @@ boot. If you are able to install the cloud-utils and cloud-init packages, we recommend that when you create your images, you create a single ext3 or ext4 partition (not - managed by LVM). + managed by LVM). Non-Xen without cloud-init/cloud-tools: LVM @@ -130,7 +130,7 @@ boot to modify the partition table. In this case, we recommend using LVM to manage your partitions. Due to a limitation in the Linux kernel (as of this writing), you cannot modify a partition table of a raw disk that has a partition currently mounted, - but you can do this for LVM. + but you can do this for LVM. Your script will need to do something like the following: Detect if there is any additional space on the disk (e.g., parsing diff --git a/doc/src/docbkx/openstack-image/ubuntu-example.xml b/doc/src/docbkx/openstack-image/ubuntu-example.xml index f553563eb1..0caadadeb9 100644 --- a/doc/src/docbkx/openstack-image/ubuntu-example.xml +++ b/doc/src/docbkx/openstack-image/ubuntu-example.xml @@ -60,7 +60,7 @@ Step through the install Step through the install, using the default options. When prompted for a - username, the default (ubuntu) is fine. + username, the default (ubuntu) is fine. Partition the disks @@ -176,7 +176,7 @@ such as /etc/udev/rules.d/70-persistent-net.rules during the installation process. However, each time the image boots up, the virtual ethernet card will have a different MAC address, so this information must be deleted from the configuration - file. + file. There is a utility called virt-sysprep that performs various cleanup tasks such as removing the MAC address references. It will clean up a virtual machine image in @@ -193,6 +193,6 @@ Image is complete The underlying image file you created with qemu-img create (e.g. /tmp/precise.qcow2) is now ready for uploading to the OpenStack - Image service. + Image service.
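The virt-sysprep cleanup mentioned in the examples above is typically run directly against the image file; a sketch using the example path from this section:

$ sudo virt-sysprep -a /tmp/precise.qcow2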
diff --git a/doc/src/docbkx/openstack-image/windows-example.xml b/doc/src/docbkx/openstack-image/windows-example.xml index f3e716baf5..6c58354b17 100644 --- a/doc/src/docbkx/openstack-image/windows-example.xml +++ b/doc/src/docbkx/openstack-image/windows-example.xml @@ -35,7 +35,7 @@ Jordan Rinke's OpenStack Windows resources Jordan Rinke maintains a collection of resources for managing OpenStack Windows virtual machine - guests. + guests. diff --git a/doc/src/docbkx/openstack-install/ap_configuration_files.xml b/doc/src/docbkx/openstack-install/ap_configuration_files.xml index dcb5d63211..ee65d6b2f6 100644 --- a/doc/src/docbkx/openstack-install/ap_configuration_files.xml +++ b/doc/src/docbkx/openstack-install/ap_configuration_files.xml @@ -7,11 +7,11 @@ keystone.conf The configuration file for the Identity Service is /etc/keystone/keystone.conf. + >/etc/keystone/keystone.conf. After you install the Identity Service, modify this file to use SQL for endpoint data and to replace the ADMIN key with the one that was created during the - installation. + installation.
@@ -19,8 +19,8 @@ keystone-paste.ini The Identity Service middleware pipeline configuration file is /etc/keystone/keystone-paste.ini. - You should not need to modify this file. + class="directory">/etc/keystone/keystone-paste.ini. + You should not need to modify this file.
@@ -28,10 +28,10 @@ glance-registry.conf The Image Service registry configuration file is /etc/glance/glance-registry.conf. + >/etc/glance/glance-registry.conf.
This file stores metadata for images. After you install the Image Service, modify this - configuration file. + configuration file.
@@ -39,9 +39,9 @@ glance-registry-paste.ini The Image Service API middleware pipeline configuration file is /etc/glance/glance-registry-paste.ini. + >/etc/glance/glance-registry-paste.ini.
After you install the Image Service, modify this - configuration file. + configuration file.
@@ -50,9 +50,9 @@ glance-api.conf The configuration file for the Image API is /etc/glance/glance-api.conf. + >/etc/glance/glance-api.conf.
After you install the Image Service API, update this - file as shown in the following example. + file as shown in the following example.
@@ -60,8 +60,8 @@ glance-api-paste.ini The Image Service API middleware pipeline configuration file is /etc/glance/glance-api-paste.ini. - You should not need to modify this file. + >/etc/glance/glance-api-paste.ini.
+ You should not need to modify this file. @@ -70,7 +70,7 @@ glance-scrubber.conf An additional configuration file for the Image Service is /etc/glance/glance-scrubber.conf. + >/etc/glance/glance-scrubber.conf.
The scrubber is a utility that cleans up images that have been deleted. @@ -79,12 +79,12 @@
nova.conf The Compute configuration file is /etc/nova/nova.conf. + class="directory">/etc/nova/nova.conf. For a list of configuration options for this file, see the List of Tables on this page: OpenStack Compute Administration - Manual . + Manual . This guide assumes that the IP address of the machine that runs the Identity Service is 192.168.206.130. If the IP address @@ -108,7 +108,7 @@ api-paste.ini The middleware configuration file used by the EC2 API and OpenStack Compute API is /etc/nova/api-paste.ini. + >/etc/nova/api-paste.ini. You should not need to edit it.
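A sketch of two of the nova.conf lines this appendix revolves around — the database connection and Keystone authentication — using the example Identity host above and a placeholder password:

sql_connection=mysql://nova:[YOUR_NOVADB_PASSWORD]@192.168.206.130/nova
auth_strategy=keystone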
@@ -117,9 +117,9 @@ This file contains the credentials used by Compute, Image, and Identity services. You can optionally store this file in - /home/openrc. + /home/openrc.
Do not source this file in the environment from where - you issue commands. + you issue commands.
Run "env | grep OS_" or "env | grep NOVA_" to view what is being used in your environment. diff --git a/doc/src/docbkx/openstack-install/bk_openstackinstallguide.xml b/doc/src/docbkx/openstack-install/bk_openstackinstallguide.xml index 5ca4dfbb32..808c8e8da8 100644 --- a/doc/src/docbkx/openstack-install/bk_openstackinstallguide.xml +++ b/doc/src/docbkx/openstack-install/bk_openstackinstallguide.xml @@ -55,7 +55,7 @@ available through Fedora 17 as well as on RHEL and derivatives through the EPEL repository. It offers explanations for the configuration choices - and provides sample configuration files.
+ and provides sample configuration files.
@@ -267,7 +267,7 @@ xlink:href="https://bugs.launchpad.net/openstack-manuals/+bug/1002294" >1002294, 1010163.
+ >1010163.
diff --git a/doc/src/docbkx/openstack-install/ch_assumptions.xml b/doc/src/docbkx/openstack-install/ch_assumptions.xml index c00fb409f8..577c8f4100 100644 --- a/doc/src/docbkx/openstack-install/ch_assumptions.xml +++ b/doc/src/docbkx/openstack-install/ch_assumptions.xml @@ -112,6 +112,6 @@ the nova-compute service. You only need one nova-network service running in a multi-node install, though if high availability for networks is required, there are additional - options. + options. diff --git a/doc/src/docbkx/openstack-install/ch_installing-openstack-overview.xml b/doc/src/docbkx/openstack-install/ch_installing-openstack-overview.xml index d6e25292db..aae2d44b0a 100644 --- a/doc/src/docbkx/openstack-install/ch_installing-openstack-overview.xml +++ b/doc/src/docbkx/openstack-install/ch_installing-openstack-overview.xml @@ -27,7 +27,7 @@ Review the most supported platforms. Red Hat Enterprise Linux, Scientific Linux, CentOS, Fedora, Debian, and Ubuntu are the most tested platforms - currently. + currently. Install the Identity Service (Keystone). @@ -52,7 +52,7 @@ Configure Compute with FlatDHCP networking using 192.168.100.0/24 as the fixed range for our guest VMs on a bridge named - br100. + br100. Create and initialize the Compute database with @@ -67,7 +67,7 @@ (Swift). - Install the OpenStack Dashboard. + Install the OpenStack Dashboard. Launch the Dashboard. @@ -112,7 +112,7 @@ packages backported on Ubuntu 12.04 LTS using the Cloud Archive as a user with root (or sudo) permission. This guide provides instructions for installing using - Ubuntu packages. + Ubuntu packages.
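On the Ubuntu packages path described above, each service in the checklist is installed with apt-get; for example (package names as shipped in the Ubuntu and Cloud Archive repositories):

$ sudo apt-get install keystone
$ sudo apt-get install nova-compute nova-network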
diff --git a/doc/src/docbkx/openstack-install/ch_instances-running.xml b/doc/src/docbkx/openstack-install/ch_instances-running.xml index 8d2a3da9d3..c5cbac2a19 100644 --- a/doc/src/docbkx/openstack-install/ch_instances-running.xml +++ b/doc/src/docbkx/openstack-install/ch_instances-running.xml @@ -10,7 +10,7 @@ (TCP, UDP, ICMP), ports, and IP addresses are permitted to access instances. Each tenant manages its own list of security groups and starts off with a security group called default. If no security group is specified upon boot, the virtual - machine will be associated with the default security group. + machine will be associated with the default security group. Security groups can be listed by the nova secgroup-list command.$ nova secgroup-list +---------+-------------+ diff --git a/doc/src/docbkx/openstack-install/ch_terminology.xml b/doc/src/docbkx/openstack-install/ch_terminology.xml index 2b60695131..b815106f8c 100644 --- a/doc/src/docbkx/openstack-install/ch_terminology.xml +++ b/doc/src/docbkx/openstack-install/ch_terminology.xml @@ -78,7 +78,7 @@ Beginning with the Cactus release, OpenStack adopted a six month release - schedule. The Havana release is scheduled for October 2013. + schedule. The Havana release is scheduled for October 2013.
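To make the default security group described above useful, rules must be added to it; a sketch that opens SSH and ping from any address (tighten the CIDR for production):

$ nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
$ nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0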
@@ -250,7 +250,7 @@ object storage solutions is as a content delivery network (CDN) for hosting static web content (e.g., images, and media files), since object storage already - provides an HTTP interface. + provides an HTTP interface.
Block storage (SAN) @@ -265,12 +265,12 @@ physical disk (e.g., using the "mount" command in Linux). In OpenStack, the cinder-volume service that forms part of the Compute service provides this type of functionality, and uses iSCSI to expose - remote data as a SCSI disk that is attached to the network. + remote data as a SCSI disk that is attached to the network. Because the data is exposed as a physical device, the end-user is responsible for creating partitions and formatting the exposed disk device. In addition, in OpenStack Compute a device can only be attached to one server at a time, so block storage cannot be used to share data across virtual machine instances - concurrently. + concurrently.
File storage (NAS) @@ -281,7 +281,7 @@ attached storage). Clients access data through the operating system at the file system level: users access the data by mounting a remote file system. Examples of file storage include NFS and GlusterFS. The operating system needs to have the - appropriate client software installed to be able to access the remote file system. + appropriate client software installed to be able to access the remote file system. Currently, OpenStack Compute does not have any native support for this type of file storage inside of an instance. However, there is a You must add every physical volume that is needed for LVM on the Cinder host. You can get a list by running pvdisplay. - Each item in the filter array starts with either an + Each item in the filter array starts with either an "a" for accept, or an "r" for reject. Physical volumes that are needed on the Cinder host begin with "a". The array must end with diff --git a/doc/src/docbkx/openstack-install/compute-config-guest-network.xml b/doc/src/docbkx/openstack-install/compute-config-guest-network.xml index d2c95bceb0..4b8bfeb949 100644 --- a/doc/src/docbkx/openstack-install/compute-config-guest-network.xml +++ b/doc/src/docbkx/openstack-install/compute-config-guest-network.xml @@ -47,7 +47,7 @@ NETMASK=255.255.255.0 $ sudo yum install bridge-utils Ensure that you set up the bridge, although if you use flat_network_bridge=br100 in your nova.conf file, nova will set up the bridge for you when you run - the nova network-create command. + the nova network-create command. sudo brctl addbr br100 Lastly, restart networking to have these changes take effect. (This method is deprecated but "restart diff --git a/doc/src/docbkx/openstack-install/compute-database-mysql.xml b/doc/src/docbkx/openstack-install/compute-database-mysql.xml index bca190449b..c4bdf7b657 100644 --- a/doc/src/docbkx/openstack-install/compute-database-mysql.xml +++ b/doc/src/docbkx/openstack-install/compute-database-mysql.xml @@ -9,11 +9,11 @@ mysql -u root -p Enter the mysql root user's password when prompted. - To configure the MySQL database, create the nova database. + To configure the MySQL database, create the nova database. mysql> CREATE DATABASE nova; Create a MySQL user and password for the newly-created nova database that has full control - of the database. + of the database. mysql> GRANT ALL ON nova.* TO 'nova'@'%' IDENTIFIED BY '[YOUR_NOVADB_PASSWORD]'; mysql> GRANT ALL ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '[YOUR_NOVADB_PASSWORD]'; @@ -32,7 +32,7 @@ ERROR 1045 (28000): Access denied for user 'nova'@'localhost' (using password: YES) Thus, we create a separate User='nova' Host='localhost' entry that - will match with higher precedence. + will match with higher precedence. See the MySQL documentation on connection verification for more details on how MySQL determines which row in the user table it uses when authenticating connections. diff --git a/doc/src/docbkx/openstack-install/compute-database-postgresql.xml b/doc/src/docbkx/openstack-install/compute-database-postgresql.xml index a1d6986795..41cd411704 100644 --- a/doc/src/docbkx/openstack-install/compute-database-postgresql.xml +++ b/doc/src/docbkx/openstack-install/compute-database-postgresql.xml @@ -10,7 +10,7 @@ Start the PostgreSQL command line client by running: sudo su - postgres Enter the postgresql root user's password if prompted. - To configure the database, create the nova database. + To configure the database, create the nova database. 
postgres> psql postgres=# CREATE USER novadbadmin; postgres=# ALTER USER novadbadmin WITH PASSWORD '[YOUR_NOVADB_PASSWORD]'; diff --git a/doc/src/docbkx/openstack-install/compute-minimum-configuration.xml b/doc/src/docbkx/openstack-install/compute-minimum-configuration.xml index 3832e7c49a..827de32bca 100644 --- a/doc/src/docbkx/openstack-install/compute-minimum-configuration.xml +++ b/doc/src/docbkx/openstack-install/compute-minimum-configuration.xml @@ -19,11 +19,11 @@ vncserver_proxyclient_address For the above configuration options, you must use the IP address of the - specific compute host, not the cloud controller. + specific compute host, not the cloud controller. The packages automatically do these steps for a user named nova, but if you are installing as another user you should ensure that the nova.conf file has its owner set to root:nova and its mode set to 0640, - since the file contains your MySQL server’s username and password. + since the file contains your MySQL server’s username and password. If you are installing as another user, you should set permissions correctly. This packaged install ensures that the nova user belongs to the nova group and that the .conf @@ -39,12 +39,12 @@ The hypervisor is set by editing /etc/nova/nova.conf. The hypervisor defaults to kvm, but if you are working within a VM already, switch to qemu on the libvirt_type= line. To use Xen, refer - to the overview in this book for where to install nova components. + to the overview in this book for where to install nova components. You can also configure the nova-compute service (and, for example, configure a hypervisor per compute node) with a separate nova-compute.conf file and then refer to nova-compute.conf in the - nova.conf file. + nova.conf file. Ensure the database connection defines your backend data store by adding a sql_connection line to nova.conf: @@ -57,7 +57,7 @@ sign at the beginning of the line. To see a listing of all possible configuration option settings, see the reference in the OpenStack Compute Administration Manual. + >the reference in the OpenStack Compute Administration Manual. auth_strategy=keystone network_manager=nova.network.manager.FlatDHCPManager diff --git a/doc/src/docbkx/openstack-install/compute-scripted-ubuntu-install.xml b/doc/src/docbkx/openstack-install/compute-scripted-ubuntu-install.xml index 4ed2817dc5..aaa1f9291f 100644 --- a/doc/src/docbkx/openstack-install/compute-scripted-ubuntu-install.xml +++ b/doc/src/docbkx/openstack-install/compute-scripted-ubuntu-install.xml @@ -25,13 +25,13 @@ installs OpenStack Compute with the Image Service and the Identity Service and OpenStack Object Storage. It offers templates for - configuration files plus data scripts. + configuration files plus data scripts. Start the install:cd devstack; ./stack.shIt takes a few minutes; we recommend reading the well-documented script while it is building to learn - more about what is going on. + more about what is going on. @@ -53,7 +53,7 @@ git clone git://github.com/maoy/devstack/tree/upstart.git The devstack repo contains a script that installs OpenStack Compute, the Image Service and the Identity Service and offers templates for configuration - files plus data scripts. + files plus data scripts. Run the upstart install script.
./upstart.sh install diff --git a/doc/src/docbkx/openstack-install/compute-sys-requirements.xml b/doc/src/docbkx/openstack-install/compute-sys-requirements.xml index 7fea38c2a8..3fe9f3381a 100644 --- a/doc/src/docbkx/openstack-install/compute-sys-requirements.xml +++ b/doc/src/docbkx/openstack-install/compute-sys-requirements.xml @@ -10,7 +10,7 @@ production deployment are as follows for the cloud controller nodes and compute nodes for Compute and the Image Service, and object, account, container, and proxy - servers for Object Storage. + servers for Object Storage. @@ -58,7 +58,7 @@ Specifically for virtualization on certain hypervisors on the node or nodes running nova-compute, you need an x86 machine with an AMD processor with SVM extensions (also called AMD-V) or an Intel processor with VT - (virtualization technology) extensions. + (virtualization technology) extensions.For XenServer and XCP refer to the XenServer installation guide and the + (Raring Ringtail).The Grizzly release of OpenStack Compute requires Fedora 16 or later. @@ -99,7 +99,7 @@ Permissions: You can install OpenStack services either as root or as a user with sudo permissions if you configure the sudoers file - to enable all the permissions. + to enable all the permissions.Network Time Protocol: You must install a time synchronization program such as NTP. For Compute, time synchronization avoids problems diff --git a/doc/src/docbkx/openstack-install/configuring-multiple-compute-nodes.xml b/doc/src/docbkx/openstack-install/configuring-multiple-compute-nodes.xml index 22e897a138..191f5318c4 100644 --- a/doc/src/docbkx/openstack-install/configuring-multiple-compute-nodes.xml +++ b/doc/src/docbkx/openstack-install/configuring-multiple-compute-nodes.xml @@ -8,7 +8,7 @@ one server, you can connect an additional nova-compute node to a cloud controller node. This configuration can be reproduced on multiple compute servers to start building a - true multi-node OpenStack Compute cluster. + true multi-node OpenStack Compute cluster.To build out and scale the Compute platform, you spread out services amongst many servers. While there are additional ways to accomplish the build-out, this section @@ -20,7 +20,7 @@ file points to the correct IP addresses for the respective services. Customize the nova.conf example below to match your environment. The CC_ADDR - is the Cloud Controller IP Address. + is the Cloud Controller IP Address. dhcpbridge_flagfile=/etc/nova/nova.conf dhcpbridge=/usr/bin/nova-dhcpbridge @@ -37,7 +37,7 @@ network_manager=nova.network.manager.FlatManager fixed_range= network/CIDR network_size=number of addresses - By default, Nova sets the bridge device based on the + By default, Nova sets the bridge device based on the setting in --flat_network_bridge. Now you can edit /etc/network/interfaces with the following template, updated with your IP information. diff --git a/doc/src/docbkx/openstack-install/example-compute-install-arch.xml b/doc/src/docbkx/openstack-install/example-compute-install-arch.xml index 76d3820d8d..8098099957 100644 --- a/doc/src/docbkx/openstack-install/example-compute-install-arch.xml +++ b/doc/src/docbkx/openstack-install/example-compute-install-arch.xml @@ -33,7 +33,7 @@ likely needed to bundle images and interface with the servers, but a client is not required. Use this configuration for proof of concepts or - development environments. + development environments.
Multiple nodes, Compute-only: You can add more compute @@ -64,7 +64,7 @@ nova.conf is configured to point to the RabbitMQ server and the node can send messages to the server. Multiple installation architectures are possible; here is another example - illustration. + illustration. diff --git a/doc/src/docbkx/openstack-install/example-object-storage-install-arch.xml b/doc/src/docbkx/openstack-install/example-object-storage-install-arch.xml index 3e022aa5ac..6ac995909e 100644 --- a/doc/src/docbkx/openstack-install/example-object-storage-install-arch.xml +++ b/doc/src/docbkx/openstack-install/example-object-storage-install-arch.xml @@ -26,7 +26,7 @@ To increase reliability, you may want to add additional Proxy servers - for performance. + for performance.This document describes each Storage node as a separate zone in the ring. It is recommended to have a minimum of 5 zones. A zone is a group of nodes that is as isolated as possible from other nodes diff --git a/doc/src/docbkx/openstack-install/identity-config-keystone.xml b/doc/src/docbkx/openstack-install/identity-config-keystone.xml index af601e0e1e..fc8fdf9218 100644 --- a/doc/src/docbkx/openstack-install/identity-config-keystone.xml +++ b/doc/src/docbkx/openstack-install/identity-config-keystone.xml @@ -6,7 +6,7 @@ Defining Roles and Users in the Identity Service (Keystone) Before you begin, ensure that the OpenStack Compute and Image services are installed and connect all databases prior to - configuring the Identity Service endpoints. + configuring the Identity Service endpoints.Create tenants first. Here is an example set.ADMIN_TENANT=$(get_id keystone tenant-create --name=admin) SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME) @@ -41,7 +41,7 @@
Define Services and Endpoints Now that all your starter tenants, users, and roles have - been created, let's move on to endpoints. + been created, let's move on to endpoints. First, add all the services that you want the Identity service to connect with. Here's an example using all the available services. diff --git a/doc/src/docbkx/openstack-install/identity-install-keystone.xml b/doc/src/docbkx/openstack-install/identity-install-keystone.xml index a9fbee6468..717aee9303 100644 --- a/doc/src/docbkx/openstack-install/identity-install-keystone.xml +++ b/doc/src/docbkx/openstack-install/identity-install-keystone.xml @@ -40,7 +40,7 @@ database. mysql> CREATE DATABASE keystone; Create a MySQL user for the newly-created keystone database that has full control of the - keystone database. + keystone database. Note Choose a secure password for the keystone user and replace @@ -66,7 +66,7 @@ ERROR 1045 (28000): Access denied for user 'keystone'@'localhost' (using password: YES) Thus, we create a separate User='keystone' Host='localhost' entry - that will match with higher precedence. + that will match with higher precedence. See the MySQL documentation on connection verification for more details on how MySQL determines which row in the user table it uses when authenticating connections. @@ -122,19 +122,19 @@ either follow the manual steps or use a - script. + script.
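A sketch of the service definitions referred to under Define Services and Endpoints above; the names, types, and descriptions are illustrative:

$ keystone service-create --name=nova --type=compute --description="Nova Compute Service"
$ keystone service-create --name=glance --type=image --description="Glance Image Service"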
Setting up tenants, users, and roles - manually You need to minimally define a tenant, user, and role to link the tenant and user as the most basic set of details to get other services authenticating and authorizing with the - Identity service. + Identity service. Scripted method available These are the manual, unscripted steps using the keystone client. A scripted method is available at Setting up tenants, - users, and roles - scripted. + users, and roles - scripted. Typically, you would use a username and password to authenticate with the Identity service. However, at this point @@ -173,7 +173,7 @@ | id | b5815b046cfe47bb891a7b64119e7f80 | | name | demo | +-------------+----------------------------------+ - Create a default user named admin. + Create a default user named admin. $ keystone user-create --tenant-id b5815b046cfe47bb891a7b64119e7f80 --name admin --pass secrete +----------+----------------------------------+ | Property | Value | @@ -201,7 +201,7 @@ $ keystone user-role-add --user-id a4c2d43f80a549a19864c89d759bb3fe --tenant-id b5815b046cfe47bb891a7b64119e7f80 --role-id e3d9d157cc95410ea45d23bbbc2e5c10 Create a service tenant named service. This tenant contains all the - services that we make known to the service catalog. + services that we make known to the service catalog. $ keystone tenant-create --name service --description "Service Tenant" +-------------+----------------------------------+ | Property | Value | @@ -229,7 +229,7 @@ service tenant. $ keystone user-role-add --user-id 46b2667a7807483d983e0b4037a1623b --tenant-id eb7e0c10a99446cfa14c244374549e9d --role-id e3d9d157cc95410ea45d23bbbc2e5c10 - Create a nova service user in the service tenant. + Create a nova service user in the service tenant. $ keystone user-create --tenant-id eb7e0c10a99446cfa14c244374549e9d --name nova --pass nova WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored). +----------+----------------------------------+ @@ -246,7 +246,7 @@ service tenant. $ keystone user-role-add --user-id 54b3776a8707834d983e0b4037b1345c --tenant-id eb7e0c10a99446cfa14c244374549e9d --role-id e3d9d157cc95410ea45d23bbbc2e5c10 - Create a cinder service user in the service tenant. + Create a cinder service user in the service tenant. $ keystone user-create --tenant-id eb7e0c10a99446cfa14c244374549e9d --name cinder --pass openstack WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored). +----------+----------------------------------+ @@ -263,7 +263,7 @@ tenant. $ keystone user-role-add --user-id c95bf79153874ac69b4758ebf75498a6 --tenant-id eb7e0c10a99446cfa14c244374549e9d --role-id e3d9d157cc95410ea45d23bbbc2e5c10 - Create an ec2 service user in the service tenant. + Create an ec2 service user in the service tenant. $ keystone user-create --tenant-id eb7e0c10a99446cfa14c244374549e9d --name ec2 --pass ec2 +----------+----------------------------------+ | Property | Value | @@ -280,7 +280,7 @@ $ keystone user-role-add --user-id 32e7668b8707834d983e0b4037b1345c --tenant-id eb7e0c10a99446cfa14c244374549e9d --role-id e3d9d157cc95410ea45d23bbbc2e5c10 - Create an Object Storage service user in the service tenant. + Create an Object Storage service user in the service tenant. 
$ keystone user-create --tenant-id eb7e0c10a99446cfa14c244374549e9d --name swift --pass swiftpass +----------+----------------------------------+ | Property | Value | @@ -427,7 +427,7 @@ driver = keystone.catalog.backends.sql.Catalog - Keystone allows some URLs to contain special variables, + Keystone allows some URLs to contain special variables, which are automatically substituted with the correct value at runtime. Some examples in this document employ the tenant_id variable, which we use when @@ -439,7 +439,7 @@ driver = keystone.catalog.backends.sql.Catalog %(varname)s notation (e.g., %(tenant_id)s) since $ is interpreted as a special character - by Unix shells. + by Unix shells.
Creating keystone services and service endpoints @@ -658,7 +658,7 @@ driver = keystone.catalog.backends.sql.Catalog xlink:href="https://github.com/openstack/keystone/blob/master/tools/sample_data.sh" >https://github.com/openstack/keystone/blob/master/tools/sample_data.sh with sample data. This script uses 127.0.0.1 for all endpoint - IP addresses. This script also defines services for you. + IP addresses. This script also defines services for you.
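As a sketch of what a service and endpoint definition looks like with the keystone client (the IP address, region, and service ID are placeholders, not values from this patch):
$ keystone service-create --name nova --type compute --description "Nova Compute Service"
$ keystone endpoint-create --region RegionOne --service-id <service-id> \
  --publicurl 'http://192.168.206.130:8774/v2/%(tenant_id)s' \
  --adminurl 'http://192.168.206.130:8774/v2/%(tenant_id)s' \
  --internalurl 'http://192.168.206.130:8774/v2/%(tenant_id)s'
Quoting the URLs keeps %(tenant_id)s intact so that the Identity service substitutes it at runtime, as described above.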
diff --git a/doc/src/docbkx/openstack-install/identity-verify-install.xml b/doc/src/docbkx/openstack-install/identity-verify-install.xml index 79b8d902d2..49696d29d5 100644 --- a/doc/src/docbkx/openstack-install/identity-verify-install.xml +++ b/doc/src/docbkx/openstack-install/identity-verify-install.xml @@ -45,11 +45,11 @@ You should receive a new token in response, this time including the ID - of the tenant you specified. + of the tenant you specified. This verifies that your user account has an explicitly defined role on - the specified tenant, and that the tenant exists as expected. + the specified tenant, and that the tenant exists as expected. You can also set your --os-* variables in your - environment to simplify CLI usage. + environment to simplify CLI usage. Best practice for bootstrapping the first administrative user is to use the OS_SERVICE_ENDPOINT and OS_SERVICE_TOKEN together as environment variables. @@ -84,7 +84,7 @@ +-----------+----------------------------------+ - The command returns a token and the ID of the specified tenant. + The command returns a token and the ID of the specified tenant. This verifies that you have configured your environment variables correctly. @@ -95,12 +95,12 @@ Reminder - Unlike basic authentication/authorization, which can be performed + Unlike basic authentication/authorization, which can be performed against either port 5000 or 35357, administrative commands MUST be performed against the admin API port: 35357). This means that you MUST use port 35357 in your OS_AUTH_URL or --os-auth-url setting when working with - keystone CLI. + keystone CLI. $ keystone user-list diff --git a/doc/src/docbkx/openstack-install/install-config-glance.xml b/doc/src/docbkx/openstack-install/install-config-glance.xml index d52748e4d6..cb5416d282 100644 --- a/doc/src/docbkx/openstack-install/install-config-glance.xml +++ b/doc/src/docbkx/openstack-install/install-config-glance.xml @@ -19,8 +19,8 @@ Configure the backend data store. For MySQL, create a glance MySQL database and a glance MySQL user. Grant the "glance" user full access to the glance MySQL database.Start the MySQL command line client by running: $ mysql -u root -pEnter the MySQL root user's password when prompted. - To configure the MySQL database, create the glance database. mysql> CREATE DATABASE glance; - Create a MySQL user for the newly-created glance database that has full control of the database. + To configure the MySQL database, create the glance database.mysql> CREATE DATABASE glance; + Create a MySQL user for the newly-created glance database that has full control of the database. mysql> GRANT ALL ON glance.* TO 'glance'@'%' IDENTIFIED BY '[YOUR_GLANCEDB_PASSWORD]'; mysql> GRANT ALL ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '[YOUR_GLANCEDB_PASSWORD]'; @@ -39,7 +39,7 @@ ERROR 1045 (28000): Access denied for user 'glance'@'localhost' (using password: YES) Thus, we create a separate User='glance' Host='localhost' entry that - will match with higher precedence. + will match with higher precedence. See the MySQL documentation on connection verification for more details on how MySQL determines which row in the user table it uses when authenticating connections. @@ -60,7 +60,7 @@ storage backend is in file, specified in the glance-api.conf config file in the section [DEFAULT]. - The glance-api service implements + The glance-api service implements versions 1 and 2 of the OpenStack Images API. 
By default, both are enabled by setting these configuration options to True in the glance-api.conf @@ -72,18 +72,18 @@ option to False in the glance-api.conf file. - In order to use the v2 API, you must copy the + In order to use the v2 API, you must copy the necessary SQL configuration from your glance-registry service to your glance-api configuration file. The following instructions assume that you want to use the v2 Image API for your installation. The v1 API is implemented on top of the glance-registry service - while the v2 API is not. + while the v2 API is not. - Most configuration is done via configuration files, with the Glance API server (and + Most configuration is done via configuration files, with the Glance API server (and possibly the Glance Registry server) using separate configuration files. When installing through an operating system package management system, sample configuration files are - installed in /etc/glance. + installed in /etc/glance. This walkthrough installs the image service using a file backend and the Identity service (Keystone) for authentication. diff --git a/doc/src/docbkx/openstack-install/install-config-proxy-node.xml b/doc/src/docbkx/openstack-install/install-config-proxy-node.xml index 445695c753..54762a9767 100644 --- a/doc/src/docbkx/openstack-install/install-config-proxy-node.xml +++ b/doc/src/docbkx/openstack-install/install-config-proxy-node.xml @@ -8,7 +8,7 @@ for the account, container, or object and routes the requests correctly. The proxy server also handles API requests. You enable account management by configuring it in the - proxy-server.conf file. + proxy-server.conf file. It is assumed that all commands are run as the root user. diff --git a/doc/src/docbkx/openstack-install/install-config-storage-nodes.xml b/doc/src/docbkx/openstack-install/install-config-storage-nodes.xml index a44d45b5b3..20ce2132ef 100644 --- a/doc/src/docbkx/openstack-install/install-config-storage-nodes.xml +++ b/doc/src/docbkx/openstack-install/install-config-storage-nodes.xml @@ -73,7 +73,7 @@ lock file = /var/lock/object.lock a local, private network. - Create the swift recon cache directory and set its + Create the swift recon cache directory and set its permissions. # mkdir -p /var/swift/recon # chown -R swift:swift /var/swift/recon diff --git a/doc/src/docbkx/openstack-install/install-nova-volume.xml b/doc/src/docbkx/openstack-install/install-nova-volume.xml index 36aa3d8991..48ef2861af 100644 --- a/doc/src/docbkx/openstack-install/install-nova-volume.xml +++ b/doc/src/docbkx/openstack-install/install-nova-volume.xml @@ -4,7 +4,7 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> Install nova-volume on the cloud controller - While nova-volume does not directly use the network, you must have networking set up for Compute prior to configuring the volumes. + While nova-volume does not directly use the network, you must have networking set up for Compute prior to configuring the volumes. On Ubuntu, install nova-volume and lvm2. 
$ apt-get install lvm2 nova-volume diff --git a/doc/src/docbkx/openstack-install/installing-additional-compute-nodes.xml b/doc/src/docbkx/openstack-install/installing-additional-compute-nodes.xml index b0395bc693..7a08ed8d61 100644 --- a/doc/src/docbkx/openstack-install/installing-additional-compute-nodes.xml +++ b/doc/src/docbkx/openstack-install/installing-additional-compute-nodes.xml @@ -7,7 +7,7 @@ There are many different ways to perform a multinode install of Compute in order to scale out your deployment and run more compute nodes, enabling more virtual machines to run - simultaneously. + simultaneously. Ensure that the networking on each node is configured as documented in the Pre-configuring the network section. @@ -21,7 +21,7 @@ compute node is where you configure the compute network, the networking between your instances. Learn more about high-availability for networking in the Compute Administration - manual. + manual. Because you may need to query the database from the compute node and learn more information about instances, the nova client and MySQL client or PostgresSQL client packages should diff --git a/doc/src/docbkx/openstack-install/installing-mysql.xml b/doc/src/docbkx/openstack-install/installing-mysql.xml index 57dbd3cd08..43015ec149 100644 --- a/doc/src/docbkx/openstack-install/installing-mysql.xml +++ b/doc/src/docbkx/openstack-install/installing-mysql.xml @@ -14,7 +14,7 @@ Use sed to edit /etc/mysql/my.cnf to change bind-address from localhost (127.0.0.1) to - any (0.0.0.0) and restart the mysql service, as root. + any (0.0.0.0) and restart the mysql service, as root. # sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf # service mysql restart diff --git a/doc/src/docbkx/openstack-install/object-storage-howto-install-multinode.xml b/doc/src/docbkx/openstack-install/object-storage-howto-install-multinode.xml index 11f17971b3..75d04241ee 100644 --- a/doc/src/docbkx/openstack-install/object-storage-howto-install-multinode.xml +++ b/doc/src/docbkx/openstack-install/object-storage-howto-install-multinode.xml @@ -14,7 +14,7 @@ Swift can run with other distros, but for this document we will focus on installing on Ubuntu Server, ypmv (your packaging - may vary). + may vary). Basic architecture and terms @@ -59,7 +59,7 @@ - Fewer Storage nodes can be used initially, but a minimum of 5 is recommended for a production cluster. + Fewer Storage nodes can be used initially, but a minimum of 5 is recommended for a production cluster. This document describes each Storage node as a separate zone in the ring. It is recommended to have a minimum of 5 zones. A zone @@ -84,7 +84,7 @@ daemon on the Storage nodes are configured to listen on their STORAGE_LOCAL_NET IP addresses. - Run all commands as the root user + Run all commands as the root user
General OS configuration and partitioning for each node @@ -147,11 +147,11 @@ export PROXY_LOCAL_NET_IP=10.1.2.4 - The random string of text in /etc/swift/swift.conf is used as a salt when hashing to determine mappings in the ring. + The random string of text in /etc/swift/swift.conf is used as a salt when hashing to determine mappings in the ring.
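One common way to create that salt is the following sketch; any unguessable random string works, but it must never change once the cluster is in use:
# cat >/etc/swift/swift.conf <<EOF
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = `od -t x8 -N 8 -A n < /dev/random`
EOF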
Configure the Proxy node - It is assumed that all commands are run as the root user + It is assumed that all commands are run as the root user @@ -171,7 +171,7 @@ openssl req -new -x509 -nodes -out cert.crt -keyout cert.key - If you don't create the cert files, Swift silently uses http internally rather than https. This document assumes that you have created these certs, so if you're following along step-by-step, create them. + If you don't create the cert files, Swift silently uses http internally rather than https. This document assumes that you have created these certs, so if you're following along step-by-step, create them. @@ -274,7 +274,7 @@ swift-ring-builder object.builder add z$ZONE-$STORAGE_LOCAL_NET_IP:6000/$DEVICE - Assuming there are 5 zones with 1 node per zone, ZONE should start at 1 and increment by one for each additional node. + Assuming there are 5 zones with 1 node per zone, ZONE should start at 1 and increment by one for each additional node. @@ -297,7 +297,7 @@ swift-ring-builder object.builder rebalance - Rebalancing rings can take some time. + Rebalancing rings can take some time. @@ -326,7 +326,7 @@ swift-init proxy start
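For reference, the builder files used in the ring commands above are created first with the create subcommand; in this sketch 18 is the part power, 3 the replica count, and 1 the min_part_hours, typical starting values rather than requirements:
# cd /etc/swift
# swift-ring-builder account.builder create 18 3 1
# swift-ring-builder container.builder create 18 3 1
# swift-ring-builder object.builder create 18 3 1
After adding all devices, rebalance each builder and copy the resulting .ring.gz files to every node in the cluster.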
Configure the Storage nodes - Swift should work on any modern filesystem that supports Extended Attributes (XATTRS). We currently recommend XFS as it demonstrated the best overall performance for the swift use case after considerable testing and benchmarking at Rackspace. It is also the only filesystem that has been thoroughly tested. These instructions assume that you are going to devote /dev/sdb1 to an XFS filesystem. + Swift should work on any modern filesystem that supports Extended Attributes (XATTRS). We currently recommend XFS as it demonstrated the best overall performance for the swift use case after considerable testing and benchmarking at Rackspace. It is also the only filesystem that has been thoroughly tested. These instructions assume that you are going to devote /dev/sdb1 to an XFS filesystem. @@ -399,7 +399,7 @@ service rsync start - The rsync daemon requires no authentication, so it should be run on a local, private network. + The rsync daemon requires no authentication, so it should be run on a local, private network. diff --git a/doc/src/docbkx/openstack-install/object-storage-install-ubuntu.xml b/doc/src/docbkx/openstack-install/object-storage-install-ubuntu.xml index 8effa27e2c..ab001ec00b 100644 --- a/doc/src/docbkx/openstack-install/object-storage-install-ubuntu.xml +++ b/doc/src/docbkx/openstack-install/object-storage-install-ubuntu.xml @@ -15,7 +15,7 @@
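A minimal sketch of preparing /dev/sdb1 as described above (the mount options follow the common Swift guidance of the time; adjust for your environment):
# mkfs.xfs -i size=1024 /dev/sdb1
# echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
# mkdir -p /srv/node/sdb1
# mount /srv/node/sdb1
# chown -R swift:swift /srv/node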
Before You Begin Have a copy of the Ubuntu Server installation media on - hand if you are installing on a new server. + hand if you are installing on a new server. This document demonstrates installing a cluster using the following types of nodes: diff --git a/doc/src/docbkx/openstack-install/object-storage-network-planning.xml b/doc/src/docbkx/openstack-install/object-storage-network-planning.xml index 84e0469a2f..8c6caa01a6 100644 --- a/doc/src/docbkx/openstack-install/object-storage-network-planning.xml +++ b/doc/src/docbkx/openstack-install/object-storage-network-planning.xml @@ -8,7 +8,7 @@ public IP addresses for providing access to the APIs and storage network as necessary, this section offers recommendations and required minimum sizes. Throughput of at - least 1000 Mbps is suggested. + least 1000 Mbps is suggested. This document refers to two networks. One is a Public Network for connecting to the Proxy server, and the second is a Storage Network that is not accessible from outside the cluster, to which all of the nodes are @@ -20,5 +20,5 @@ Minimum size: 8 IPs (CIDR /29) Storage Network (RFC1918 IP Range, not publicly routable): This network is utilized for all inter-server communications - within the Object Storage infrastructure. + within the Object Storage infrastructure. Recommended size: 255 IPs (CIDR /24)
diff --git a/doc/src/docbkx/openstack-install/object-storage-sys-requirements.xml b/doc/src/docbkx/openstack-install/object-storage-sys-requirements.xml index a00412e989..d28e515cb6 100644 --- a/doc/src/docbkx/openstack-install/object-storage-sys-requirements.xml +++ b/doc/src/docbkx/openstack-install/object-storage-sys-requirements.xml @@ -50,7 +50,7 @@ Memory: 8 or 12 GB RAM Network: one 1 GB Network Interface Card (NIC) -
+ @@ -69,7 +69,7 @@ Networking: 1000 Mbps are suggested. For OpenStack Object Storage, an external network should connect the outside world to the proxy servers, and the storage network is intended to be - isolated on a private network or multiple private networks. + isolated on a private network or multiple private networks.Database: For OpenStack Object Storage, a SQLite database is part of the OpenStack Object Storage container and account management process. diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/app_core.xml b/doc/src/docbkx/openstack-network-connectivity-admin/app_core.xml index 346ac7dd05..45b7793d88 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/app_core.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/app_core.xml @@ -23,7 +23,7 @@ - + @@ -51,7 +51,7 @@ Overrides any other logging options specified. See the Python logging module documentation for details on logging - configuration files. + configuration files. + log records. + stdout. + --logfile). - + @@ -141,7 +141,7 @@ - +
Hardware Recommendations
Optimized for IOPS due to tracking with SQLite databases.
Object Storage proxy server Processor: dual quad core Network: one 1 GB Network Interface Card (NIC)
debug=False (BoolOpt) Prints debugging output.
verbose=False
log_format=%(asctime)s %(levelname)8s @@ -64,23 +64,23 @@
log_date_format=%Y-%m-%d %H:%M:%S (StrOpt) Format string for %(asctime)s in - log records.
log_file= (StrOpt) (Optional) Name of log file to output to.  If not set, logging goes to - stdout.
log_dir= (StrOpt) (Optional) The directory to keep log files in (will be prepended to - --logfile).
use_syslog=False (BoolOpt) Uses syslog for logging.
syslog_log_facility=LOG_USER
signing_dir = /var/lib/neutron/keystone-signing (StrOpt) The signing directory.
@@ -159,11 +159,11 @@ bind_host=0.0.0.0 - (StrOpt) Server listening IP. + (StrOpt) Server listening IP. bind_port=9696 - (IntOpt) Server listening port. + (IntOpt) Server listening port. api_paste_config=api-paste.ini @@ -174,7 +174,7 @@ api_extensions_path= (StrOpt) Enables custom addition to be - made to the above configuration. + made to the above configuration. policy_file=policy.json @@ -188,19 +188,19 @@ auth_strategy=keystone (StrOpt) The strategy used for authentication. The supported values are - ‘keystone’ and ‘noauth’. + ‘keystone’ and ‘noauth’. - core_plugin=neutron.plugins.sample.SamplePlugin.FakePlugin + core_plugin=neutron.plugins.sample.SamplePlugin.FakePlugin (StrOpt) The plugin to be loaded by the - service. + service. - pagination_max_limit=-1 + pagination_max_limit=-1 (StrOpt) The maximum number of items returned in a single response. A value of 'infinite', or a negative integer means no - limit. + limit. @@ -223,7 +223,7 @@ generated. The first 3 octets will remain unchanged. If the 4h octet is not 00, it will also used. The others will be - randomly generated. + randomly generated. mac_generation_retries=16 @@ -234,7 +234,7 @@ allow_bulk=True (BoolOpt) Enables or disables bulk - create/update/delete operations. + create/update/delete operations. allow_overlapping_ips=False @@ -261,7 +261,7 @@ returns the maximum limit of items as request. If the plugin didn't support that, neutron API can emulate the pagination behavior. The performance of native pagination is better than - emulated pagination. + emulated pagination. @@ -275,7 +275,7 @@ If the plugin supports native sorting, the plugin returns ordered items as request. If the plugin didn't support that, neutron API can emulate the sorting behavior. The performance of - native sorting is better than emulated sorting. + native sorting is better than emulated sorting. @@ -289,22 +289,22 @@ max_dns_nameservers=5 (IntOpt) The maximum amount of DNS nameservers that can be configured per - subnet. + subnet. max_subnet_host_routes=20 (IntOpt) The maximum amount of host routes - that can be configured per subnet. + that can be configured per subnet. - state_path=. + state_path=. (StrOpt) Top level directory for - configuration files. + configuration files. dhcp_lease_duration=120 (IntOpt) The default expiration time, in - seconds, for a DHCP address. + seconds, for a DHCP address. @@ -325,16 +325,16 @@ control_exchange=neutron (StrOpt) AMQP exchange to connect to if - using RabbitMQ or QPID. + using RabbitMQ or QPID. - rpc_backend=neutron.openstack.common.rpc.impl_kombu + rpc_backend=neutron.openstack.common.rpc.impl_kombu (StrOpt) The messaging module to use, defaults to kombu. For qpid, make - use of neutron.openstack.common.rpc.impl_qpid. + use of neutron.openstack.common.rpc.impl_qpid. rpc_thread_pool_size=64 - (IntOpt) Size of RPC thread pool. + (IntOpt) Size of RPC thread pool. rpc_conn_pool_size=30 @@ -347,11 +347,11 @@ from call or multi call. - allowed_rpc_exception_modules='neutron.openstack.common.exception', + allowed_rpc_exception_modules='neutron.openstack.common.exception', 'nova.exception' (ListOpt) Modules of exceptions that are permitted to be recreated upon receiving - exception data from an rpc call. + exception data from an rpc call. fake_rabbit=False @@ -376,17 +376,17 @@ kombu_ssl_version= (StrOpt) SSL version to use (valid only if - SSL enabled). + SSL enabled). kombu_ssl_keyfile= (StrOpt) SSL key file (valid only if SSL - enabled). + enabled). 
kombu_ssl_certfile= (StrOpt) SSL cert file (valid only if SSL - enabled). + enabled). kombu_ssl_ca_certs= @@ -396,37 +396,37 @@ rabbit_host=localhost (StrOpt) IP address of the RabbitMQ - installation. + installation. rabbit_password=guest - Password of the RabbitMQ server. + Password of the RabbitMQ server. rabbit_port=5672 (IntOpt) Port where RabbitMQ server is - running/listening. + running/listening. rabbit_userid=guest (StrOpt) User ID used for RabbitMQ - connections. + connections. rabbit_virtual_host=/ (StrOpt) Location of a virtual RabbitMQ - installation. + installation. rabbit_max_retries=0 (IntOpt) Maximum retries with trying to connect to RabbitMQ. The default of 0 - implies an infinite retry count. + implies an infinite retry count. rabbit_retry_interval=1 (IntOpt) RabbitMQ connection retry - interval. + interval. @@ -445,11 +445,11 @@ qpid_hostname=localhost - (StrOpt) Qpid broker hostname. + (StrOpt) Qpid broker hostname. qpid_port=5672 - (IntOpt) Qpid broker port. + (IntOpt) Qpid broker port. qpid_username= @@ -468,39 +468,39 @@ qpid_reconnect=True - (BoolOpt) Automatically reconnect. + (BoolOpt) Automatically reconnect. qpid_reconnect_timeout=0 (IntOpt) The number of seconds to wait before deciding that a reconnect attempt - has failed. + has failed. qpid_reconnect_limit=0 (IntOpt) The limit for the number of times to reconnect before considering the - connection to be failed. + connection to be failed. qpid_reconnect_interval_min=0 (IntOpt) Minimum seconds between - reconnection attempts. + reconnection attempts. qpid_reconnect_interval_max=0 (IntOpt) Maximum seconds between - reconnection attempts. + reconnection attempts. qpid_reconnect_interval=0 (IntOpt) Equivalent to setting max and min - to the same value. + to the same value. qpid_heartbeat=60 (IntOpt) Seconds between connection - keepalive heartbeats. + keepalive heartbeats. qpid_protocol=tcp @@ -509,7 +509,7 @@ qpid_tcp_nodelay=True - (BoolOpt) Disable Nagle algorithm. + (BoolOpt) Disable Nagle algorithm. @@ -526,32 +526,32 @@ - notification_driver=neutron.openstack.common.notifier.list_notifier + notification_driver=neutron.openstack.common.notifier.list_notifier (MultiStrOpt) Driver or drivers to handle sending notifications. The default is set as notifier as the DHCP agent makes use of - the notifications. + the notifications. default_notification_level=INFO (StrOpt) Default notification level for - outgoing notifications. + outgoing notifications. default_publisher_id=$host (StrOpt) Default publisher_id for outgoing - notifications. + notifications. list_notifier_drivers='neutron.openstack.common.notifier.no_op_notifier' (MultiStrOpt) List of drivers to send - notifications. + notifications. notification_topics='notifications' (ListOpt) AMQP topic used for openstack - notifications. + notifications. @@ -579,7 +579,7 @@ quota_items=network,subnet,port (ListOpt) Resource names that are - supported by the Quotas feature. + supported by the Quotas feature. default_quota=-1 @@ -590,17 +590,17 @@ quota_network=10 (IntOpt) Number of networks allowed per - tenant, and minus means unlimited. + tenant, and minus means unlimited. quota_subnet=10 (IntOpt) Number of subnets allowed per - tenant, and minus means unlimited. + tenant, and minus means unlimited. quota_port=50 (IntOpt) Number of ports allowed per - tenant, and minus means unlimited. + tenant, and minus means unlimited. @@ -612,7 +612,7 @@ For information about the Open vSwitch plugin configurations, see http://wiki.openstack.org/ConfigureOpenvswitch. 
+ >http://wiki.openstack.org/ConfigureOpenvswitch.
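Pulling a few of the options above together, a bare-bones neutron.conf [DEFAULT] section might look like the following sketch; the host addresses and the plugin choice are illustrative only:
[DEFAULT]
core_plugin = neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2
auth_strategy = keystone
allow_overlapping_ips = False
rabbit_host = 192.168.1.10
rabbit_userid = guest
rabbit_password = guest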
Database Access by Plugin @@ -634,7 +634,7 @@ Change this line to ensure that the database values are persistent. - The sqlite database is used only for testing. + The sqlite database is used only for testing. @@ -642,13 +642,13 @@ + implies an infinite retry count. + is lost.
(IntOpt) The number of database reconnection retries. Used if connectivity to the database is lost. -1 - implies an infinite retry count.
reconnect_interval=2 (IntOpt) The database reconnection interval in seconds. Used if connectivity - is lost.
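For example, pointing the plugin at a persistent MySQL database instead of the default sqlite might look like this sketch (the connection string values are placeholders):
[DATABASE]
sql_connection = mysql://neutron:NEUTRON_DBPASS@192.168.1.10:3306/neutron
reconnect_interval = 2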
@@ -656,7 +656,7 @@
OVS Options Specify these parameters in the - ovs section. + ovs section. These OVS options are common to the plugin and agent. @@ -676,14 +676,14 @@ <physical_network>:<vlan_min>:<vlan_max> tuples enumerating ranges of VLAN IDs on named physical networks that are available - for allocation. + for allocation. tunnel_id_ranges= (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that - are available for allocation. + are available for allocation. integration_bridge=br-int @@ -694,7 +694,7 @@ this bridge and then "patched" according to their network connectivity. Do not change this parameter unless you have a - good reason to. + good reason to. tunnel_bridge=br-tun @@ -711,13 +711,13 @@ agent's node-specific OVS bridge names. Each bridge must exist, and should have physical network # interface configured as - a port. + a port. local_ip=10.0.0.3 (StrOpt) The local IP address of this hypervisor. Used only when - tunnel_id_ranges are used. + tunnel_id_ranges are used. enable_tunneling=False @@ -726,7 +726,7 @@ Open vSwitch support its GRE tunneling feature, that is, it is not supported in the Linux kernel source tree. This applies - to both the server and agent. + to both the server and agent. @@ -752,18 +752,18 @@ neutron.conf. If False, the agent polls the database for changes. If False, you must update the relevant database settings on the agent so that it can - access the database. + access the database. polling_interval=2 (IntOpt) Agent's polling interval in - seconds. + seconds. root_helper=sudo (StrOpt) Limits the commands that can be run. See the rootwrap section for more - details. + details. @@ -803,13 +803,13 @@ (IntOpt) Database re-connection retry times. Used if connectivity is lost with the database. -1 implies an - infinite retry count. + infinite retry count. reconnect_interval=2 (IntOpt) Database reconnection interval in seconds. Used if - connectivity is lost. + connectivity is lost. @@ -833,7 +833,7 @@ <physical_network>:<vlan_min>:<vlan_max> tuples enumerating ranges of VLAN IDs on named physical networks that are - available for allocation. + available for allocation. @@ -859,7 +859,7 @@ to agent's node-specific physical network interfaces. Server uses physical network names for validation - but ignores interfaces. + but ignores interfaces. @@ -884,18 +884,18 @@ neutron.conf. If False, the agent polls the database for changes. If False, you must update the relevant database - settings on the agent so that it can access the database. + settings on the agent so that it can access the database. polling_interval=2 (IntOpt) Agent's polling interval in - seconds. + seconds. root_helper=sudo (StrOpt) Limits the commands that can be run. See the rootwrap section for - more details. + more details.   @@ -924,18 +924,18 @@ root_helper=sudo (StrOpt) Limits the commands that can be run. See the rootwrap section for - more details. + more details. dhcp_driver=neutron.agent.linux.dhcp.Dnsmasq (StrOpt) The driver used to manage the - DHCP server. + DHCP server. dhcp_lease_relay_socket=$state_path/dhcp/lease_relay (StrOpt) Location to DHCP lease relay - UNIX domain socket. + UNIX domain socket. use_namespaces=True @@ -984,7 +984,7 @@ dnsmasq_config_file= (StrOpt) Overrides the default dnsmasq - settings with this file. + settings with this file. dnsmasq_dns_server= @@ -1019,16 +1019,16 @@ root_helper=sudo (StrOpt) Limits the commands that can be run. See the rootwrap section for - more details. + more details. 
external_network_bridge=br-ex (StrOpt) Name of bridge used for - external network traffic. + external network traffic. use_namespaces=True - (BoolOpt) Allows overlapping IP. + (BoolOpt) Allows overlapping IP. If you run multiple agents with different IP addresses on the same host, set this parameter to @@ -1040,7 +1040,7 @@ polling_interval=3 (IntOpt) The time, in seconds, between - state poll requests. + state poll requests. metadata_port=9697 @@ -1051,12 +1051,12 @@ (StrOpt) If namespaces is disabled, the l3 agent can only configure a router whose ID matches this - parameter. + parameter. handle_internal_only_routers=True (BoolOpt) Agent should implement - routers with no gateway. + routers with no gateway. gateway_external_network_id= @@ -1082,11 +1082,11 @@ nova_metadata_ip=127.0.0.1 - (StrOpt) The IP address of the Nova metadata service. + (StrOpt) The IP address of the Nova metadata service. nova_metadata_port=8775 - (IntOpt) The TCP port of the Nova metadata service. + (IntOpt) The TCP port of the Nova metadata service. metadata_proxy_shared_secret= @@ -1100,7 +1100,7 @@ admin_user= (StrOpt) The administrative user name for OpenStack Networking, which is defined in OpenStack Identity - (keystone). + (keystone). admin_password= @@ -1113,7 +1113,7 @@ auth_url= (StrOpt) The URL used to validate tokens. For example, - `auth_protocol`://`auth_host`:`auth_port`/v2.0. + `auth_protocol`://`auth_host`:`auth_port`/v2.0. auth_region= @@ -1139,7 +1139,7 @@ interface_driver= (StrOpt) The driver used to manage the - virtual interface. + virtual interface. ovs_use_veth=False @@ -1197,25 +1197,25 @@ network_device_mtu= (StrOpt) MTU setting for device. Only - relevant if using Open vSwitch. + relevant if using Open vSwitch. meta_flavor_driver_mappings= (StrOpt). Mappings between flavors and drivers. Only relevant if using - MetaPlugin. + MetaPlugin. resync_interval=30 (IntOpt) If an exception occurs on the neutron-server service, the DHCP agent ensures that it syncs with the neutron.conf configuration. The validation about syncing occurs every - resync_interval seconds. + resync_interval seconds. diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_flat.xml b/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_flat.xml index ab6fa0ada6..c619dd9e70 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_flat.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_flat.xml @@ -5,7 +5,7 @@ This section describes how to install the OpenStack Networking service and its components for the "Use Case: Single Flat Network - ". + ". The diagram below shows the setup. For simplicity all of the nodes should have one interface for management traffic and one or more interfaces for traffic to and from VMs. The management diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_multi_dhcp_agents.xml b/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_multi_dhcp_agents.xml index 60fdd4c0a9..3c22744a94 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_multi_dhcp_agents.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_multi_dhcp_agents.xml @@ -183,7 +183,7 @@ interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
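Combining the OVS options listed in the tables above, a GRE-tunneling configuration might look like the following sketch (the IP address and ID range are illustrative):
[OVS]
enable_tunneling = True
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 10.0.0.3
For VLAN-based deployments you would instead set network_vlan_ranges and bridge_mappings, for example network_vlan_ranges = physnet1:1000:2999 and bridge_mappings = physnet1:br-eth1.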
Commands in agent management and scheduler extensions The following commands require the tenant running the command to have an admin role. - Please ensure that the following environment variables are set. + Please ensure that the following environment variables are set. These are used by the various clients to access Keystone. @@ -238,7 +238,7 @@ $ neutron net-list the period defined by the option 'agent_down_time' in neutron server's neutron.conf. Otherwise the 'alive' is - 'xxx'. + 'xxx'. List the DHCP agents hosting a given network @@ -274,7 +274,7 @@ $ neutron net-list Show the agent detail - information. + information. The 'agent-list' command givs very general information about agents. To obtain the detail @@ -474,7 +474,7 @@ $ nova list disabled, we can remove the agent safely. We should remove the resources on the agent before we delete the agent - itself. To run the commands below, we need first stop the DHCP agent on HostA. + itself.To run the commands below, we need first stop the DHCP agent on HostA. $ neutron agent-update --admin-state-up False a0c1c21c-d4f4-4577-9ec7-908f2d48622d $ neutron agent-list +--------------------------------------+--------------------+-------+-------+----------------+ diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_single_router.xml b/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_single_router.xml index b144418f07..2cd02d1715 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_single_router.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/app_demo_single_router.xml @@ -11,7 +11,7 @@ xlink:href="http://docs.openstack.org/grizzly/basic-install/content/basic-install_intro.html" >Basic Install document except for the Neutron, Open-vSwitch, and Virtual Networking sections on each of the - nodes. + nodes. The Basic Install document uses gre tunnels. This document describes how to use vlans for separation instead. @@ -46,7 +46,7 @@ Runs the OpenStack Networking service, OpenStack Identity and all of the OpenStack Compute services that are - required to deploy a VM. + required to deploy a VM. The service must have at least two network interfaces. The first should be connected to the "Management Network" to @@ -58,9 +58,9 @@ Compute Runs OpenStack Compute and the OpenStack - Networking L2 agent. + Networking L2 agent. This node will not have access the - public network. + public network. The node must have at least two network interfaces. The first is used to communicate with the controller node, @@ -71,12 +71,12 @@ Network Runs OpenStack Networking L2 agent, DHCP - agent, and L3 agent. + agent, and L3 agent. This node will have access to the public network. The DHCP agent will allocate IP addresses to the VMs on the network. The L3 agent will perform NAT and enable the - VMs to access the public network. + VMs to access the public network. The node must have at least three network interfaces. The first communicates with the controller node through the @@ -396,7 +396,7 @@ echo "source novarc">>.bashrc You can repeat this step to add more interfaces for other networks that belong - to other tenants. + to other tenants. 
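The environment variables referred to at the start of this section are the standard client credentials; a typical set looks like the following (every value is a placeholder for your own deployment, not something defined by this patch):
export OS_USERNAME=admin
export OS_PASSWORD=password
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://controller:5000/v2.0/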
diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/bk-networking-admin-guide.xml b/doc/src/docbkx/openstack-network-connectivity-admin/bk-networking-admin-guide.xml index ee93f931ba..98c2ed4292 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/bk-networking-admin-guide.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/bk-networking-admin-guide.xml @@ -42,7 +42,7 @@ This document is for administrators - interested in running the OpenStack Networking Service. + interested in running the OpenStack Networking Service. diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_config.xml b/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_config.xml index f7f4fd6b38..7b4c8f482c 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_config.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_config.xml @@ -18,7 +18,7 @@ config contains the plugin specific flags. The plugin that is run on the service is loaded via the configuration parameter ‘core_plugin’. In some cases a plugin may have an agent that performs the actual networking. Specific configuration details can be seen in - the Appendix - Configuration File Options. + the Appendix - Configuration File Options. Most plugins require a SQL database. After installing and starting the database server, set a password for the root account and delete the anonymous accounts: $> mysql -u root @@ -101,7 +101,7 @@ mysql> grant all on <database-name>.* to '<user-name>'@'%'; All of the plugin configuration files options can be found in the Appendix - - Configuration File Options. + Configuration File Options.
DHCP Agent @@ -143,17 +143,17 @@ mysql> grant all on <database-name>.* to '<user-name>'@'%'; - All of the DHCP agent configuration options can be found in the Appendix - Configuration File Options. + All of the DHCP agent configuration options can be found in the Appendix - Configuration File Options.
Namespace - By default the DHCP agent makes use of Linux network namespaces in order to + By default the DHCP agent makes use of Linux network namespaces in order to support overlapping IP addresses. Requirements for network namespaces support are - described in the Limitation section. + described in the Limitation section. If the Linux installation does not support network namespaces, you must disable the use of network namespaces in the DHCP agent config - file (The default value of use_namespaces is True). + file (The default value of use_namespaces is True). use_namespaces = False
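Putting the DHCP agent settings together, a small dhcp_agent.ini sketch; the OVS interface driver shown here is only one option, and Linux Bridge deployments use the BridgeInterfaceDriver seen later in this guide:
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True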
@@ -203,7 +203,7 @@ mysql> grant all on <database-name>.* to '<user-name>'@'%';
- The L3 agent communicates with the OpenStack Networking server via the OpenStack Networking API, so the + The L3 agent communicates with the OpenStack Networking server via the OpenStack Networking API, so the following configuration is required: OpenStack Identity authentication: @@ -219,27 +219,27 @@ admin_password $SERVICE_PASSWORD - All of the L3 agent configuration options can be found in the Appendix - Configuration File Options. + All of the L3 agent configuration options can be found in the Appendix - Configuration File Options.
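A sketch of those authentication settings in l3_agent.ini; the endpoint and credentials are placeholders, and admin_tenant_name is assumed here from contemporary configurations rather than taken from this patch:
[DEFAULT]
auth_url = http://192.168.1.10:35357/v2.0
admin_tenant_name = service
admin_user = neutron
admin_password = $SERVICE_PASSWORD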
Namespace - By default the L3 agent makes use of Linux network namespaces in order to support + By default the L3 agent makes use of Linux network namespaces in order to support overlapping IP addresses. Requirements for network namespaces support are described - in the Limitation section. + in the Limitation section. If the Linux installation does not support network namespaces, you must disable the use of network namespaces in the L3 agent config file - (The default value of use_namespaces is True). + (The default value of use_namespaces is True). use_namespaces = False - When use_namespaces is set to False, only one router ID can be supported per + When use_namespaces is set to False, only one router ID can be supported per node. This must be configured via the configuration variable - router_id. + router_id. # If use_namespaces is set to False then the agent can only configure one router. # This is done by setting the specific router_id. router_id = 1064ad16-36b7-4c2f-86f0-daa2bcbd6b2a - To configure it, you need to run the OpenStack Networking service and create a router, and + To configure it, you need to run the OpenStack Networking service and create a router, and then set the ID of the created router as router_id in the L3 - agent configuration file. + agent configuration file. $ neutron router-create myrouter1 Created a new router: +-----------------------+--------------------------------------+ @@ -256,7 +256,7 @@
Multiple Floating IP Pools - The L3 API in OpenStack Networking supports + The L3 API in OpenStack Networking supports multiple floating IP pools. In OpenStack Networking, a floating IP pool is represented as an external network and a floating IP is allocated from a subnet @@ -267,24 +267,24 @@ Created a new router: >'gateway_external_network_id' in L3 agent configuration file indicates the external network that the L3 agent handles. You can run - multiple L3 agent instances on one host. - In addition, when you run multiple L3 agents, make sure that + In addition, when you run multiple L3 agents, make sure that handle_internal_only_routers is set to True only for one L3 agent in an OpenStack Networking deployment and set to False for all other L3 agents. Since the - default value of this parameter is True, you need to configure it carefully. - Before starting L3 agents, you need to create routers and external networks, then - update the configuration files with UUID of external networks and start L3 agents. - For the first agent, invoke it with the following l3_agent.ini where - handle_internal_only_routers is True. + default value of this parameter is True, you need to configure it carefully. + Before starting L3 agents, you need to create routers and external networks, then + update the configuration files with UUID of external networks and start L3 agents. + For the first agent, invoke it with the following l3_agent.ini where + handle_internal_only_routers is True. handle_internal_only_routers = True gateway_external_network_id = 2118b11c-011e-4fa5-a6f1-2ca34d372c35 external_network_bridge = br-ex python /opt/stack/neutron/bin/neutron-l3-agent --config-file /etc/neutron/neutron.conf --config-file=/etc/neutron/l3_agent.ini - For the second (or later) agent, invoke it with the following l3_agent.ini where - handle_internal_only_routers is False. + For the second (or later) agent, invoke it with the following l3_agent.ini where + handle_internal_only_routers is False. handle_internal_only_routers = False gateway_external_network_id = e828e54c-850a-4e74-80a8-8b79c6a285d8 external_network_bridge = br-ex-2 diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_features.xml b/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_features.xml index 5d53734ab6..d4d322d44b 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_features.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_features.xml @@ -263,7 +263,7 @@ and for details on common models of deploying OpenStack Networking L3 routers. - The L3 router provides basic NAT capabilities on + The L3 router provides basic NAT capabilities on "gateway" ports that uplink the router to external networks. This router SNATs all traffic by default, and supports "Floating IPs", which creates a static one-to-one @@ -273,7 +273,7 @@ private networks to other hosts on the external network (and often to all hosts on the Internet). Floating IPs can be allocated and then mapped from one OpenStack Networking - port to another, as needed. + port to another, as needed.
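As a usage sketch of the floating IP workflow just described (the IDs are placeholders):
$ neutron floatingip-create <external-network-id>
$ neutron floatingip-associate <floatingip-id> <internal VM port-id>
The second command also appears in the API tables that follow.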
L3 API Abstractions @@ -295,14 +295,14 @@ - + + not be unique. @@ -317,7 +317,7 @@ + currently operational. @@ -356,7 +356,7 @@ - + @@ -381,7 +381,7 @@ + associated. @@ -406,7 +406,7 @@ + own.
id uuid-str generated UUID for the router.
name String None Human-readable name for the router. Might - not be unique.
admin_state_up String N/A Indicates whether the router is - currently operational.
tenant_id id uuid-str generated UUID for the floating IP.
floating_ip_address Read-only value indicating the router that connects the external network to the associated internal port, if a port is - associated.
port_id N/A Owner of the Floating IP. Only admin users can specify a tenant_id other than its - own.
@@ -550,7 +550,7 @@ neutron floatingip-associate <floatingip-id> <internal VM port-id> < id uuid-str generated - UUID for the security group. + UUID for the security group. name @@ -574,7 +574,7 @@ neutron floatingip-associate <floatingip-id> <internal VM port-id> < N/A Owner of the security group. Only admin users can specify a tenant_id other than - their own. + their own. @@ -597,7 +597,7 @@ neutron floatingip-associate <floatingip-id> <internal VM port-id> < id uuid-str generated - UUID for the security group rule. + UUID for the security group rule. security_group_id @@ -658,7 +658,7 @@ neutron floatingip-associate <floatingip-id> <internal VM port-id> < N/A Owner of the security group rule. Only admin users can specify a tenant_id other - than its own. + than its own. @@ -779,7 +779,7 @@ neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-i id uuid-str generated - UUID for the QoS queue. + UUID for the QoS queue. default @@ -794,7 +794,7 @@ neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-i name String None - Name for QoS queue. + Name for QoS queue. min @@ -815,13 +815,13 @@ neutron lb-vip-create --name myvip --protocol-port 80 --protocol HTTP --subnet-i String untrusted by default Whether QoS marking should be - trusted or untrusted. + trusted or untrusted. dscp Integer 0 - DSCP Marking value. + DSCP Marking value. tenant_id diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_operational_features.xml b/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_operational_features.xml index dd6ca91846..a80f7fbe40 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_operational_features.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/ch_adv_operational_features.xml @@ -160,8 +160,8 @@ notification_topics = notifications_one,notifications_two
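As a brief usage sketch of the security group API described in the tables above (the group name, description, and ID are placeholders):
$ neutron security-group-create webservers --description "allow web traffic"
$ neutron security-group-rule-create --direction ingress --protocol tcp --port-range-min 80 --port-range-max 80 <security-group-id>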
Quotas - Quota is a function to limit the number of resources. You can enforce a default quota for all - tenants. You will get an error when you try to create more resources than the limit. + tenants. You will get an error when you try to create more resources than the limit. $ neutron net-create test_net Quota exceeded for resources: ['network'] @@ -171,14 +171,14 @@
Basic quota configuration - In the OpenStack Networking default quota mechanism, all + In the OpenStack Networking default quota mechanism, all tenants have the same quota value, that is, the number of resources - that a tenant can create. This is enabled by default. - The value of quota is defined in the OpenStack Networking configuration file + that a tenant can create. This is enabled by default. + The value of quota is defined in the OpenStack Networking configuration file (neutron.conf). If you want to disable quotas for a specific resource (e.g., network, subnet, port), remove the corresponding item from quota_items. Each of the quota values in the example below is the - default value. + default value. [quotas] # resource name(s) that are supported in quota features quota_items = network,subnet,port @@ -218,7 +218,7 @@
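For instance, to raise the defaults shown above you might set the following; the values are arbitrary examples:
[quotas]
quota_network = 20
quota_subnet = 20
quota_port = 100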
Per-tenant quota configuration - OpenStack Networking also supports per-tenant quota limit by quota extension API. To + OpenStack Networking also supports per-tenant quota limit by quota extension API. To enable per-tenant quota, you need to set quota_driver in neutron.conf. quota_driver = neutron.db.quota_db.DbQuotaDriver diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/ch_auth.xml b/doc/src/docbkx/openstack-network-connectivity-admin/ch_auth.xml index 7dd7416444..5028ff44ae 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/ch_auth.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/ch_auth.xml @@ -37,7 +37,7 @@ only for the network resource). The actual authorization policies enforced in OpenStack Networking might vary from deployment to - deployment. + deployment. The policy engine reads entries from the + catalog template file (default_catalog.templates). catalog.$REGION.network.publicURL = http://$IP:9696 catalog.$REGION.network.adminURL = http://$IP:9696 catalog.$REGION.network.internalURL = http://$IP:9696 @@ -84,7 +84,7 @@ catalog.$Region.network.name = Network Service $ keystone user-role-add --user_id $NEUTRON_USER --role_id $ADMIN_ROLE --tenant_id service - See the OpenStack Installation Guides for more details + See the OpenStack Installation Guides for more details about creating service entries and service users.
@@ -118,7 +118,7 @@ catalog.$Region.network.name = Network Service OpenStack Networking. Inadvertently running the nova-network process while using OpenStack Networking can cause problems, as can stale iptables rules pushed - down by previously running nova-network. + down by previously running nova-network. The next section describes nova.conf settings that are required @@ -187,8 +187,8 @@ catalog.$Region.network.name = Network Service - security_group_api : must be set to 'neutron' so that all security group - requests are proxied to the OpenStack Network Service. + security_group_api : must be set to 'neutron' so that all security group + requests are proxied to the OpenStack Network Service.
@@ -201,16 +201,16 @@ catalog.$Region.network.name = Network Service setting the following fields in nova.conf: - service_neutron_metadata_proxy: must be set to 'true', otherwise nova-api - will not properly respond to requests from the neutron-metadata-agent. + service_neutron_metadata_proxy: must be set to 'true', otherwise nova-api + will not properly respond to requests from the neutron-metadata-agent. - neutron_metadata_proxy_shared_secret: a string "password" value that + neutron_metadata_proxy_shared_secret: a string "password" value that should also be configured in the metadata_agent.ini file in order to authenticate requests made for metadata. The default value of the empty string in both files will allow metadata to function, but will be insecure if any non-trusted entities have access to the metadata APIs exposed by - nova-api. + nova-api. As a precaution, even when using neutron_metadata_proxy_shared_secret, it is recommended that @@ -218,7 +218,7 @@ catalog.$Region.network.name = Network Service tenants. Instead, run a dedicated set of nova-api instances for metadata available only on your management network. Whether a given nova-api instance exposes metadata APIs is determined by the value of 'enabled_apis' in its - nova.conf. + nova.conf.
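Concretely, the shared secret described above ends up in two places; a sketch with a placeholder value:
# nova.conf
service_neutron_metadata_proxy = true
neutron_metadata_proxy_shared_secret = METADATA_SECRET
# metadata_agent.ini
metadata_proxy_shared_secret = METADATA_SECRET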
Vif-plugging Configuration @@ -295,7 +295,7 @@ catalog.$Region.network.name = Network Service Example values for the above settings, assuming a cloud controller node running OpenStack Compute and OpenStack Networking with an IP address of 192.168.1.2 - and vif-plugging using the LibvirtHybridOVSBridgeDriver. + and vif-plugging using the LibvirtHybridOVSBridgeDriver. network_api_class=nova.network.neutronv2.api.API neutron_url=http://192.168.1.2:9696 neutron_auth_strategy=keystone diff --git a/doc/src/docbkx/openstack-network-connectivity-admin/ch_install.xml b/doc/src/docbkx/openstack-network-connectivity-admin/ch_install.xml index da468990e8..36aaee42ac 100644 --- a/doc/src/docbkx/openstack-network-connectivity-admin/ch_install.xml +++ b/doc/src/docbkx/openstack-network-connectivity-admin/ch_install.xml @@ -5,7 +5,7 @@ xml:id="ch_install"> OpenStack Networking Installation Learn how to install and get the OpenStack Networking service - up and running. + up and running.
Initial Prerequisites @@ -15,11 +15,11 @@ OpenStack Networking, we strongly recommend that you use Ubuntu 12.04 or 12.10 or Fedora 17 or 18. These platforms have OpenStack Networking packages and receive significant - testing. + testing. OpenStack Networking requires at least dnsmasq 2.59, - which contains all the necessary options. + which contains all the necessary options. @@ -89,7 +89,7 @@ agents to communicate with the main neutron-server process. If your plugin requires agents, they can use the same RPC - mechanism used by other OpenStack components like Nova. + mechanism used by other OpenStack components like Nova. To use RabbitMQ as the message bus for RPC @@ -114,7 +114,7 @@ file should be copied to and used on all hosts running neutron-server or any neutron-*-agent binaries. + >neutron-*-agent binaries.
@@ -123,7 +123,7 @@ with multiple hosts, you will need to use either tunneling or vlans to isolate traffic from multiple networks. Tunneling is easier to deploy because it does not require - configuring VLANs on network switches. + configuring VLANs on network switches. The following procedure uses tunneling: diff --git a/doc/src/docbkx/openstack-object-storage-admin/objectstorage-config-reference.xml b/doc/src/docbkx/openstack-object-storage-admin/objectstorage-config-reference.xml index 2412697add..c89e854bcb 100644 --- a/doc/src/docbkx/openstack-object-storage-admin/objectstorage-config-reference.xml +++ b/doc/src/docbkx/openstack-object-storage-admin/objectstorage-config-reference.xml @@ -142,7 +142,7 @@ can be overridden in any of the other sections. use - + The paste.deploy entry point for the object server. For most cases, this should be egg:swift#object. @@ -438,7 +438,7 @@ can be overridden in any of the other sections. When configured, the StaticWeb WSGI middleware serves container data as a static web site with index file and error file resolution and optional file listings. This mode is - normally only active for anonymous requests. + normally only active for anonymous requests. @@ -616,7 +616,7 @@ can be overridden in any of the other sections. - + @@ -962,7 +962,7 @@ can be overridden in any of the other sections. - + @@ -1210,12 +1210,12 @@ can be overridden in any of the other sections. - + - + @@ -1235,7 +1235,7 @@ can be overridden in any of the other sections. - + @@ -1328,7 +1328,7 @@ can be overridden in any of the other sections. - + diff --git a/doc/src/docbkx/openstack-object-storage-admin/objectstorageadmin.xml b/doc/src/docbkx/openstack-object-storage-admin/objectstorageadmin.xml index 2818f67a4d..e169a53f99 100644 --- a/doc/src/docbkx/openstack-object-storage-admin/objectstorageadmin.xml +++ b/doc/src/docbkx/openstack-object-storage-admin/objectstorageadmin.xml @@ -6,13 +6,13 @@ System Administration for OpenStack Object StorageBy understanding the concepts inherent to the Object Storage system you can better monitor and administer your storage - solution. + solution.
Understanding How Object Storage Works This section offers a brief overview of each concept in - administering Object Storage. + administering Object Storage. The Ring @@ -22,7 +22,7 @@ objects. When other components need to perform any operation on an object, container, or account, they need to interact with the appropriate ring to - determine its location in the cluster. + determine its location in the cluster. The Ring maintains this mapping using zones, devices, partitions, and replicas. Each partition in the ring is replicated, by default, 3 times across the @@ -60,11 +60,11 @@ For each request, it will look up the location of the account, container, or object in the ring (see below) and route the request accordingly. The public API is - also exposed through the Proxy Server. + also exposed through the Proxy Server. A large number of failures are also handled in the Proxy Server. For example, if a server is unavailable for an object PUT, it will ask the ring for a hand-off - server and route there instead. + server and route there instead. When objects are streamed to or from an object server, they are streamed directly through the proxy server to or from the user – the proxy server does not @@ -120,24 +120,24 @@ Replication is designed to keep the system in a consistent state in the face of temporary error - conditions like network outages or drive failures. + conditions like network outages or drive failures. The replication processes compare local data with each remote copy to ensure they all contain the latest version. Object replication uses a hash list to quickly compare subsections of each partition, and container and account replication use a combination of - hashes and shared high water marks. + hashes and shared high water marks. Replication updates are push based. For object replication, updating is just a matter of rsyncing files to the peer. Account and container replication push missing records over HTTP or rsync whole database - files. + files. The replicator also ensures that data is removed from the system. When an item (object, container, or account) is deleted, a tombstone is set as the latest version of the item. The replicator will see the tombstone and ensure that the item is removed from the - entire system. + entire system. Updaters @@ -156,7 +156,7 @@ container server did not update the object listing, and so the update would be queued for a later update. Container listings, therefore, may not immediately - contain the object. + contain the object. In practice, the consistency window is only as large as the frequency at which the updater runs and may not even be noticed as the proxy server will route listing @@ -175,7 +175,7 @@ will replace the bad file from another replica. If other errors are found they are logged (for example, an object’s listing can’t be found on any container - server it should be). + server it should be).
@@ -198,7 +198,7 @@ <timestamp>.data). The object's data is stored in the file, and the object's metadata is stored in the extended attributes (xattrs) of - the file. + the file.If a user deletes the object, the .data file is deleted and a <timestamp>.ts @@ -369,7 +369,7 @@ pushed to all of the servers in the cluster. For more information about building rings, running swift-ring-builder with no options will display help text - with available commands and options. + with available commands and options.
@@ -537,7 +537,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 that swift waits for processes to die, and notably you can pass the --run-dir flag to swift-init to set where PIDs - will be stored. + will be stored.
Logging Considerations @@ -1502,7 +1502,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 Additional Features This section aims to detail a number of additional - features in Swift and their configuration. + features in Swift and their configuration.
Health Check @@ -1545,7 +1545,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 Domain Remap Domain Remap is middleware that translates container and account parts of a domain to path parameters that - the proxy server understands. + the proxy server understands.
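To make that translation concrete, here is a hypothetical Python sketch of the rewrite, assuming a configured storage_domain of example.com (the middleware's real behavior, including reseller-prefix handling, should be checked against your Swift version):

def remap(host, path, storage_domain='example.com'):
    # "container.AUTH_acct.example.com/obj" becomes the path form
    # "/v1/AUTH_acct/container/obj" that the proxy server expects.
    prefix = host[:-len('.' + storage_domain)]
    parts = prefix.split('.')
    if len(parts) == 2:
        container, account = parts
        return '/v1/%s/%s%s' % (account, container, path)
    return '/v1/%s%s' % (parts[0], path)

print(remap('photos.AUTH_test.example.com', '/img0001.jpg'))
# -> /v1/AUTH_test/photos/img0001.jpg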
object-server.conf staticweb Options in the [filter:staticweb] section
use The paste.deploy entry point for the container server. For most cases, this should be egg:swift#container.
use Entry point for paste.deploy for the account server. For most cases, this should be egg:swift#account.
cert_file Path to the ssl .crt
key_file Path to the ssl .key
use Entry point for paste.deploy for the proxy server. For most cases, this should be egg:swift#proxy.
use Entry point for paste.deploy to use for auth, set to: egg:swauth#swauth to use the swauth downloaded from https://github.com/gholt/swauth
@@ -1601,7 +1601,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 CNAME Lookup is middleware that translates an unknown domain in the host header to something that ends with the configured storage_domain by looking up - the given domain's CNAME record in DNS. + the given domain's CNAME record in DNS.
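A rough illustration of that lookup in Python, assuming the third-party dnspython (2.x) package; the middleware itself follows CNAME chains only up to a configurable depth:

import dns.resolver  # third-party: dnspython

def resolve_storage_host(host, storage_domain='example.com', max_depth=2):
    # Follow CNAMEs until the name ends with the configured storage domain.
    for _ in range(max_depth):
        if host.endswith(storage_domain):
            return host
        answer = dns.resolver.resolve(host, 'CNAME')  # raises if no CNAME
        host = str(answer[0].target).rstrip('.')
    return None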
Configuration options for filter:domain_remap in proxy-server.conf file
@@ -1701,7 +1701,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 uploads to specific locations in Swift. Note that changing the X-Account-Meta-Temp-URL-Key will invalidate any previously generated temporary URLs - within 60 seconds (the memcache time for the key). + within 60 seconds (the memcache time for the key).A script called swift-temp-url, distributed with the swift source code, eases temporary URL creation: @@ -1711,7 +1711,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 temp_url_expires=1374497657 The path returned by the above command is prefixed with the swift - storage hostname. + storage hostname.
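The signature that swift-temp-url produces can also be computed directly: it is an HMAC-SHA1 over the request method, the expiry timestamp, and the object path, keyed with the account's Temp-URL key. A minimal sketch (key and path are illustrative):

import hmac
import time
from hashlib import sha1

key = b'mykey'  # the value stored in X-Account-Meta-Temp-URL-Key
method = 'GET'
path = '/v1/AUTH_account/container/object'
expires = int(time.time()) + 3600  # valid for one hour

body = '%s\n%d\n%s' % (method, expires, path)
sig = hmac.new(key, body.encode('utf-8'), sha1).hexdigest()
print('%s?temp_url_sig=%s&temp_url_expires=%d' % (path, sig, expires))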
Configuration options for filter:cname_lookup in proxy-server.conf file
@@ -1750,7 +1750,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 Name Check FilterName Check is a filter that disallows any paths that contain defined forbidden characters or that exceed a - defined length. + defined length.
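Illustratively, the filter's checks amount to something like the following Python sketch; forbidden_chars and maximum_length are the filter's documented options, and the values here are assumptions:

import re

FORBIDDEN_CHARS = "'\"`<>"  # example value for forbidden_chars
MAXIMUM_LENGTH = 255        # example value for maximum_length

def name_allowed(path):
    # Reject paths that are too long or contain a forbidden character.
    if len(path) > MAXIMUM_LENGTH:
        return False
    return re.search('[%s]' % re.escape(FORBIDDEN_CHARS), path) is None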
Configuration options for filter:tempurl in proxy-server.conf file
@@ -1848,7 +1848,7 @@ net.ipv4.netfilter.ip_conntrack_max = 262144 - @@ -2039,7 +2039,7 @@ $ swift-dispersion-report -j as a single object. It is different in that it does not rely on eventually consistent container listings to do so. Instead, a user defined manifest of the - object segments is used. + object segments is used.
Uploading the Manifest After the user has uploaded the objects to be @@ -2079,7 +2079,7 @@ $ swift-dispersion-report -j minimum size) then the user will receive a 4xx error response. If everything does match, the user will receive a 2xx response and the SLO object is - ready for downloading. + ready for downloading. Behind the scenes, on success, a json manifest generated from the user input is sent to object servers with an extra "X-Static-Large-Object: @@ -2088,11 +2088,11 @@ $ swift-dispersion-report -j appended to the existing Content-Type, where total_size is the sum of all the included segments' size_bytes. This extra parameter will be - hidden from the user. + hidden from the user. Manifest files can reference objects in separate containers, which will improve concurrent upload speed. Objects can be referenced by multiple - manifests. + manifests.
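For reference, a manifest is a JSON list of segment descriptors with path, etag, and size_bytes keys. A hedged sketch of building and uploading one with the third-party requests library (URL, token, and segment values are illustrative):

import json
import requests  # third-party

manifest = [
    {'path': '/segments/seg-001',
     'etag': 'd41d8cd98f00b204e9800998ecf8427e', 'size_bytes': 1048576},
    {'path': '/segments/seg-002',
     'etag': 'd41d8cd98f00b204e9800998ecf8427e', 'size_bytes': 1048576},
]
resp = requests.put(
    'https://swift.example.com/v1/AUTH_test/container/big-object'
    '?multipart-manifest=put',
    headers={'X-Auth-Token': 'AUTH_tk...'},
    data=json.dumps(manifest))
print(resp.status_code)  # 2xx on success, 4xx if a segment check fails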
Retrieving a Large Object @@ -2103,7 +2103,7 @@ $ swift-dispersion-report -j Length no longer match, the connection will drop. In this case, a 409 Conflict will be logged in the proxy logs and the user will receive incomplete - results. + results. The headers from this GET or HEAD request will return the metadata attached to the manifest object itself with some exceptions: @@ -2125,7 +2125,7 @@ $ swift-dispersion-report -j Will return the actual manifest file itself. This is generated json and does not match the data sent from the original multipart-manifest=put. Use this - call for debugging. + call for debugging. When the manifest object is uploaded, you are more or less guaranteed that every segment in the manifest exists and matches the specifications. @@ -2133,7 +2133,7 @@ $ swift-dispersion-report -j from breaking the SLO download by deleting/replacing a segment referenced in the manifest. It is left to the user to use caution in - handling the segments. + handling the segments.
Deleting a Large Object @@ -2144,7 +2144,7 @@ $ swift-dispersion-report -j will delete all the segments referenced in the manifest and then, if successful, the manifest itself. The failure response will be similar to - the bulk delete middleware. + the bulk delete middleware.
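Sketched with the same illustrative names as above, the corresponding delete of the segments and manifest is a single request:

import requests  # third-party

resp = requests.delete(
    'https://swift.example.com/v1/AUTH_test/container/big-object'
    '?multipart-manifest=delete',
    headers={'X-Auth-Token': 'AUTH_tk...'})
print(resp.status_code)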
Modifying a Large Object @@ -2167,7 +2167,7 @@ $ swift-dispersion-report -j count the bytes-used twice (for both the manifest and the segments it's referring to) in the container and account metadata, which can be used - for stats purposes. + for stats purposes.
@@ -2255,7 +2255,7 @@ $ swift-dispersion-report -j delete will return an HTTPBadGateway. In both cases, the response body is a json dictionary specifying the number of files successfully deleted, not found, and a - list of the files that failed. + list of the files that failed.
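Illustratively, such a response body looks roughly like the following (field names as commonly emitted by the bulk middleware; treat this as indicative rather than authoritative):

{
    "Number Deleted": 9,
    "Number Not Found": 1,
    "Response Status": "200 OK",
    "Response Body": "",
    "Errors": []
}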
@@ -2313,7 +2313,7 @@ $ swift-dispersion-report -j To add this middleware to your configuration, add the swift3 middleware in front of the auth middleware, and before any other middleware that looks at swift requests - (like rate limiting). + (like rate limiting). Ensure that your proxy-server.conf file contains swift3 in the pipeline and the [filter:swift3] section, as shown @@ -2344,7 +2344,7 @@ use = egg:swift#swift3 old-style calling format, and not the hostname-based container format. Here is an example client setup using the Python boto library on a locally installed all-in-one - Swift installation. + Swift installation. (A completed version of this client setup appears at the end of this section.) connection = boto.s3.Connection( aws_access_key_id='test:tester', @@ -2379,7 +2379,7 @@ connection = boto.s3.Connection( account. ACLs are set at the container level and support lists for read and write access, which are set with the X-Container-Read and X-Container-Write header - respectively. + respectively. The swift client can be used to set the ACLs, using the post subcommand with the option '-r' for the read ACL, and '-w' for the write ACL. This example allows @@ -2387,7 +2387,7 @@ connection = boto.s3.Connection( $ swift post -r 'testuser' - This could instead be a list of users. + This could instead be a list of users. If you are using the StaticWeb middleware to allow OpenStack Object Storage to serve public web content, you should also be aware of the ACL syntax for @@ -2405,7 +2405,7 @@ connection = boto.s3.Connection( The command-line usage for swift, the CLI tool, is: swift (command) [options] [args] - Here are the available commands for swift. + Here are the available commands for swift. stat [container] [object] Displays information for the account, container, @@ -2440,7 +2440,7 @@ connection = boto.s3.Connection( -read-acl) and -w (or -write-acl) options. The -m or -meta option is allowed on all and used to define the user metadata items to set in the form - Name:Value. This option can be repeated. + Name:Value. This option can be repeated. Example: post -m Color:Blue -m Size:Large @@ -2458,7 +2458,7 @@ connection = boto.s3.Connection( [object] … Deletes everything in the account (with --all), or everything in a container, or a list of objects - depending on the args given. + depending on the args given. Example: swift -A https://auth.api.rackspacecloud.com/v1.0 -U user -K key stat @@ -2590,7 +2590,7 @@ Write ACL: list command on the "logtest" container; then for each item in the list run swift with download -o - then pipe the output into grep to filter the put requests - and finally into wc -l to count the lines. + and finally into wc -l to count the lines. $ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing list logtest` ; \ do echo -ne "PUTS - " ; swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing download -o - logtest $f | grep PUT | wc -l ; \ @@ -2612,7 +2612,7 @@ Write ACL: 2010-11-15. Then, for each item returned, run swift with download -o -, then pipe the output to grep and wc as in the previous example. The echo command is - added to display the object name. + added to display the object name.
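The boto example in the hunk above is truncated by the patch context; a complete connection for a local all-in-one Swift with swift3 enabled typically looks like the following sketch (host, port, and credentials are illustrative):

import boto.s3.connection

connection = boto.s3.connection.S3Connection(
    aws_access_key_id='test:tester',
    aws_secret_access_key='testing',
    host='127.0.0.1',
    port=8080,
    is_secure=False,  # this example proxy is served over plain HTTP
    calling_format=boto.s3.connection.OrdinaryCallingFormat())

for bucket in connection.get_all_buckets():
    print(bucket.name)

OrdinaryCallingFormat matters here because, as noted above, swift3 expects the old-style (path-based) calling format rather than hostname-based buckets.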
$ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing list -p 2010-11-15 logtest` ; \ do echo -ne "$f - PUTS - " ; swift -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing download -o - logtest $f | grep PUT | wc -l ; \ @@ -2623,7 +2623,7 @@ Write ACL: The swift utility is simple, scalable, and flexible, and provides useful solutions, all of which are core principles of cloud computing, with the -o output - option being just one of its many features. + option being just one of its many features.
diff --git a/doc/src/docbkx/openstack-object-storage-admin/objectstoragetutorials.xml b/doc/src/docbkx/openstack-object-storage-admin/objectstoragetutorials.xml index c2aff35c6b..a0a52be5ad 100644 --- a/doc/src/docbkx/openstack-object-storage-admin/objectstoragetutorials.xml +++ b/doc/src/docbkx/openstack-object-storage-admin/objectstoragetutorials.xml @@ -16,7 +16,7 @@ In this OpenStack tutorial, we’ll walk through using an Object Storage installation to back up all your photos or videos. As the sensors on consumer-grade and pro-sumer grade cameras generate more and more megapixels, we all need a place to back our files to and - know they are safe. + know they are safe.We'll go through this tutorial in parts: @@ -37,7 +37,7 @@ $ cd /etc/swift $ openssl req -new -x509 -nodes -out cert.crt -keyout cert.key - Ensure these generated files are in /etc/swift/cert.crt and /etc/swift/cert.key. + Ensure these generated files are in /etc/swift/cert.crt and /etc/swift/cert.key. You also should configure your iptables to enable https traffic. Here's an example setup that works. Chain INPUT (policy ACCEPT 0 packets, 0 bytes) @@ -56,8 +56,8 @@ Chain FORWARD (policy ACCEPT 0 packets, 0 bytes) Chain OUTPUT (policy ACCEPT 397K packets, 1561M bytes) pkts bytes target prot opt in out source destination If you don't have access to the Object Storage installation to configure these - settings, ask your service provider to set up secure access for you. Then, edit your proxy-server.conf file to - include the following in the [DEFAULT] sections. + settings, ask your service provider to set up secure access for you.Then, edit your proxy-server.conf file to + include the following in the [DEFAULT] sections. [DEFAULT] bind_port = 443 @@ -94,7 +94,7 @@ key_file = /etc/swift/cert.key On OS X open a Terminal window and execute, $ defaults write ch.sudo.cyberduck cf.authentication.context /auth/v1.0 - On Windows open the preferences file in a text + On Windows open the preferences file in a text editor. The exact location of this file is described Unsecured Swift profile file and - double click it to import it into Cyberduck. + double click it to import it into Cyberduck. When creating a new connection select Swift (HTTP). Enter your connection details as described above. You'll need diff --git a/doc/src/docbkx/openstack-security/ch002_why-and-how-we-wrote-this-book.xml b/doc/src/docbkx/openstack-security/ch002_why-and-how-we-wrote-this-book.xml index 60b0e3de2f..a2059d0883 100644 --- a/doc/src/docbkx/openstack-security/ch002_why-and-how-we-wrote-this-book.xml +++ b/doc/src/docbkx/openstack-security/ch002_why-and-how-we-wrote-this-book.xml @@ -6,19 +6,19 @@
Objectives - Identify the security domains in OpenStack + Identify the security domains in OpenStack - Provide guidance to secure your OpenStack deployment + Provide guidance to secure your OpenStack deployment - Highlight security concerns and potential mitigations in present-day OpenStack + Highlight security concerns and potential mitigations in present-day OpenStack - Discuss upcoming security features + Discuss upcoming security features - Provide a community-driven facility for knowledge capture and dissemination + Provide a community-driven facility for knowledge capture and dissemination
diff --git a/doc/src/docbkx/openstack-security/ch005_security-domains.xml b/doc/src/docbkx/openstack-security/ch005_security-domains.xml index 5320e4e8d2..5b51b04a1e 100644 --- a/doc/src/docbkx/openstack-security/ch005_security-domains.xml +++ b/doc/src/docbkx/openstack-security/ch005_security-domains.xml @@ -7,16 +7,16 @@ A security domain comprises users, applications, servers, or networks that share common trust requirements and expectations within a system. Typically, they have the same authentication and authorization (AuthN/Z) requirements and users. Although you may desire to break these domains down further (we later discuss where this may be appropriate), we generally refer to four distinct security domains, which form the bare minimum that is required to deploy any OpenStack cloud securely. These security domains are: - Public + Public - Guest + Guest - Management + Management - Data + Data We selected these security domains because they can be mapped independently or combined to represent the majority of the possible areas of trust within a given OpenStack deployment. For example, some deployment topologies combine both guest and data domains onto one physical network, whereas others keep these networks physically separated. In each case, the cloud operator should be aware of the appropriate security concerns. Security domains should be mapped out against your specific OpenStack deployment topology. The domains and their trust requirements depend upon whether the cloud instance is public, private, or hybrid. diff --git a/doc/src/docbkx/openstack-security/ch008_system-roles-types.xml b/doc/src/docbkx/openstack-security/ch008_system-roles-types.xml index 3475e9ad19..12713b4749 100644 --- a/doc/src/docbkx/openstack-security/ch008_system-roles-types.xml +++ b/doc/src/docbkx/openstack-security/ch008_system-roles-types.xml @@ -6,10 +6,10 @@ System Roles & Types It is necessary to describe the two broadly defined types of nodes that generally make up an OpenStack installation. - Infrastructure nodes, or the nodes that run the cloud-related services such as the OpenStack Identity service, the message queuing service, storage, networking, and other services required to support the operation of the cloud. + Infrastructure nodes, or the nodes that run the cloud-related services such as the OpenStack Identity service, the message queuing service, storage, networking, and other services required to support the operation of the cloud. - The other type of node comprises the compute, storage, or other resource nodes: those that provide storage capacity or virtual machines for your cloud. + The other type of node comprises the compute, storage, or other resource nodes: those that provide storage capacity or virtual machines for your cloud. diff --git a/doc/src/docbkx/openstack-security/ch012_configuration-management.xml b/doc/src/docbkx/openstack-security/ch012_configuration-management.xml index 1497130515..3eade0f8ca 100644 --- a/doc/src/docbkx/openstack-security/ch012_configuration-management.xml +++ b/doc/src/docbkx/openstack-security/ch012_configuration-management.xml @@ -86,16 +86,16 @@ Additionally, when combined with a version control system such as Git or SVN, you can track changes to your environment over time and remediate unauthorized changes that may occur. For example, if a nova.conf or other configuration file falls out of compliance with your standard, your configuration management tool will be able to revert or replace the file and bring your configuration back into a known state.
Finally, a configuration management tool can also be used to deploy updates, simplifying the security patch process. These tools have a broad range of capabilities that are useful in this space. The key point for securing your cloud is to choose a tool for configuration management and use it. There are many configuration management solutions; at the time of this writing there are two in the marketplace that are robust in their support of OpenStack environments: Chef and Puppet. A non-exhaustive listing of tools in this space is provided below (a minimal sketch of the revert-on-drift idea follows this list): - Chef + Chef - Puppet + Puppet - Salt Stack + Salt Stack - Ansible + Ansible
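As a minimal illustration of the revert-on-drift idea mentioned above (a real deployment would express this as a Chef/Puppet/Salt/Ansible resource; the baseline path is an assumption):

import hashlib
import shutil

BASELINE = '/srv/config-baseline/nova.conf'  # known-good copy (assumed path)
DEPLOYED = '/etc/nova/nova.conf'

def sha256(path):
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()

if sha256(DEPLOYED) != sha256(BASELINE):
    # Out of compliance: restore the known-good configuration.
    shutil.copy2(BASELINE, DEPLOYED)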
@@ -109,29 +109,29 @@
Security Considerations - Ensure only authenticated users and backup clients have access to the backup server. + Ensure only authenticated users and backup clients have access to the backup server. - Use data encryption options for storage and transmission of backups. + Use data encryption options for storage and transmission of backups. - Use dedicated and hardened backup servers. The backup servers' logs should be monitored daily and should be accessible by only a few individuals. + Use dedicated and hardened backup servers. The backup servers' logs should be monitored daily and should be accessible by only a few individuals. - Test data recovery options regularly. One of the things that can be restored from secured backups is the images. In case of a compromise, the best practice would be to terminate running instances immediately and then relaunch the instances from the images in the secured backup repository. + Test data recovery options regularly. One of the things that can be restored from secured backups is the images. In case of a compromise, the best practice would be to terminate running instances immediately and then relaunch the instances from the images in the secured backup repository.
References - http://docs.openstack.org/folsom/openstack-ops/content/backup_and_recovery.html + http://docs.openstack.org/folsom/openstack-ops/content/backup_and_recovery.html - http://www.sans.org/reading_room/whitepapers/backup/security-considerations-enterprise-level-backups_515 + http://www.sans.org/reading_room/whitepapers/backup/security-considerations-enterprise-level-backups_515 - http://www.music-piracy.com/?p=494 (OpenStack Security Primer)  + http://www.music-piracy.com/?p=494 (OpenStack Security Primer) 
diff --git a/doc/src/docbkx/openstack-security/ch013_node-bootstrapping.xml b/doc/src/docbkx/openstack-security/ch013_node-bootstrapping.xml index d6323b12fb..8990c36cdd 100644 --- a/doc/src/docbkx/openstack-security/ch013_node-bootstrapping.xml +++ b/doc/src/docbkx/openstack-security/ch013_node-bootstrapping.xml @@ -104,13 +104,13 @@ At this point we know that the node has booted with the correct kernel and underlying components. There are many paths for hardening a given operating system deployment. The specifics on these steps are outside of the scope of this book.  We recommend following the guidance from a hardening guide specific to your operating system.  For example, the security technical implementation guides (STIG, http://iase.disa.mil/stigs/) and the NSA guides (http://www.nsa.gov/ia/mitigation_guidance/security_configuration_guides/) are useful starting places. The nature of the nodes makes additional hardening possible. We recommend the following additional steps for production nodes: - Use a read-only file system where possible.  Ensure that writeable file systems do not permit execution.  This can be handled through the mount options provided in /etc/fstab . + Use a read-only file system where possible.  Ensure that writeable file systems do not permit execution.  This can be handled through the mount options provided in /etc/fstab . - Use a mandatory access control policy to contain the instances, the node services, and any other critical processes and data on the node.  See the discussions on sVirt / SELinux and AppArmor below. + Use a mandatory access control policy to contain the instances, the node services, and any other critical processes and data on the node.  See the discussions on sVirt / SELinux and AppArmor below. - Remove any unnecessary software packages. This should result in a very stripped down installation because a compute node has a relatively small number of dependencies. + Remove any unnecessary software packages. This should result in a very stripped down installation because a compute node has a relatively small number of dependencies. Finally, the node kernel should have a mechanism to validate that the rest of the node starts in a known good state. This provides the necessary link from the boot validation process to validating the entire system. The steps for doing this will be deployment specific. As an example, a kernel module could verify a hash over the blocks comprising the file system before mounting it using dm-verity (https://code.google.com/p/cryptsetup/wiki/DMVerity). @@ -126,26 +126,26 @@ False positives occur when the security monitoring tool produces a security alert for a benign event. Due to the nature of security monitoring tools, false positives will most certainly occur from time to time. Typically a cloud administrator can tune security monitoring tools to reduce the false positives, but this may also reduce the overall detection rate at the same time. These classic trade-offs must be understood and accounted for when setting up a security monitoring system in the cloud. The selection and configuration of a host-based intrusion detection tool is highly deployment specific. We recommend starting by exploring the following open source projects which implement a variety of host-based intrusion detection and file monitoring features. 
- OSSEC (http://www.ossec.net/) + OSSEC (http://www.ossec.net/) - Samhain (http://la-samhna.de/samhain/) + Samhain (http://la-samhna.de/samhain/) - Tripwire (http://sourceforge.net/projects/tripwire/) + Tripwire (http://sourceforge.net/projects/tripwire/) - AIDE (http://aide.sourceforge.net/) + AIDE (http://aide.sourceforge.net/) Network intrusion detection tools complement the host-based tools. OpenStack doesn't have a specific network IDS built-in, but OpenStack's networking component, Neutron, provides a plugin mechanism to enable different technologies via the Neutron API. This plugin architecture will allow tenants to develop API extensions to insert and configure their own advanced networking services like a firewall, an intrusion detection system, or a VPN between the VMs. Similar to host-based tools, the selection and configuration of a network-based intrusion detection tool is deployment specific. Snort (http://www.snort.org/) is the leading open source networking intrusion detection tool, and a good starting place to learn more. There are a few important security considerations for network and host-based intrusion detection systems. - It is important to consider the placement of the Network IDS on the cloud (e.g., adding it to the network boundary and/or around sensitive networks). The placement depends on your network environment but make sure to monitor the impact the IDS may have on your services depending on where you choose to add it. Encrypted traffic, such as SSL, cannot generally be inspected for content by a Network IDS. However, the Network IDS may still provide some benefit in identifying anomalous unencrypted traffic on the network. + It is important to consider the placement of the Network IDS on the cloud (e.g., adding it to the network boundary and/or around sensitive networks). The placement depends on your network environment but make sure to monitor the impact the IDS may have on your services depending on where you choose to add it. Encrypted traffic, such as SSL, cannot generally be inspected for content by a Network IDS. However, the Network IDS may still provide some benefit in identifying anomalous unencrypted traffic on the network. - In some deployments it may be required to add host-based IDS on sensitive components on security domain bridges.  A host-based IDS may detect anomalous activity by compromised or unauthorized processes on the component. The IDS should transmit alert and log information on the Management network. + In some deployments it may be required to add host-based IDS on sensitive components on security domain bridges.  A host-based IDS may detect anomalous activity by compromised or unauthorized processes on the component. The IDS should transmit alert and log information on the Management network.
diff --git a/doc/src/docbkx/openstack-security/ch014_best-practices-for-operator-mode-access.xml b/doc/src/docbkx/openstack-security/ch014_best-practices-for-operator-mode-access.xml index 1b533a5347..60f9af877d 100644 --- a/doc/src/docbkx/openstack-security/ch014_best-practices-for-operator-mode-access.xml +++ b/doc/src/docbkx/openstack-security/ch014_best-practices-for-operator-mode-access.xml @@ -4,19 +4,19 @@ It is necessary for administrators to perform command and control over the cloud for various operational functions. It is important these command and control facilities are understood and secured. OpenStack provides several management interfaces for operators and tenants: - OpenStack Dashboard (Horizon) + OpenStack Dashboard (Horizon) - OpenStack API + OpenStack API - Secure Shell (SSH) + Secure Shell (SSH) - OpenStack Management Utilities (nova-manage, glance-manage, etc.) + OpenStack Management Utilities (nova-manage, glance-manage, etc.) - Out-of-Band Management Interfaces (IPMI, etc.) + Out-of-Band Management Interfaces (IPMI, etc.)
@@ -25,38 +25,38 @@
Capabilities - For a cloud administrator, the dashboard provides an overall view of the size and state of your cloud. You can create users and tenants/projects, assign users to tenants/projects, and set limits on the resources available for them. + For a cloud administrator, the dashboard provides an overall view of the size and state of your cloud. You can create users and tenants/projects, assign users to tenants/projects, and set limits on the resources available for them. - The dashboard provides tenant users with a self-service portal to provision their own resources within the limits set by administrators. + The dashboard provides tenant users with a self-service portal to provision their own resources within the limits set by administrators. - The dashboard provides GUI support for routers and load-balancers. For example, Horizon now implements all of the main Neutron features. + The dashboard provides GUI support for routers and load-balancers. For example, Horizon now implements all of the main Neutron features. - It is an extensible Django web application that allows easy plug-in of third-party products and services, such as billing, monitoring, and additional management tools. + It is an extensible Django web application that allows easy plug-in of third-party products and services, such as billing, monitoring, and additional management tools. - The dashboard can also be branded for service providers and other commercial vendors. + The dashboard can also be branded for service providers and other commercial vendors.
Security Considerations - Horizon requires cookies and JavaScript to be enabled in the web browser. + Horizon requires cookies and JavaScript to be enabled in the web browser. - The web server that hosts Horizon should be configured for SSL to ensure data is encrypted. + The web server that hosts Horizon should be configured for SSL to ensure data is encrypted. - Both the Horizon web service and the OpenStack API it uses to communicate with the back-end are susceptible to web attack vectors such as denial of service and must be monitored. + Both the Horizon web service and the OpenStack API it uses to communicate with the back-end are susceptible to web attack vectors such as denial of service and must be monitored. - It is now possible (though there are numerous deployment/security implications) to upload an image file directly from a user’s hard disk to Glance through Horizon. For multi-GB images it is still strongly recommended that the upload be done using the Glance CLI. + It is now possible (though there are numerous deployment/security implications) to upload an image file directly from a user’s hard disk to Glance through Horizon. For multi-GB images it is still strongly recommended that the upload be done using the Glance CLI. - Create and manage security groups through the dashboard. Security groups allow L3-L4 packet filtering for security policies to protect virtual machines. + Create and manage security groups through the dashboard. Security groups allow L3-L4 packet filtering for security policies to protect virtual machines.
@@ -71,20 +71,20 @@
Capabilities - To the cloud administrator, the API provides an overall view of the size and state of the cloud deployment and allows the creation of users and tenants/projects, the assignment of users to tenants/projects, and the specification of resource quotas on a per tenant/project basis. + To the cloud administrator, the API provides an overall view of the size and state of the cloud deployment and allows the creation of users and tenants/projects, the assignment of users to tenants/projects, and the specification of resource quotas on a per tenant/project basis. - The API provides a tenant interface for provisioning, managing, and accessing their resources. + The API provides a tenant interface for provisioning, managing, and accessing their resources.
Security Considerations - The API service should be configured for SSL to ensure data is encrypted. + The API service should be configured for SSL to ensure data is encrypted. - As a web service, the OpenStack API is susceptible to familiar website attack vectors such as denial-of-service attacks. + As a web service, the OpenStack API is susceptible to familiar website attack vectors such as denial-of-service attacks.
@@ -112,10 +112,10 @@
Security Considerations - The dedicated management utilities (*-manage) in some cases use a direct database connection. + The dedicated management utilities (*-manage) in some cases use a direct database connection. - Ensure that the .rc file, which holds your credential information, is secured. + Ensure that the .rc file, which holds your credential information, is secured.
@@ -131,16 +131,16 @@
Security Considerations - Use strong passwords and safeguard them, or use client-side SSL authentication. + Use strong passwords and safeguard them, or use client-side SSL authentication. - Ensure that the network interfaces are on their own private (management or a separate) network. Segregate management domains with firewalls or other network gear. + Ensure that the network interfaces are on their own private (management or a separate) network. Segregate management domains with firewalls or other network gear. - If you use a web interface to interact with the BMC/IPMI, always use the SSL interface (e.g. https or port 443). This SSL interface should NOT use self-signed certificates, as is often the default, but should have trusted certificates using the correctly defined fully qualified domain names (FQDNs). + If you use a web interface to interact with the BMC/IPMI, always use the SSL interface (e.g. https or port 443). This SSL interface should NOT use self-signed certificates, as is often the default, but should have trusted certificates using the correctly defined fully qualified domain names (FQDNs). - Monitor the traffic on the management network. Anomalies may be easier to track than on the busier compute nodes. + Monitor the traffic on the management network. Anomalies may be easier to track than on the busier compute nodes. Out-of-band management interfaces also often include graphical machine console access. It is often possible, although not necessarily the default, for these interfaces to be encrypted. Consult with your system software documentation for encrypting these interfaces. diff --git a/doc/src/docbkx/openstack-security/ch015_case-studies-management.xml b/doc/src/docbkx/openstack-security/ch015_case-studies-management.xml index 2c68e11798..1db644d3dc 100644 --- a/doc/src/docbkx/openstack-security/ch015_case-studies-management.xml +++ b/doc/src/docbkx/openstack-security/ch015_case-studies-management.xml @@ -3,16 +3,16 @@ Case Studies Earlier in this chapter we discussed typical OpenStack management interfaces and associated backplane issues. We will now approach these issues by returning to our Alice and Bob case study. Specifically, we will look into how both Alice and Bob will address: - Cloud Administration + Cloud Administration - Self Service + Self Service - Data Replication & Recovery + Data Replication & Recovery - SLA & Security Monitoring. + SLA & Security Monitoring.
diff --git a/doc/src/docbkx/openstack-security/ch017_threat-models-confidence-and-confidentiality.xml b/doc/src/docbkx/openstack-security/ch017_threat-models-confidence-and-confidentiality.xml index a6fe685216..3c34de6c8b 100644 --- a/doc/src/docbkx/openstack-security/ch017_threat-models-confidence-and-confidentiality.xml +++ b/doc/src/docbkx/openstack-security/ch017_threat-models-confidence-and-confidentiality.xml @@ -5,16 +5,16 @@ While it is commonly accepted that data over public networks should be secured using cryptographic measures, such as Secure Sockets Layer or Transport Layer Security (SSL/TLS) protocols, it is insufficient to rely on security domain separation to protect internal traffic. Using a security-in-depth approach, we recommend securing all domains with SSL/TLS, including the management domain services. It is important that, should a tenant escape their VM isolation and gain access to the hypervisor or host resources, compromise an API endpoint, or compromise any other service, they must not be able to easily inject or capture messages, commands, or otherwise affect or control management capabilities of the cloud. SSL/TLS provides the mechanisms to ensure authentication, non-repudiation, confidentiality, and integrity of user communications to the OpenStack services and between the OpenStack services themselves. Public Key Infrastructure (PKI) is the set of hardware, software, and policies to operate a secure system which provides authentication, non-repudiation, confidentiality, and integrity. The core components of PKI are: - End Entity - user, process, or system which is the subject of a certificate + End Entity - user, process, or system which is the subject of a certificate - Certification Authority (CA) - defines certificate policies, management, and issuance of certificates + Certification Authority (CA) - defines certificate policies, management, and issuance of certificates - Registration Authority (RA) - an optional system to which a CA delegates certain management functions + Registration Authority (RA) - an optional system to which a CA delegates certain management functions - Repository - where the end entity certificates and certificate revocation lists are stored and looked up + Repository - where the end entity certificates and certificate revocation lists are stored and looked up PKI builds the framework on which to provide encryption algorithms, cipher modes, and protocols for securing data and authentication. We strongly recommend securing all services with Public Key Infrastructure (PKI), including the use of SSL/TLS for API endpoints. It is impossible for the encryption or signing of transports or messages alone to solve all these problems. Hosts themselves must be secure and implement policy, namespaces, and other controls to protect their private credentials and keys. However, the challenges of key management and protection do not reduce the necessity of these controls, or lessen their importance. @@ -32,22 +32,22 @@ Cryptographic Algorithms, Cipher Modes, and Protocols We recommend only using TLS v1.1 or v1.2, but suggest SSLv3 and TLSv1.0 may be used for compatibility. Other SSL/TLS versions, particularly older versions, should not be used. These older versions include SSLv1 and SSLv2. As this book does not intend to be a thorough reference on cryptography, we do not wish to be prescriptive about what specific algorithms or cipher modes you should enable or disable in your OpenStack services. (A brief Python illustration of restricting protocol versions follows the reference list below.)
However, there are some authoritative references we would like to recommend for further information: - National Security Agency, Suite B Cryptography - http://www.nsa.gov/ia/programs/suiteb_cryptography/index.shtml + National Security Agency, Suite B Cryptography - http://www.nsa.gov/ia/programs/suiteb_cryptography/index.shtml - OWASP Guide to Cryptography - https://www.owasp.org/index.php/Guide_to_Cryptography + OWASP Guide to Cryptography - https://www.owasp.org/index.php/Guide_to_Cryptography - OWASP Transport Layer Protection Cheat Sheet - https://www.owasp.org/index.php/Transport_Layer_Protection_Cheat_Sheet + OWASP Transport Layer Protection Cheat Sheet - https://www.owasp.org/index.php/Transport_Layer_Protection_Cheat_Sheet - http://www.ieee-security.org/TC/SP2013/papers/4977a511.pdf + http://www.ieee-security.org/TC/SP2013/papers/4977a511.pdf - http://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + http://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf - http://www.openssl.org/docs/fips/fipsnotes.html + http://www.openssl.org/docs/fips/fipsnotes.html
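In Python terms, restricting a service (or SSL-terminating glue code) to the recommended protocol versions looks roughly like this sketch; the constants come from the standard ssl module, and their effect depends on the linked OpenSSL:

import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)  # negotiate the highest version
# Disable the versions recommended against above.
ctx.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
# If compatibility permits, also refuse TLSv1.0:
# ctx.options |= ssl.OP_NO_TLSv1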
@@ -55,25 +55,25 @@ Summary It is important to note that, given the many components that make up OpenStack and the different deployment and implementation choices, care should be taken to look at each component to ensure the appropriate configuration of SSL certificates, keys, and CAs. The following services will be discussed in later sections of this book where SSL and PKI are available (either natively or via an SSL proxy): - Compute API endpoints + Compute API endpoints - Identity API endpoints + Identity API endpoints - Networking API endpoints + Networking API endpoints - Storage API endpoints + Storage API endpoints - Messaging server + Messaging server - Database server + Database server - Dashboard + Dashboard Throughout this book we will use SSL as shorthand to refer to these recommendations for SSL/TLS protocols. diff --git a/doc/src/docbkx/openstack-security/ch020_ssl-everywhere.xml b/doc/src/docbkx/openstack-security/ch020_ssl-everywhere.xml index 5e3e330b2b..7ef4e82588 100644 --- a/doc/src/docbkx/openstack-security/ch020_ssl-everywhere.xml +++ b/doc/src/docbkx/openstack-security/ch020_ssl-everywhere.xml @@ -4,19 +4,19 @@ OpenStack endpoints are HTTP services providing APIs to both end-users on public networks and to other OpenStack services within the same deployment operating over the management network. It is highly recommended that these requests, both internal and external, operate over SSL. In order for API requests to be encrypted by SSL, it's necessary to position the API services behind a proxy that will establish and terminate SSL sessions. The following table offers a non-exhaustive list of software services that can proxy SSL traffic for API requests: - Pound (http://www.apsis.ch/pound) + Pound (http://www.apsis.ch/pound) - Stud (https://github.com/bumptech/stud) + Stud (https://github.com/bumptech/stud) - nginx (http://nginx.org/) + nginx (http://nginx.org/) - Apache httpd (http://www.apache.org/) + Apache httpd (http://www.apache.org/) - Hardware appliance SSL acceleration proxies + Hardware appliance SSL acceleration proxies It is important to be mindful of the size of requests that will be processed by any chosen SSL proxy. diff --git a/doc/src/docbkx/openstack-security/ch024_authentication.xml b/doc/src/docbkx/openstack-security/ch024_authentication.xml index f93d6da933..dd9b3fa92c 100644 --- a/doc/src/docbkx/openstack-security/ch024_authentication.xml +++ b/doc/src/docbkx/openstack-security/ch024_authentication.xml @@ -36,13 +36,13 @@ Organizations may desire to implement external authentication for compatibility with existing authentication services or to enforce stronger authentication policy requirements. Although passwords are the most common form of authentication, they can be compromised through numerous methods, including keystroke logging and password compromise. External authentication services can provide alternative forms of authentication that minimize the risk from weak passwords. These include: - Password Policy Enforcement: Requires user passwords to conform to minimum standards for length, diversity of characters, expiration, or failed login attempts. + Password Policy Enforcement: Requires user passwords to conform to minimum standards for length, diversity of characters, expiration, or failed login attempts. - Multi-factor authentication: The authentication service requires the user to provide information based on something they have (e.g., a one-time password token or X.509 certificate) and something they know (e.g., a password).
+ Multi-factor authentication: The authentication service requires the user to provide information based on something they have (e.g., a one-time password token or X.509 certificate) and something they know (e.g., a password). - Kerberos + Kerberos
diff --git a/doc/src/docbkx/openstack-security/ch026_compute.xml b/doc/src/docbkx/openstack-security/ch026_compute.xml index c3b93bc5a3..f75f03a9e9 100644 --- a/doc/src/docbkx/openstack-security/ch026_compute.xml +++ b/doc/src/docbkx/openstack-security/ch026_compute.xml @@ -12,20 +12,20 @@
Capabilities - The OpenStack Dashboard (Horizon) can provide a VNC console for instances directly on the web page using the HTML5 noVNC client.  This requires the nova-novncproxy service to bridge from the public network to the management network. + The OpenStack Dashboard (Horizon) can provide a VNC console for instances directly on the web page using the HTML5 noVNC client.  This requires the nova-novncproxy service to bridge from the public network to the management network. - The nova command line utility can return a URL for the VNC console for access by the nova Java VNC client. This requires the nova-xvpvncproxy service to bridge from the public network to the management network. + The nova command line utility can return a URL for the VNC console for access by the nova Java VNC client. This requires the nova-xvpvncproxy service to bridge from the public network to the management network.
Security Considerations - The nova-novncproxy and nova-xvpvncproxy services by default open public-facing ports that are token authenticated. + The nova-novncproxy and nova-xvpvncproxy services by default open public-facing ports that are token authenticated. - By default, the remote desktop traffic is not encrypted. Havana is expected to have VNC connections secured by Kerberos. + By default, the remote desktop traffic is not encrypted. Havana is expected to have VNC connections secured by Kerberos.
@@ -39,30 +39,30 @@
Capabilities - SPICE is supported by the OpenStack Dashboard (Horizon) directly on the instance web page.  This requires the nova-spicehtml5proxy service. + SPICE is supported by the OpenStack Dashboard (Horizon) directly on the instance web page.  This requires the nova-spicehtml5proxy service. - The nova command line utility can return a URL for SPICE console for access by a SPICE-html client. + The nova command line utility can return a URL for SPICE console for access by a SPICE-html client.
Limitations - Although SPICE has many advantages over VNC, the spice-html5 browser integration currently doesn't really allow admins to take advantage of any of the benefits. To take advantage of SPICE features like multi-monitor and USB pass-through, admins are recommended to use a standalone SPICE client within the Management Network. + Although SPICE has many advantages over VNC, the spice-html5 browser integration currently doesn't really allow admins to take advantage of any of the benefits. To take advantage of SPICE features like multi-monitor and USB pass-through, admins are recommended to use a standalone SPICE client within the Management Network.
Security Considerations - The nova-spicehtml5proxy service by default opens public-facing ports that are token authenticated. + The nova-spicehtml5proxy service by default opens public-facing ports that are token authenticated. - The functionality and integration are still evolving. We will assess the features in the next release and make recommendations. + The functionality and integration are still evolving. We will assess the features in the next release and make recommendations. - As is the case for VNC, at this time we recommend using SPICE from the management network in addition to limiting use to a few individuals. + As is the case for VNC, at this time we recommend using SPICE from the management network in addition to limiting use to a few individuals.
diff --git a/doc/src/docbkx/openstack-security/ch032_networking-best-practices.xml b/doc/src/docbkx/openstack-security/ch032_networking-best-practices.xml index 846bfcd72b..a20c6ae68e 100644 --- a/doc/src/docbkx/openstack-security/ch032_networking-best-practices.xml +++ b/doc/src/docbkx/openstack-security/ch032_networking-best-practices.xml @@ -11,7 +11,7 @@ VLANs are realized as packets on a specific physical network containing IEEE 802.1Q headers with a specific VLAN ID (VID) field value. VLAN networks sharing the same physical network are isolated from each other at L2, and can even have overlapping IP address spaces. Each distinct physical network supporting VLAN networks is treated as a separate VLAN trunk, with a distinct space of VID values. Valid VID values are 1 through 4094. VLAN configuration complexity depends on your OpenStack design requirements. In order to allow OpenStack Networking to efficiently use VLANs, you must allocate a VLAN range (one for each tenant) and turn each compute node physical switch port into a VLAN trunk port. - NOTE: If you intend for your network to support more than 4094 tenants, VLAN is probably not the correct option for you, as multiple 'hacks' are required to extend the VLAN tags to more than 4094 tenants. + NOTE: If you intend for your network to support more than 4094 tenants, VLAN is probably not the correct option for you, as multiple 'hacks' are required to extend the VLAN tags to more than 4094 tenants.
@@ -41,16 +41,16 @@ Quality of Service (QoS) The ability to set QoS on the virtual interface ports of tenant instances is a current deficiency for OpenStack Networking. The application of QoS for traffic shaping and rate-limiting at the physical network edge device is insufficient due to the dynamic nature of workloads in an OpenStack deployment and cannot be leveraged in the traditional way.  QoS-as-a-Service (QoSaaS) is currently in development for the OpenStack Networking Havana release as an experimental feature. QoSaaS is planned to provide the following services: - Traffic shaping via DSCP markings + Traffic shaping via DSCP markings - Rate-limiting on a per port/network/tenant basis. + Rate-limiting on a per port/network/tenant basis. - Port mirroring (via open source or third-party plugins) + Port mirroring (via open source or third-party plugins) - Flow analysis (via open source or third-party plugins) + Flow analysis (via open source or third-party plugins) Tenant traffic port mirroring or Network Flow monitoring is currently not an exposed feature in OpenStack Networking. There are third-party plugin extensions that do provide Port Mirroring on a per port/network/tenant basis. If Open vSwitch is used on the networking hypervisor, it is possible to enable sFlow and port mirroring; however, it will require some operational effort to implement. @@ -75,14 +75,14 @@ Networking Services Limitations There are a few currently known limitations of OpenStack Networking: - Overlapping IP addresses: Linux network namespaces are required on nodes running neutron-l3-agent or neutron-dhcp-agent if overlapping IPs are in use. In order to support overlapping IP addresses, the OpenStack Networking DHCP and L3 agents use Linux network namespaces by default. If the host does not support namespaces, then the neutron-l3-agent and neutron-dhcp-agent should be run on different hosts. + Overlapping IP addresses: Linux network namespaces are required on nodes running neutron-l3-agent or neutron-dhcp-agent if overlapping IPs are in use. In order to support overlapping IP addresses, the OpenStack Networking DHCP and L3 agents use Linux network namespaces by default. If the host does not support namespaces, then the neutron-l3-agent and neutron-dhcp-agent should be run on different hosts. A further limitation of the L3 agent is that, without namespaces, it supports only a single logical router. - Multi-Host DHCP agent: OpenStack Networking now supports running multiple l3-agents and dhcp-agents with load being split across those agents, but the tight coupling of that scheduling with the location of the VM is not supported in Grizzly. The Havana release is expected to include an exact replacement for the --multi_host flag in nova-network. + Multi-Host DHCP agent: OpenStack Networking now supports running multiple l3-agents and dhcp-agents with load being split across those agents, but the tight coupling of that scheduling with the location of the VM is not supported in Grizzly. The Havana release is expected to include an exact replacement for the --multi_host flag in nova-network. - No IPv6 Support for L3 agents: The neutron-l3-agent, used by many plugins to implement L3 forwarding, supports only IPv4 forwarding. + No IPv6 Support for L3 agents: The neutron-l3-agent, used by many plugins to implement L3 forwarding, supports only IPv4 forwarding.
diff --git a/doc/src/docbkx/openstack-security/ch033_securing-neutron-services.xml b/doc/src/docbkx/openstack-security/ch033_securing-neutron-services.xml index accfb347c7..2553f80f68 100644 --- a/doc/src/docbkx/openstack-security/ch033_securing-neutron-services.xml +++ b/doc/src/docbkx/openstack-security/ch033_securing-neutron-services.xml @@ -4,19 +4,19 @@ In order to secure OpenStack Networking, an understanding of the workflow for tenant instance creation needs to be mapped to security domains.  There are four main services that interact with OpenStack Networking. In a typical OpenStack deployment these services map to the following security domains: - OpenStack Dashboard: Public and Management + OpenStack Dashboard: Public and Management - OpenStack Identity: Management + OpenStack Identity: Management - OpenStack Compute Node: Management and Guest + OpenStack Compute Node: Management and Guest - OpenStack Network Node: Management, Guest, and possibly Public, depending upon the neutron plugin in use. + OpenStack Network Node: Management, Guest, and possibly Public, depending upon the neutron plugin in use. - SDN Services Node: Management, Guest, and possibly + SDN Services Node: Management, Guest, and possibly Public, depending upon the product used.
- In order to isolate sensitive data communication between the OpenStack Networking services and other OpenStack core services, we strongly recommend that these communication channels be configured to only allow communications over an isolated management network. + In order to isolate sensitive data communication between the OpenStack Networking services and other OpenStack core services, we strongly recommend that these communication channels be configured to only allow communications over an isolated management network.
OpenStack Networking Service Configuration
diff --git a/doc/src/docbkx/openstack-security/ch034_tenant-secure-networking-best-practices.xml b/doc/src/docbkx/openstack-security/ch034_tenant-secure-networking-best-practices.xml index 2a9b5ab3f8..88dfdf9859 100644 --- a/doc/src/docbkx/openstack-security/ch034_tenant-secure-networking-best-practices.xml +++ b/doc/src/docbkx/openstack-security/ch034_tenant-secure-networking-best-practices.xml @@ -16,10 +16,10 @@ Security Groups The OpenStack Networking Service provides security group functionality using a mechanism that is more flexible and powerful than the security group capabilities built into OpenStack Compute. Thus, when using OpenStack Networking, nova.conf should always disable built-in security groups and proxy all security group calls to the OpenStack Networking API. Failure to do so will result in conflicting security policies being simultaneously applied by both services. To proxy security groups to OpenStack Networking, use the following configuration values: - firewall_driver : must be set to 'nova.virt.firewall.NoopFirewallDriver' so that nova-compute does not perform iptables-based filtering itself. + firewall_driver : must be set to 'nova.virt.firewall.NoopFirewallDriver' so that nova-compute does not perform iptables-based filtering itself. - security_group_api : must be set to 'neutron' so that all security group requests are proxied to the OpenStack Network Service. + security_group_api : must be set to 'neutron' so that all security group requests are proxied to the OpenStack Network Service. Security groups and security group rules give administrators and tenants the ability to specify the type of traffic and direction (ingress/egress) that is allowed to pass through a virtual interface port. A security group is a container for security group rules. When a virtual interface port is created in OpenStack Networking, it is associated with a security group. If a security group is not specified, the port will be associated with a 'default' security group. By default, this group will drop all ingress traffic and allow all egress. Rules can be added to this group in order to change the behaviour. diff --git a/doc/src/docbkx/openstack-security/ch037_risks.xml b/doc/src/docbkx/openstack-security/ch037_risks.xml index 41b74421ca..db3f014df3 100644 --- a/doc/src/docbkx/openstack-security/ch037_risks.xml +++ b/doc/src/docbkx/openstack-security/ch037_risks.xml @@ -3,13 +3,13 @@ Message Queuing Architecture Inter-process communication within OpenStack is facilitated via message queueing services. Today, three messaging service backends are supported: - RabbitMQ + RabbitMQ - Qpid + Qpid - ZeroMQ + ZeroMQ Both RabbitMQ and Qpid are Advanced Message Queuing Protocol (AMQP) frameworks which provide message queues for peer-to-peer communication. Queue implementations are typically deployed as a centralized or decentralized pool of queue servers. ZeroMQ differs by communicating directly using TCP sockets between peers. diff --git a/doc/src/docbkx/openstack-security/ch038_transport-security.xml b/doc/src/docbkx/openstack-security/ch038_transport-security.xml index 31ac923b69..aaa26c876d 100644 --- a/doc/src/docbkx/openstack-security/ch038_transport-security.xml +++ b/doc/src/docbkx/openstack-security/ch038_transport-security.xml @@ -24,10 +24,10 @@ Note that the 'tcp_listeners' option is set to '[]' to prevent it from listening on a non-SSL port. The 'ssl_listeners' option should be restricted to listen only on the management network for the services.
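For reference, a rabbitmq.config restricted in the way just described might look like the following; the management-network address and file paths are illustrative:

[
  {rabbit, [
     {tcp_listeners, [] },
     {ssl_listeners, [{"10.0.0.12", 5671}] },
     {ssl_options, [
        {cacertfile, "/etc/ssl/cacert.pem"},
        {certfile,   "/etc/ssl/rabbit-server-cert.pem"},
        {keyfile,    "/etc/ssl/rabbit-server-key.pem"},
        {verify,     verify_peer},
        {fail_if_no_peer_cert, true}]}
  ]}
].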
For more information on RabbitMQ SSL configuration see: - RabbitMQ Configuration - http://www.rabbitmq.com/configure.html + RabbitMQ Configuration - http://www.rabbitmq.com/configure.html - RabbitMQ SSL - http://www.rabbitmq.com/ssl.html + RabbitMQ SSL - http://www.rabbitmq.com/ssl.html
@@ -35,7 +35,7 @@ Qpid Server SSL Configuration The Apache Foundation has a messaging security guide for Qpid. See: - Apache Qpid SSL - http://qpid.apache.org/books/0.22/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html#sect-Messaging_User_Guide-Security-Encryption_using_SSL + Apache Qpid SSL - http://qpid.apache.org/books/0.22/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html#sect-Messaging_User_Guide-Security-Encryption_using_SSL
@@ -60,13 +60,13 @@ rabbitmqctl set_permissions compute01 ".*" ".*" ".*" RabbitMQ Access Control - http://www.rabbitmq.com/access-control.html - RabbitMQ Authentication - http://www.rabbitmq.com/authentication.html + RabbitMQ Authentication - http://www.rabbitmq.com/authentication.html - RabbitMQ Plugins - http://www.rabbitmq.com/plugins.html + RabbitMQ Plugins - http://www.rabbitmq.com/plugins.html - RabbitMQ SASL External Auth - http://hg.rabbitmq.com/rabbitmq-auth-mechanism-ssl/file/rabbitmq_v3_1_3/README + RabbitMQ SASL External Auth - http://hg.rabbitmq.com/rabbitmq-auth-mechanism-ssl/file/rabbitmq_v3_1_3/README
@@ -89,10 +89,10 @@ kombu_ssl_ca_certs=/etc/ssl/cacert.pem Authentication Configuration Example - Qpid For configuration information see: - Apache Qpid Authentication - http://qpid.apache.org/books/0.22/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html#sect-Messaging_User_Guide-Security-User_Authentication + Apache Qpid Authentication - http://qpid.apache.org/books/0.22/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html#sect-Messaging_User_Guide-Security-User_Authentication - Apache Qpid Authorization - http://qpid.apache.org/books/0.22/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html#sect-Messaging_User_Guide-Security-Authorization + Apache Qpid Authorization - http://qpid.apache.org/books/0.22/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html#sect-Messaging_User_Guide-Security-Authorization diff --git a/doc/src/docbkx/openstack-security/ch041_database-backend-considerations.xml b/doc/src/docbkx/openstack-security/ch041_database-backend-considerations.xml index 48cca10989..02d3b0242f 100644 --- a/doc/src/docbkx/openstack-security/ch041_database-backend-considerations.xml +++ b/doc/src/docbkx/openstack-security/ch041_database-backend-considerations.xml @@ -9,21 +9,21 @@ Those deploying MySQL or PostgreSQL are advised to refer to existing security guidance. Some references are listed below: MySQL: - OWASP MySQL Hardening - https://www.owasp.org/index.php/OWASP_Backend_Security_Project_MySQL_Hardening + OWASP MySQL Hardening - https://www.owasp.org/index.php/OWASP_Backend_Security_Project_MySQL_Hardening - http://dev.mysql.com/doc/refman/5.5/en/pluggable-authentication.html + http://dev.mysql.com/doc/refman/5.5/en/pluggable-authentication.html - Security in MySQL - http://downloads.mysql.com/docs/mysql-security-excerpt-5.1-en.pdf + Security in MySQL - http://downloads.mysql.com/docs/mysql-security-excerpt-5.1-en.pdf PostgreSQL: - OWASP PostgreSQL Hardening - https://www.owasp.org/index.php/OWASP_Backend_Security_Project_PostgreSQL_Hardening + OWASP PostgreSQL Hardening - https://www.owasp.org/index.php/OWASP_Backend_Security_Project_PostgreSQL_Hardening - Total security in a PostgreSQL database - http://www.ibm.com/developerworks/opensource/library/os-postgresecurity + Total security in a PostgreSQL database - http://www.ibm.com/developerworks/opensource/library/os-postgresecurity diff --git a/doc/src/docbkx/openstack-security/ch042_database-overview.xml b/doc/src/docbkx/openstack-security/ch042_database-overview.xml index c0e825adb6..f5a32b58d5 100644 --- a/doc/src/docbkx/openstack-security/ch042_database-overview.xml +++ b/doc/src/docbkx/openstack-security/ch042_database-overview.xml @@ -22,13 +22,13 @@ The compute nodes are the least trusted of the services in OpenStack because they host tenant instances. The nova-conductor service has been introduced to serve as a database proxy, acting as an intermediary between the compute nodes and the database.
We discuss its ramifications later in this chapter. We strongly recommend: - All database communications be isolated to a management network + All database communications be isolated to a management network Securing communications using SSL - Creating unique database user accounts per OpenStack service endpoint (illustrated below) + Creating unique database user accounts per OpenStack service endpoint (illustrated below) diff --git a/doc/src/docbkx/openstack-security/ch043_database-transport-security.xml b/doc/src/docbkx/openstack-security/ch043_database-transport-security.xml index f5bda6434b..c45d84f03c 100644 --- a/doc/src/docbkx/openstack-security/ch043_database-transport-security.xml +++ b/doc/src/docbkx/openstack-security/ch043_database-transport-security.xml @@ -24,7 +24,7 @@ listen_addresses = <ip address or hostname of management network interface&gt; Database Transport In addition to restricting database communications to the management network, we also strongly recommend that the cloud administrator configure their database backend to require SSL. Using SSL for the database client connections protects the communications from tampering and eavesdropping. As will be discussed in the next section, using SSL also provides the framework for doing database user authentication via X.509 certificates (commonly referred to as PKI). Below is guidance on how SSL is typically configured for the two popular database backends, MySQL and PostgreSQL. - NOTE: When installing the certificate and key files, ensure that the file permissions are restricted, for example chmod 0600, and the ownership is restricted to the database daemon user to prevent unauthorized access by other processes and users on the database server. + NOTE: When installing the certificate and key files, ensure that the file permissions are restricted, for example chmod 0600, and the ownership is restricted to the database daemon user to prevent unauthorized access by other processes and users on the database server.
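As a sketch of two of the recommendations above, a unique account per service endpoint and SSL required on the wire, the following creates a MySQL account for a single nova node and refuses unencrypted connections for it; the host, database, and password are illustrative:

# one account per service endpoint, SSL mandatory for it
mysql -u root -p -e "CREATE USER 'nova'@'node15' IDENTIFIED BY 'NOVA_DBPASS';"
mysql -u root -p -e "GRANT ALL ON nova.* TO 'nova'@'node15' REQUIRE SSL;"

REQUIRE X509 tightens this further by demanding a client certificate, which is the basis for the PKI-style authentication discussed in the next section.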
@@ -51,16 +51,16 @@ ssl = true ssl-ciphers = 'cipher:list' The server certificate, key, and certificate authority (CA) files should be placed in the $PGDATA directory in the following files: - $PGDATA/server.crt - Server certificate + $PGDATA/server.crt - Server certificate - $PGDATA/server.key - Private key corresponding to server.crt + $PGDATA/server.key - Private key corresponding to server.crt - $PGDATA/root.crt - Trusted certificate authorities + $PGDATA/root.crt - Trusted certificate authorities - $PGDATA/root.crl - Certificate revocation list + $PGDATA/root.crl - Certificate revocation list
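Following the NOTE above about permissions, and the file layout just listed, a typical PostgreSQL setup locks the key material down to the database account and then requires SSL per network in pg_hba.conf; the service user and management network shown are illustrative:

# restrict the server key to the database daemon user
chown postgres:postgres $PGDATA/server.key
chmod 0600 $PGDATA/server.key

# pg_hba.conf: accept the nova account from the management network only over SSL
#   hostssl  nova  nova  10.0.0.0/24  md5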
diff --git a/doc/src/docbkx/openstack-security/ch046_data-residency.xml b/doc/src/docbkx/openstack-security/ch046_data-residency.xml index 7ada328edf..0fea08b30e 100644 --- a/doc/src/docbkx/openstack-security/ch046_data-residency.xml +++ b/doc/src/docbkx/openstack-security/ch046_data-residency.xml @@ -3,10 +3,10 @@ Data Privacy Concerns OpenStack is designed to support multitenancy and those tenants will most probably have different data requirements. As a cloud builder and operator you need to ensure your OpenStack environment can address various data privacy concerns and regulations. In this chapter we will address the following topics around Data Privacy as it pertains to OpenStack implementations: - Data Residency + Data Residency - Data Disposal + Data Disposal
@@ -15,48 +15,48 @@ Numerous OpenStack services maintain data and metadata belonging to tenants or reference tenant information. Tenant data stored in an OpenStack cloud may include the following items: - Swift objects + Swift objects - Compute instance ephemeral filesystem storage + Compute instance ephemeral filesystem storage - Compute instance memory + Compute instance memory - Cinder volume data + Cinder volume data - Public keys for Compute Access + Public keys for Compute Access - Virtual Machine Images in Glance + Virtual Machine Images in Glance - Machine snapshots + Machine snapshots - Data passed to OpenStack Compute's configuration-drive extension + Data passed to OpenStack Compute's configuration-drive extension Metadata stored by an OpenStack cloud includes the following non-exhaustive items: - Organization name + Organization name - User's "Real Name" + User's "Real Name" - Number or size of running instances, buckets, objects, volumes, and other quota-related items + Number or size of running instances, buckets, objects, volumes, and other quota-related items - Number of hours running instances or storing data + Number of hours running instances or storing data - IP addresses of users + IP addresses of users - Internally generated private keys for compute image bundling + Internally generated private keys for compute image bundling
@@ -64,38 +64,38 @@ Data Disposal OpenStack operators should strive to provide a certain level of tenant data disposal assurance. Best practices suggest that the operator sanitize cloud system media (digital and non-digital) prior to disposal, release out of organization control or release for reuse. Sanitization methods should implement an appropriate level of strength and integrity given the specific security domain and sensitivity of the information.
- "Sanitization is the process used to remove information from system media such that there is reasonable assurance that the information cannot be retrieved or reconstructed. Sanitization techniques, including clearing, purging, and destroying media information, prevent the disclosure of organizational information to unauthorized individuals when such media is reused or released for disposal." [NIST Special Publication 800-53 Revision 3] + "Sanitization is the process used to remove information from system media such that there is reasonable assurance that the information cannot be retrieved or reconstructed. Sanitization techniques, including clearing, purging, and destroying media information, prevent the disclosure of organizational information to unauthorized individuals when such media is reused or released for disposal." [NIST Special Publication 800-53 Revision 3]
General data disposal and sanitization guidelines as adopted from NIST recommended security controls. Cloud Operators should: - Track, document and verify media sanitization and disposal actions. + Track, document and verify media sanitization and disposal actions. - Test sanitation equipment and procedures to verify + Test sanitization equipment and procedures to verify proper performance. - Sanitize portable, removable storage devices prior to connecting such devices to the cloud infrastructure. + Sanitize portable, removable storage devices prior to connecting such devices to the cloud infrastructure. - Destroy cloud system media that cannot be sanitized. + Destroy cloud system media that cannot be sanitized. In an OpenStack deployment you will need to address the following: - Secure data erasure + Secure data erasure - Instance memory scrubbing + Instance memory scrubbing - Cinder volume data + Cinder volume data - Compute instance ephemeral storage + Compute instance ephemeral storage - Bare metal server sanitization + Bare metal server sanitization
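For the Cinder volume data item above, one control available in this era is the volume_clear option, which overwrites LVM-backed volumes on deletion. A sketch, assuming the LVM driver; confirm the option names against your release:

# cinder.conf
volume_clear = zero        # or "shred" for a multi-pass overwrite
volume_clear_size = 0      # 0 wipes the entire volume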
diff --git a/doc/src/docbkx/openstack-security/ch047_data-encryption.xml b/doc/src/docbkx/openstack-security/ch047_data-encryption.xml index fd3cdc43f2..e53983adcb 100644 --- a/doc/src/docbkx/openstack-security/ch047_data-encryption.xml +++ b/doc/src/docbkx/openstack-security/ch047_data-encryption.xml @@ -7,13 +7,13 @@ Often, data encryption relates positively to the ability to reliably destroy tenant and per-instance data, simply by throwing away the keys. It should be noted that in doing so, it becomes of great importance to destroy those keys in a reliable and secure manner. Opportunities to encrypt data for users are present: - Swift objects + Swift objects - Cinder volumes & Instance Ephemeral Filesystems + Cinder volumes & Instance Ephemeral Filesystems - Network data + Network data
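Where a service offers no native encryption for one of these items, encrypting the underlying block device is one option. A minimal dm-crypt sketch; the device name is illustrative, and luksFormat destroys any data already on it:

# initialize and open an encrypted container, then build a filesystem on it
cryptsetup luksFormat /dev/sdb
cryptsetup luksOpen /dev/sdb secure-vols
mkfs.ext4 /dev/mapper/secure-vols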
diff --git a/doc/src/docbkx/openstack-security/ch048_key-management.xml b/doc/src/docbkx/openstack-security/ch048_key-management.xml index 21c906f0be..de0cc81d76 100644 --- a/doc/src/docbkx/openstack-security/ch048_key-management.xml +++ b/doc/src/docbkx/openstack-security/ch048_key-management.xml @@ -8,10 +8,10 @@
References: - Barbican - https://github.com/cloudkeep/barbican + Barbican - https://github.com/cloudkeep/barbican - KMIP - https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip + KMIP - https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip
diff --git a/doc/src/docbkx/openstack-security/ch049_case-studies-tenant-data.xml b/doc/src/docbkx/openstack-security/ch049_case-studies-tenant-data.xml index 7a29d8377c..378c13e93d 100644 --- a/doc/src/docbkx/openstack-security/ch049_case-studies-tenant-data.xml +++ b/doc/src/docbkx/openstack-security/ch049_case-studies-tenant-data.xml @@ -12,22 +12,22 @@ of data destruction activities. Alice does this using the following: - Establishing procedures to sanitize tenant data when a program or project ends - Track the destruction of both the tenant data and metadata via ticketing in a CMDB - For Volume storage: - Physical Server Issues - To provide secure ephemeral instance storage, Alice implements qcow2 files on an encrypted filesystem. + Establishing procedures to sanitize tenant data when a program or project ends + Track the destruction of both the tenant data and metadata via ticketing in a CMDB + For Volume storage: + Physical Server Issues + To provide secure ephemeral instance storage, Alice implements qcow2 files on an encrypted filesystem.
Bob's Public Cloud As stated during the introduction to Bob's case study, tenant privacy is of extremely high priority. In addition to the requirements and actions Bob will take to isolate tenants from one another at the infrastructure layer, Bob also needs to provide assurances for tenant data privacy. Bob does this using the following: - Establishing procedures to sanitize customer data when a customer churns - Track the destruction of both the customer data and metadata via ticketing in a CMDB - For Volume storage: - Physical Server Issues - To provide secure ephemeral instance storage, Bob implements qcow2 files on an encrypted filesystems. + Establishing procedures to sanitize customer data when a customer churns + Track the destruction of both the customer data and metadata via ticketing in a CMDB + For Volume storage: + Physical Server Issues + To provide secure ephemeral instance storage, Bob implements qcow2 files on an encrypted filesystem.
diff --git a/doc/src/docbkx/openstack-security/ch051_vss-intro.xml b/doc/src/docbkx/openstack-security/ch051_vss-intro.xml index 913d028f9a..31753d5c00 100644 --- a/doc/src/docbkx/openstack-security/ch051_vss-intro.xml +++ b/doc/src/docbkx/openstack-security/ch051_vss-intro.xml @@ -12,48 +12,48 @@ Selection Criteria As part of your hypervisor selection process, you will need to consider a number of important factors to help increase your security posture. Specifically, we will be looking into the following areas: - Team Expertise + Team Expertise - Product or Project maturity + Product or Project maturity - Certifications, Attestations + Certifications, Attestations - Additional Security Features + Additional Security Features - Hypervisor vs. Baremetal + Hypervisor vs. Baremetal - Hardware Concerns + Hardware Concerns - Common Criteria + Common Criteria Additionally, the following security-related criteria are highly encouraged to be evaluated when selecting a hypervisor for OpenStack deployments: - Has the hypervisor undergone Common Criteria certification? If so, to what levels? + Has the hypervisor undergone Common Criteria certification? If so, to what levels? - Is the underlying cryptography certified by a third-party? + Is the underlying cryptography certified by a third party? Team Expertise Most likely, the most important aspect in hypervisor selection is the expertise of your staff in managing and maintaining a particular hypervisor platform. The more familiar your team is with a given product, its configuration, and its eccentricities, the less likely there will be configuration mistakes. Additionally, having staff expertise spread across an organization on a given hypervisor will increase availability of your systems, allow for developing a segregation of duties, and mitigate problems in the event that a team member is unavailable.
Product or Project Maturity The maturity of a given hypervisor product or project is critical to your security posture as well. Product maturity will have a number of effects once you have deployed your cloud; in the context of this security guide we are interested in the following: - Availability of expertise + Availability of expertise Active developer and user communities - Timeliness and Availability of updates + Timeliness and Availability of updates - Incidence response + Incident response One of the biggest indicators of a hypervisor's maturity is the size and vibrancy of the community that surrounds it. As this concerns security, the quality of the community will affect the availability of expertise should you need additional cloud operators. It is also a sign of how widely deployed the hypervisor is, in turn leading to the battle readiness of any reference architectures and best practices. @@ -68,18 +68,18 @@ Common Criteria is an internationally standardized software evaluation process, used by governments and commercial companies to validate that software technologies perform as advertised. In the government sector, NSTISSP No. 11 mandates that U.S. Government agencies only procure software which has been Common Criteria certified, a policy which has been in place since July 2002. It should be specifically noted that OpenStack has not undergone Common Criteria certification; however, many of the available hypervisors have. In addition to validating a technology's capabilities, the Common Criteria process evaluates how technologies are developed. - How is source code management performed? + How is source code management performed? - How are users granted access to build systems? + How are users granted access to build systems? - Is the technology cryptographically signed before distribution? + Is the technology cryptographically signed before distribution? The KVM hypervisor has been Common Criteria certified through the U.S. Government and commercial distributions, which have been validated to separate the runtime environment of virtual machines from each other, providing foundational technology to enforce instance isolation. In addition to virtual machine isolation, KVM has been Common Criteria certified to
- "provide system-inherent separation mechanisms to the resources of virtual machines. This separation ensures that large software component used for virtualizing and simulating devices executing for each virtual machine cannot interfere with each other. Using the SELinux multi-category mechanism, the virtualization and simulation software instances are isolated. The virtual machine management framework configures SELinux multi-category settings transparently to the administrator" + "provide system-inherent separation mechanisms to the resources of virtual machines. This separation ensures that large software component used for virtualizing and simulating devices executing for each virtual machine cannot interfere with each other. Using the SELinux multi-category mechanism, the virtualization and simulation software instances are isolated. The virtual machine management framework configures SELinux multi-category settings transparently to the administrator"
While many hypervisor vendors, such as Red Hat, Microsoft, and VMware, have achieved Common Criteria Certification, their underlying certified feature set differs. It is recommended to evaluate vendor claims to ensure they minimally satisfy the following requirements: @@ -89,19 +89,19 @@
@@ -117,15 +117,15 @@
@@ -144,42 +144,42 @@
@@ -212,7 +212,7 @@ FIPS 140-2 In the United States the National Institute of Standards and Technology (NIST) certifies cryptographic algorithms through a process known as the Cryptographic Module Validation Program. NIST certifies algorithms for conformance against Federal Information Processing Standard 140-2 (FIPS 140-2), which ensures:
- Products validated as conforming to FIPS 140-2 are accepted by the Federal agencies of both countries [United States and Canada] for the protection of sensitive information (United States) or Designated Information (Canada). The goal of the CMVP is to promote the use of validated cryptographic modules and provide Federal agencies with a security metric to use in procuring equipment containing validated cryptographic modules. + Products validated as conforming to FIPS 140-2 are accepted by the Federal agencies of both countries [United States and Canada] for the protection of sensitive information (United States) or Designated Information (Canada). The goal of the CMVP is to promote the use of validated cryptographic modules and provide Federal agencies with a security metric to use in procuring equipment containing validated cryptographic modules.
When evaluating base hypervisor technologies, consider if the hypervisor has been certified against FIPS 140-2. Not only is conformance against FIPS 140-2 mandated per U.S. Government policy, but formal certification also indicates that a given implementation of a cryptographic algorithm has been reviewed for conformance against module specification, cryptographic module ports and interfaces; roles, services, and authentication; finite state model; physical security; operational environment; cryptographic key management; electromagnetic interference/electromagnetic compatibility (EMI/EMC); self-tests; design assurance; and mitigation of other attacks. @@ -242,7 +242,7 @@ diff --git a/doc/src/docbkx/openstack-security/ch052_devices.xml b/doc/src/docbkx/openstack-security/ch052_devices.xml index 71d38ce9bb..cd14df6903 100644 --- a/doc/src/docbkx/openstack-security/ch052_devices.xml +++ b/doc/src/docbkx/openstack-security/ch052_devices.xml @@ -7,10 +7,10 @@ Many hypervisors offer a functionality known as PCI passthrough. This allows an instance to have direct access to a piece of hardware on the node. For example, this could be used to allow instances to access video cards offering the compute unified device architecture (CUDA) for high performance computation. This feature carries two types of security risks: direct memory access and hardware infection. Direct memory access (DMA) is a feature that permits certain hardware devices to access arbitrary physical memory addresses in the host computer. Often video cards have this capability. However, an instance should not be given arbitrary physical memory access because this would give it full view of both the host system and other instances running on the same node. Hardware vendors use an input/output memory management unit (IOMMU) to manage DMA access in these situations. Therefore, cloud architects should ensure that the hypervisor is configured to utilize this hardware feature. - KVM: http://www.linux-kvm.org/page/How_to_assign_devices_with_VT-d_in_KVM + KVM: http://www.linux-kvm.org/page/How_to_assign_devices_with_VT-d_in_KVM - Xen: http://wiki.xen.org/wiki/VTd_HowTo + Xen: http://wiki.xen.org/wiki/VTd_HowTo Note: The IOMMU feature is marketed as VT-d by Intel and AMD-Vi by AMD. @@ -74,10 +74,10 @@ CFLAGS="-arch x86_64 -fstack-protector-all -Wstack-protector --param ssp-buffer- We recommend testing your QEMU executable file after it is compiled to ensure that the compiler hardening worked properly. Most cloud deployments will not want to build software such as QEMU by hand. It is better to use packaging to ensure that the process is repeatable and to ensure that the end result can be easily deployed throughout the cloud. The references below provide some additional details on applying compiler hardening options to existing packages.
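Picking up the testing recommendation above, one quick check is to look for the stack protector's canary symbol in the finished binary, or to use Debian's hardening-check helper where it is packaged; the binary path is illustrative:

# binaries built with -fstack-protector reference __stack_chk_fail
readelf -s /usr/bin/qemu-system-x86_64 | grep stack_chk
# convenience checker shipped by Debian-based distributions
hardening-check /usr/bin/qemu-system-x86_64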
- DEB packages: http://wiki.debian.org/HardeningWalkthrough  + DEB packages: http://wiki.debian.org/HardeningWalkthrough  - RPM packages: http://fedoraproject.org/wiki/How_to_create_an_RPM_package + RPM packages: http://fedoraproject.org/wiki/How_to_create_an_RPM_package diff --git a/doc/src/docbkx/openstack-security/ch055_security-services-for-instances.xml b/doc/src/docbkx/openstack-security/ch055_security-services-for-instances.xml index 2879c4a180..843e859ad9 100644 --- a/doc/src/docbkx/openstack-security/ch055_security-services-for-instances.xml +++ b/doc/src/docbkx/openstack-security/ch055_security-services-for-instances.xml @@ -5,16 +5,16 @@ Deployers or users of OpenStack with strong security requirements may want to consider deploying these technologies. Not all are applicable in every situation, indeed in some cases technologies may be ruled out for use in a cloud because of prescriptive business requirements. Similarly some technologies inspect instance data such as run state which may be undesirable to the users of the system.In this chapter we explore these technologies and describe the situations where they can be used to enhance security for instances or underlying instances. We also seek to highlight where privacy concerns may exist. These include data pass through, introspection, or providing a source of entropy. In this section we highlight the following additional security services: - Entropy to Instances + Entropy to Instances - Scheduling Instances to Nodes + Scheduling Instances to Nodes - Trusted Images + Trusted Images - Instance Migrations + Instance Migrations
@@ -130,11 +130,11 @@ gpg --verify SHA256SUMS.gpg SHA256SUMS sha256sum -c SHA256SUMS 2>&1 | grep Instance Migrations OpenStack and the underlying virtualization layers provide for the Live Migration of images between OpenStack nodes, allowing you to seamlessly perform rolling upgrades of your OpenStack Compute nodes without instance downtime. However, Live Migrations also come with their fair share of risk. To understand the risks involved, it is important to first understand how a live migration works. The following are the high-level steps performed during a live migration. - Start instance on destination host - Transfer memory - Stop the guest & sync disks - Transfer state - Start the guest + Start instance on destination host + Transfer memory + Stop the guest & sync disks + Transfer state + Start the guest
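For reference, the sequence above is normally driven through the compute API rather than by hand; an illustrative invocation, with placeholder instance and host names:

# live-migrate an instance to a specific compute host
$ nova live-migration myInstance compute02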
Live Migration Risks @@ -157,13 +157,13 @@ gpg --verify SHA256SUMS.gpg SHA256SUMS sha256sum -c SHA256SUMS 2>&1 | grep Live Migration Mitigations There are several methods to mitigate some of the risk associated with live migrations; the following list details some of these: - Disable Live Migration + Disable Live Migration - Isolated Migration Network + Isolated Migration Network - Encrypted Live Migration + Encrypted Live Migration
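As a sketch of the first mitigation: live migration is gated by nova policy, so it can be restricted, or effectively disabled, in /etc/nova/policy.json. The rule name below matches the policy layout of this era, and the exact deny syntax varies by release, so verify both against your deployment:

    "compute_extension:admin_actions:migrateLive": "rule:admin_api",

Locking the rule down so that no role satisfies it disables the feature outright.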
diff --git a/doc/src/docbkx/openstack-security/ch058_forensicsincident-response.xml b/doc/src/docbkx/openstack-security/ch058_forensicsincident-response.xml index e6b7033eb8..0929d6cc8d 100644 --- a/doc/src/docbkx/openstack-security/ch058_forensicsincident-response.xml +++ b/doc/src/docbkx/openstack-security/ch058_forensicsincident-response.xml @@ -13,27 +13,27 @@ In the case of an OpenStack cloud instance, we need to monitor the hardware, the OpenStack services, and the cloud resource usage. The last stems from wanting to be elastic, to scale to the dynamic needs of the users. Here are a few important use cases to consider when implementing log aggregation, analysis and monitoring. These use cases can be implemented and monitored through various commercial and open source tools, homegrown scripts, etc. These tools and scripts can generate events that can then be sent to the administrators through email or integrated dashboard. It is important to consider additional use cases that may apply to your specific network and what you may consider anomalous behavior. - Detecting the absence of log generation is an event of high value. Such an event would indicate a service failure or even an intruder who has temporarily switched off logging or modified the log level to hide their tracks. + Detecting the absence of log generation is an event of high value. Such an event would indicate a service failure or even an intruder who has temporarily switched off logging or modified the log level to hide their tracks. - Application events such as start and/or stop that were unscheduled would also be events to monitor and examine for possible security implications. + Application events such as start and/or stop that were unscheduled would also be events to monitor and examine for possible security implications. - OS events on the OpenStack service machines such as user logins, restarts also provide valuable insight into use/misuse + OS events on the OpenStack service machines such as user logins, restarts also provide valuable insight into use/misuse - Being able to detect the load on the OpenStack servers also enables responding by way of introducing additional servers for load balancing to ensure high availability. + Being able to detect the load on the OpenStack servers also enables responding by way of introducing additional servers for load balancing to ensure high availability. - Other events that are actionable are networking bridges going down, ip tables being flushed on nova compute nodes and consequential loss of access to instances resulting in unhappy customers.  + Other events that are actionable are networking bridges going down, iptables being flushed on nova compute nodes and consequential loss of access to instances resulting in unhappy customers. - To reduce security risks from orphan instances on a user/tenant/domain deletion in the Identity service there is discussion to generate notifications in the system and have OpenStack components respond to these events as appropriate such as terminating instances, disconnecting attached volumes, reclaiming CPU and storage resources etc.  + To reduce security risks from orphan instances on a user/tenant/domain deletion in the Identity service there is discussion to generate notifications in the system and have OpenStack components respond to these events as appropriate such as terminating instances, disconnecting attached volumes, reclaiming CPU and storage resources etc. 
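The first use case, detecting the absence of log generation, can be approximated with a trivial scheduled check. A sketch; the log path, interval, and alert address are all illustrative:

#!/bin/sh
# alert if nova-api has written nothing in the last 10 minutes
if ! find /var/log/nova/nova-api.log -mmin -10 | grep -q .; then
    echo "no recent nova-api log activity" \
        | mail -s "log silence on $(hostname)" ops@example.com
fi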
A cloud will host many virtual instances, and monitoring these instances goes beyond hardware monitoring and log files which may just contain CRUD events. diff --git a/doc/src/docbkx/openstack-security/ch061_compliance-overview.xml b/doc/src/docbkx/openstack-security/ch061_compliance-overview.xml index 1a129e25c0..fe15cb4f72 100644 --- a/doc/src/docbkx/openstack-security/ch061_compliance-overview.xml +++ b/doc/src/docbkx/openstack-security/ch061_compliance-overview.xml @@ -3,38 +3,38 @@ Compliance Overview An OpenStack deployment may require compliance activities for many purposes, such as regulatory and legal requirements, customer need, privacy considerations, and security best practices. Compliance, when done correctly, unifies and strengthens the other security topics discussed in this guide. This chapter has several objectives: - Review common security principles. + Review common security principles. - Discuss common control frameworks and certification resources to achieve industry certifications or regulator attestations. + Discuss common control frameworks and certification resources to achieve industry certifications or regulator attestations. - Act as a reference for auditors when evaluating OpenStack deployments. + Act as a reference for auditors when evaluating OpenStack deployments. - Introduce privacy considerations specific to OpenStack and cloud environments. + Introduce privacy considerations specific to OpenStack and cloud environments.
Security Principles Industry standard security principles provide a baseline for compliance certifications and attestations. If these principles are considered and referenced throughout an OpenStack deployment, certification activities may be simplified. - Layered Defenses: Identify where risks exist in a cloud architecture and apply controls to mitigate the risks. In areas of significant concern, layered defences provide multiple complementary controls to further mitigate risk. For example, to ensure adequate isolation between cloud tenants, we recommend hardening QEMU, using a hypervisor with SELinux support, enforcing mandatory access control policies, and reducing the overall attack surface. The foundational principle is to harden an area of concern with multiple layers of defense such that if any one layer is compromised, other layers will exist to offer protection and minimize exposure. + Layered Defenses: Identify where risks exist in a cloud architecture and apply controls to mitigate the risks. In areas of significant concern, layered defenses provide multiple complementary controls to further mitigate risk. For example, to ensure adequate isolation between cloud tenants, we recommend hardening QEMU, using a hypervisor with SELinux support, enforcing mandatory access control policies, and reducing the overall attack surface. The foundational principle is to harden an area of concern with multiple layers of defense such that if any one layer is compromised, other layers will exist to offer protection and minimize exposure. - Fail Securely: In the case of failure, systems should be configured to fail into a closed secure state. For example, SSL certificate verification should fail closed by severing the network connection if the CNAME doesn't match the server's DNS name. Software often fails open in this situation, allowing the connection to proceed without a CNAME match, which is less secure and not recommended. + Fail Securely: In the case of failure, systems should be configured to fail into a closed secure state. For example, SSL certificate verification should fail closed by severing the network connection if the CNAME doesn't match the server's DNS name. Software often fails open in this situation, allowing the connection to proceed without a CNAME match, which is less secure and not recommended. - Least Privilege: Only the minimum level of access for users and system services is granted. This access is based upon role, responsibility and job function. This security principal of least privilege is written into several international government security policies, such as NIST 800-53 Section AC-6 within the United States. + Least Privilege: Only the minimum level of access for users and system services is granted. This access is based upon role, responsibility and job function. This security principle of least privilege is written into several international government security policies, such as NIST 800-53 Section AC-6 within the United States. - Compartmentalize: Systems should be segregated in a such way that if one machine, or system-level service, is compromised the security of the other systems will remain intact. Practically, the enablement and proper usage of SELinux helps accomplish this goal. + Compartmentalize: Systems should be segregated in such a way that if one machine, or system-level service, is compromised the security of the other systems will remain intact. Practically, the enablement and proper usage of SELinux helps accomplish this goal. 
- Promote Privacy: The amount of information that can be gathered about a system and its users should be minimized. + Promote Privacy: The amount of information that can be gathered about a system and its users should be minimized. - Logging Capability: Appropriate logging is implemented to monitor for unauthorized use, incident response and forensics. It is highly recommended that selected audit subsystems be Common Criteria certified, which provides non-attestable event records in most countries. + Logging Capability: Appropriate logging is implemented to monitor for unauthorized use, incident response and forensics. It is highly recommended that selected audit subsystems be Common Criteria certified, which provides non-attestable event records in most countries.
diff --git a/doc/src/docbkx/openstack-security/ch062_audit-guidance.xml b/doc/src/docbkx/openstack-security/ch062_audit-guidance.xml index 3daacdd58b..fd2bbef317 100644 --- a/doc/src/docbkx/openstack-security/ch062_audit-guidance.xml +++ b/doc/src/docbkx/openstack-security/ch062_audit-guidance.xml @@ -28,13 +28,13 @@ Prepare for External Audit Once the internal audit results look good, it is time to prepare for an external audit. There are several key actions to take at this stage; these are outlined below: - Maintain good records from your internal audit. These will prove useful during the external audit so you can be prepared to answer questions about mapping the compliance controls to a particular deployment. + Maintain good records from your internal audit. These will prove useful during the external audit so you can be prepared to answer questions about mapping the compliance controls to a particular deployment. - Deploy automated testing tools to ensure that the cloud remains compliant over time. + Deploy automated testing tools to ensure that the cloud remains compliant over time. - Select an auditor. + Select an auditor. Selecting an auditor can be challenging. Ideally, you are looking for someone with experience in cloud compliance audits. OpenStack experience is another big plus. Often it is best to consult with people who have been through this process for referrals. Cost can vary greatly depending on the scope of the engagement and the audit firm considered. diff --git a/doc/src/docbkx/openstack-security/ch064_certifications-compliance-statements.xml b/doc/src/docbkx/openstack-security/ch064_certifications-compliance-statements.xml index cd563f3163..a6dbd3d8b9 100644 --- a/doc/src/docbkx/openstack-security/ch064_certifications-compliance-statements.xml +++ b/doc/src/docbkx/openstack-security/ch064_certifications-compliance-statements.xml @@ -11,10 +11,10 @@ Service Organization Controls (SOC) criteria are defined by the American Institute of Certified Public Accountants (AICPA, http://www.aicpa.org/). SOC controls assess relevant financial statements and assertions of a service provider, such as compliance with the Sarbanes-Oxley Act. SOC 1 is a replacement for Statement on Auditing Standards No. 70 (SAS 70) Type II report. These controls commonly include physical data centers in scope. There are two types of SOC 1 reports: - Type 1 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design of the controls to achieve the related control objectives included in the description as of a specified date. + Type 1 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design of the controls to achieve the related control objectives included in the description as of a specified date. 
- Type 2 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design and operating effectiveness of the controls to achieve the related control objectives included in the description throughout a specified period + Type 2 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design and operating effectiveness of the controls to achieve the related control objectives included in the description throughout a specified period For more details see http://www.aicpa.org/InterestAreas/FRC/AssuranceAdvisoryServices/Pages/AICPASOC1Report.aspx. @@ -24,10 +24,10 @@ Service Organization Controls (SOC) 2 is a self-attestation of controls that affect the security, availability, and processing integrity of the systems a service organization uses to process users' data and the confidentiality and privacy of information processed by these systems. Examples of users are those responsible for governance of the service organization; customers of the service organization; regulators; business partners; suppliers and others who have an understanding of the service organization and its controls. There are two types of SOC 2 reports: - Type 1 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design of the controls to achieve the related control objectives included in the description as of a specified date. + Type 1 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design of the controls to achieve the related control objectives included in the description as of a specified date. - Type 2 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design and operating effectiveness of the controls to achieve the related control objectives included in the description throughout a specified period. + Type 2 – report on the fairness of the presentation of management’s description of the service organization’s system and the suitability of the design and operating effectiveness of the controls to achieve the related control objectives included in the description throughout a specified period. For more details see http://www.aicpa.org/InterestAreas/FRC/AssuranceAdvisoryServices/Pages/AICPASOC2Report.aspx. @@ -79,10 +79,10 @@ - Control Selection: Based upon system security category as defined in FIPS 199, an organization utilizes FIPS 200 to identify specific security control requirements for the information system. For example, if a system is categorized as “moderate”, a requirement may be introduced to mandate “secure passwords.” + Control Selection: Based upon system security category as defined in FIPS 199, an organization utilizes FIPS 200 to identify specific security control requirements for the information system. For example, if a system is categorized as “moderate”, a requirement may be introduced to mandate “secure passwords.” - Control Tailoring: Once system security controls are identified, an OpenStack architect will utilize NIST 800-53 to extract tailored control selection, e.g. specification of what constitutes a “secure password.” + Control Tailoring: Once system security controls are identified, an OpenStack architect will utilize NIST 800-53 to extract tailored control selection, e.g. 
specification of what constitutes a “secure password.”
diff --git a/doc/src/docbkx/openstack-security/ch065_privacy.xml b/doc/src/docbkx/openstack-security/ch065_privacy.xml index 7b6b65a0e4..98f1c68adc 100644 --- a/doc/src/docbkx/openstack-security/ch065_privacy.xml +++ b/doc/src/docbkx/openstack-security/ch065_privacy.xml @@ -5,7 +5,7 @@ An OpenStack deployment will likely need to demonstrate compliance with an organization’s Privacy Policy, with the U.S. – E.U. Safe Harbor framework, the ISO/IEC 29100:2011 privacy framework or with other privacy-specific guidelines. In the U.S. the AICPA has defined 10 privacy areas of focus; OpenStack deployments within a commercial environment may desire to attest to some or all of these principles. (http://www.aicpa.org/interestareas/informationtechnology/resources/privacy/generallyacceptedprivacyprinciples/). To aid OpenStack architects in the protection of personal data, it is recommended that OpenStack architects review the NIST publication 800-122, titled "Guide to Protecting the Confidentiality of Personally Identifiable Information (PII)." This guide steps through the process of protecting:
- "any information about an individual maintained by an agency, including (1) any information that can be used to distinguish or trace an individual‘s identity, such as name, social security number, date and place of birth, mother‘s maiden name, or biometric records; and (2) any other information that is linked or linkable to an individual, such as medical, educational, financial, and employment information" + "any information about an individual maintained by an agency, including (1) any information that can be used to distinguish or trace an individual‘s identity, such as name, social security number, date and place of birth, mother‘s maiden name, or biometric records; and (2) any other information that is linked or linkable to an individual, such as medical, educational, financial, and employment information"
Comprehensive privacy management requires significant preparation, thought and investment. Additional complications are introduced when building global OpenStack clouds, for example navigating the differences between U.S. and more restrictive E.U. privacy laws. In addition, extra care needs to be taken when dealing with sensitive PII that may include information such as credit card numbers or medical records. This sensitive data is not only subject to privacy laws but also regulatory and governmental regulations. By deferring to established best practices, including those published by governments, a holistic privacy management policy may be created and practiced for OpenStack deployments. diff --git a/doc/src/docbkx/openstack-training/bk000-preface.xml b/doc/src/docbkx/openstack-training/bk000-preface.xml index 4d830671ce..bfa5845955 100644 --- a/doc/src/docbkx/openstack-training/bk000-preface.xml +++ b/doc/src/docbkx/openstack-training/bk000-preface.xml @@ -5,7 +5,7 @@ xml:id="bk001-preface"> Preface OpenStack™ Training Guides offers open source software training for cloud - administration and management for any organization. + administration and management for any organization.
Document Change History The following table describes the most recent changes: diff --git a/doc/src/docbkx/openstack-training/bk001-ch003-associate-general.xml b/doc/src/docbkx/openstack-training/bk001-ch003-associate-general.xml index 40d72add12..40f5c4af76 100644 --- a/doc/src/docbkx/openstack-training/bk001-ch003-associate-general.xml +++ b/doc/src/docbkx/openstack-training/bk001-ch003-associate-general.xml @@ -7,7 +7,7 @@ Associate General - Credit for basic install guide authors to be here +Credit for basic install guide authors to be here diff --git a/doc/src/docbkx/openstack-training/bk002-ch003-operations-general.xml b/doc/src/docbkx/openstack-training/bk002-ch003-operations-general.xml index eefa0a2057..0804225e52 100644 --- a/doc/src/docbkx/openstack-training/bk002-ch003-operations-general.xml +++ b/doc/src/docbkx/openstack-training/bk002-ch003-operations-general.xml @@ -94,7 +94,7 @@ export MAVEN_OPTS='-Xms256m -XX:MaxPermSize=1024m -Xmx1024m'

Add at least one SSH key to your account profile. To do this, follow the - instructions on https://help.launchpad.net/YourAccount/CreatingAnSSHKeyPair. + instructions on https://help.launchpad.net/YourAccount/CreatingAnSSHKeyPair. Sign the CLA: Every developer and contributor needs to sign the Individual @@ -152,7 +152,7 @@ $ git config --global user.email "your_email@youremail.com"
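If you do not yet have a keypair to upload, one can be generated locally first; a sketch, with the upload step covered by the Launchpad page linked above:

$ ssh-keygen -t rsa -C "your_email@youremail.com"
$ cat ~/.ssh/id_rsa.pub    # paste this into your Launchpad profile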

Select a bug that is unassigned and that you can fix. Syntax errors are the best - ones to start off with. + ones to start off with.
Using Oxygen, open the @@ -160,7 +160,7 @@ $ git config --global user.email "your_email@youremail.com"

+ html page and the xml page.
In the shell, @@ -179,7 +179,7 @@ $ git config --global user.email "your_email@youremail.com"

Correct the bug through Oxygen. Toggle back and forth through the different views at - the bottom of the editor. + the bottom of the editor.
Once the bug is fixed, verify that the documentation can be built without errors by @@ -237,7 +237,7 @@ $ git config --global user.email "your_email@youremail.com"
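The local build is driven by Maven, using the MAVEN_OPTS exported earlier; a sketch, assuming you run it from the directory containing the book's pom.xml:

$ mvn clean generate-sources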

Follow the Jenkins build progress at https://jenkins.openstack.org/view/Openstack-manuals/. Note if the build process - fails then the online documention will not reflect your bug fix. + fails then the online documentation will not reflect your bug fix.
diff --git a/doc/src/docbkx/openstack-training/bk002-ch008-operations-assessment.xml b/doc/src/docbkx/openstack-training/bk002-ch008-operations-assessment.xml index 28d1bcee06..c7440b3592 100644 --- a/doc/src/docbkx/openstack-training/bk002-ch008-operations-assessment.xml +++ b/doc/src/docbkx/openstack-training/bk002-ch008-operations-assessment.xml @@ -16,7 +16,7 @@
Configuration options for filter:name_check in proxy-server.conf file
container_listing_limit 10000 the default (and max) number of items returned for a container listing request
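For reference, the option in the row above lives in Swift's constraints section rather than in proxy-server.conf; an illustrative excerpt, so verify the file and section for your release:

# /etc/swift/swift.conf
[swift-constraints]
container_listing_limit = 10000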
Identification and Authentication: Identification and authentication using pluggable authentication modules (PAM) based upon user passwords. The quality of the passwords used can be enforced through configuration options.
Audit: The system provides the capability to audit a large number of events including individual system calls as well as events generated by trusted processes. Audit data is collected in regular files in ASCII format. The system provides a program for the purpose of searching the audit records. The system administrator can define a rule base to restrict auditing to the events they are interested in. This includes the ability to restrict auditing to specific events, specific users, specific objects or a combination of all of this. Audit records can be transferred to a remote audit daemon.
Discretionary Access Control: Discretionary Access Control (DAC) restricts access to file system objects based on Access Control Lists (ACLs) that include the standard UNIX permissions for user, group and others. Access control mechanisms also protect IPC objects from unauthorized access. The system includes the ext4 file system, which supports POSIX ACLs. This allows defining access rights to files within this type of file system down to the granularity of a single user.
Mandatory Access Control: Mandatory Access Control (MAC) restricts access to objects based on labels assigned to subjects and objects. Sensitivity labels are automatically attached to processes and objects. The access control policy enforced using these labels is derived from the Bell-LaPadula access control model. SELinux categories are attached to virtual machines and their resources. The access control policy enforced using these categories grants virtual machines access to resources if the category of the virtual machine is identical to the category of the accessed resource. The TOE implements non-hierarchical categories to control access to virtual machines.
Role-Based Access Control
Secure Communication: The system supports the definition of trusted channels using SSH. Password based authentication is supported. Only a restricted number of cipher suites are supported for those protocols in the evaluated configuration.
Storage Encryption: The system supports encrypted block devices to provide storage confidentiality via dm_crypt.
TSF Protection: While in operation, the kernel software and data are protected by the hardware memory protection mechanisms. The memory and process management components of the kernel ensure a user process cannot access kernel storage or storage belonging to other processes. Non-kernel TSF software and data are protected by DAC and process isolation mechanisms. In the evaluated configuration, the reserved user ID root owns the directories and files that define the TSF configuration. In general, files and directories containing internal TSF data (e.g., configuration files, batch job queues) are also protected from reading by DAC permissions. The system and the hardware and firmware components are required to be physically protected from unauthorized access. The system kernel mediates all access to the hardware mechanisms themselves, other than program visible CPU instruction functions. In addition, mechanisms for protection against stack overflow attacks are provided.
Algorithm | Key Length | Intended Purpose | Security Function | Implementation Standard
AES | 128 bits, 192 bits, 256 bits | Encryption / Decryption | Protected Data Transfer, Protection for Data at Rest | RFC 4253
TDES | 168 bits | Encryption / Decryption | Protected Data Transfer | RFC 4253
RSA | 1024 bits, 2048 bits, 3072 bits | Authentication, Key Exchange | Identification and Authentication, Protected Data Transfer | U.S. NIST FIPS PUB 186-3
DSA | L=1024, N=160 bits | Authentication, Key Exchange | Identification and Authentication, Protected Data Transfer | U.S. NIST FIPS PUB 186-3
Serpent | 128, 192, or 256 bit | Encryption / Decryption | Protection of Data at Rest | http://www.cl.cam.ac.uk/~rja14/Papers/serpent.pdf
Required for dynamic attestation services
PCI-SIG I/O virtualization | SR-IOV, MR-IOV, ATS | Required to allow secure sharing of PCI Express devices
Configure a ....
diff --git a/doc/src/docbkx/openstack-training/st-training-guides.xml b/doc/src/docbkx/openstack-training/st-training-guides.xml index 148ea58637..f6899bd05c 100644 --- a/doc/src/docbkx/openstack-training/st-training-guides.xml +++ b/doc/src/docbkx/openstack-training/st-training-guides.xml @@ -42,7 +42,7 @@ OpenStack™ Compute offers open source software for cloud administration and management for any organization. This guide describes how to install, manage, and - understand the software that runs OpenStack Compute. + understand the software that runs OpenStack Compute. diff --git a/doc/src/docbkx/openstack-user/src/ch_overview.xml b/doc/src/docbkx/openstack-user/src/ch_overview.xml index 0441852958..1b54e2d09d 100644 --- a/doc/src/docbkx/openstack-user/src/ch_overview.xml +++ b/doc/src/docbkx/openstack-user/src/ch_overview.xml @@ -18,7 +18,7 @@ command-line clients let you run simple commands to create and manage resources in a cloud and automate tasks by using scripts. Each of the core OpenStack projects has its own - command-line client. + command-line client. You can modify these examples for your specific use cases. In addition to these ways of interacting with a cloud, you diff --git a/doc/src/docbkx/openstack-user/src/section_cli_configure_instances.xml b/doc/src/docbkx/openstack-user/src/section_cli_configure_instances.xml index 027b0b94d3..f4719f6e06 100644 --- a/doc/src/docbkx/openstack-user/src/section_cli_configure_instances.xml +++ b/doc/src/docbkx/openstack-user/src/section_cli_configure_instances.xml @@ -24,7 +24,7 @@ can use the keypair for multiple instances that belong to that project. In case an image uses a static root password or a static key set – neither is recommended – you must - not provide a keypair when you launch the instance. + not provide a keypair when you launch the instance. A security group is a named collection of network access rules that you use to limit the types of traffic that have access to instances. When you @@ -37,10 +37,10 @@ Any incoming traffic that is not matched by a rule is denied access by default. You can add rules to or remove rules from a security group. You can modify rules for the default and any - other security group. + other security group. You must modify the rules for the default security group because users cannot access instances that use the default - group from any IP address outside the cloud. + group from any IP address outside the cloud. You can modify the rules in a security group to allow access to instances through different ports and protocols. For example, you can modify rules to allow access to instances @@ -52,18 +52,18 @@ Source of traffic. Enable traffic to instances from either IP addresses inside the cloud from other group members or from all - IP addresses. + IP addresses. Protocol. Choose - TCP for SSH, ICMP for pings, or UDP. + TCP for SSH, ICMP for pings, or UDP. Destination port on virtual machine. Defines a port range. To open a single port only, enter the same value twice. ICMP does not support ports: Enter values to define the - codes and types of ICMP traffic to be allowed. + codes and types of ICMP traffic to be allowed. Rules are automatically enforced as soon as you create or @@ -78,7 +78,7 @@ To add or import keys You can generate a keypair or upload an existing - public key. + public key. To generate a keypair, run the following command: @@ -88,7 +88,7 @@ the private key to the MY_KEY.pem file, and registers the public key at the Nova - database. + database. 
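Concretely, the generate-and-protect sequence described here might look like the following, reusing the MY_KEY name from the text above:

$ nova keypair-add MY_KEY > MY_KEY.pem
$ chmod 600 MY_KEY.pem    # ssh refuses private keys readable by others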
To set the permissions of the @@ -151,14 +151,14 @@ You cannot delete the default security group for a project. Also, you cannot delete a security group that is assigned to a running - instance. + instance. To configure security group rules Modify security group rules with the nova - secgroup-*-rule commands. + secgroup-*-rule commands. On a shell, source the OpenStack RC file. For details, see . diff --git a/doc/src/docbkx/openstack-user/src/section_cli_floating_ips.xml b/doc/src/docbkx/openstack-user/src/section_cli_floating_ips.xml index 78210bc069..4af084c9cd 100644 --- a/doc/src/docbkx/openstack-user/src/section_cli_floating_ips.xml +++ b/doc/src/docbkx/openstack-user/src/section_cli_floating_ips.xml @@ -5,27 +5,27 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> Manage IP addresses Each instance can have a private, or fixed, IP address and a - public, or floating, one. + public, or floating, one. Private IP addresses are used for communication between instances, and public ones are used for communication with the - outside world. + outside world. When you launch an instance, it is automatically assigned a private IP address that stays the same until you explicitly terminate the instance. Rebooting an instance has no effect on the private IP address. A pool of floating IPs, configured by the cloud operator, is - available in OpenStack Compute. + available in OpenStack Compute. You can allocate a certain number of these to a project: The maximum number of floating IP addresses per project is defined - by the quota. + by the quota. You can add a floating IP address from this set to an instance of the project. Floating IP addresses can be dynamically disassociated and associated with other instances - of the same project at any time. + of the same project at any time. Before you can assign a floating IP address to an instance, you first must allocate floating IPs to a project. After floating IP addresses have been allocated to the current - project, you can assign them to running instances. + project, you can assign them to running instances. One floating IP address can be assigned to only one instance at a time. Floating IP addresses can be managed with the nova *floating-ip-* commands, provided @@ -84,7 +84,7 @@ with:$ nova floating-ip-listIn addition, you must know the instance's name (or ID). To look up the instances that belong to the current - project, use the nova list command. + project, use the nova list command. $ nova add-floating-ip INSTANCE_NAME_OR_ID FLOATING_IP After you assign the IP with nova add-floating-ip and configure security group rules for the instance, the instance is diff --git a/doc/src/docbkx/openstack-user/src/section_cli_heat.xml b/doc/src/docbkx/openstack-user/src/section_cli_heat.xml index bbed60b222..b17b3f6a4f 100644 --- a/doc/src/docbkx/openstack-user/src/section_cli_heat.xml +++ b/doc/src/docbkx/openstack-user/src/section_cli_heat.xml @@ -31,7 +31,7 @@ You can also use the stack-create command to validate a template file without creating a - stack from it. + stack from it. To do so, run the following command: $ heat stack-create mystack --template-file=/path/to/heat/templates/WordPress_Single_Instance.template If validation fails, the response returns an error @@ -53,7 +53,7 @@ To view stack details - To explore the state and history of a particular stack, you can run a number of commands. + To explore the state and history of a particular stack, you can run a number of commands. 
 To show the details of a stack, run the following command:
diff --git a/doc/src/docbkx/openstack-user/src/section_cli_launch_instances.xml b/doc/src/docbkx/openstack-user/src/section_cli_launch_instances.xml
index 5ecded54ea..1ccf3bc138 100644
--- a/doc/src/docbkx/openstack-user/src/section_cli_launch_instances.xml
+++ b/doc/src/docbkx/openstack-user/src/section_cli_launch_instances.xml
@@ -19,7 +19,7 @@
 To gather the parameters you need to launch an instance
 On a shell, source the OpenStack RC file. See <xref
- linkend="cli_openrc"/>.
+ linkend="cli_openrc"/>.
 List the available flavors:
@@ -36,7 +36,7 @@
 | 84 | m1.micro  | 128       | 0    | 0         |      | 1     | 1.0         | True      |
 +----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
 Note the ID of the flavor that you want to use for
- your instance.
+ your instance.
 List the available images:
@@ -53,14 +53,14 @@
 $ nova image-list | grep 'kernel'
 | df430cc2-3406-4061-b635-a51c16e488ac | cirros-0.3.1-x86_64-uec-kernel | ACTIVE | |
 Note the ID of the image that you want to boot your
- instance from.
+ instance from.
 List the available security groups:
 If you are an admin user, specify the
 --all-tenants parameter to
- list groups for all tenants.
+ list groups for all tenants.
 $ nova secgroup-list --all-tenants
 +----+---------+-------------+
@@ -70,7 +70,7 @@
 +----+---------+-------------+
 If you have not created any security groups, you can
 assign the instance to only the default security
- group.
+ group.
 You can also list rules for a specified security group:
 $ nova secgroup-list-rules default
@@ -83,7 +83,7 @@
 permitting TCP traffic on Port 80.
- List the available keypairs.
+ List the available keypairs.
 $ nova keypair-list
 +------+-------------+
 | Name | Fingerprint |
@@ -106,11 +106,11 @@
 parameter.
 $ nova boot --flavor FLAVOR_ID --image IMAGE_ID --key_name KEY_NAME \
   --security_group NAME_OF_SEC_GROUP NAME_FOR_INSTANCE --meta KEY=VALUE --meta KEY=VALUE
- The command returns a list of server properties.
+ The command returns a list of server properties.
 A status of BUILD indicates that the
- instance has started, but is not yet online.
+ instance has started, but is not yet online.
 A status of ACTIVE indicates that
- your server is active.
+ your server is active.
 +-------------------------------------+--------------------------------------+
 | Property                            | Value                                |
@@ -145,7 +145,7 @@
 Copy the server ID value from the
 id field in the output. You use
- this ID to get details for or delete your server.
+ this ID to get details for or delete your server.
 Copy the administrative password value from the
 adminPass field. You use this value
 to log into your server.
@@ -176,9 +176,9 @@
 This command lists all instances of the project you
 belong to, including their ID, their name, their
 status, and their private (and if assigned, their
- public) IP addresses.
+ public) IP addresses.
 If the status for the instance is ACTIVE, the
- instance is online.
+ instance is online.
 To view the available options for the
 nova list command, run the following command:
@@ -188,7 +188,7 @@
 If you did not provide a keypair, security groups, or
 rules, you can only access the instance from inside the
 cloud through VNC. Even pinging the instance is
- not possible.
+ not possible.
 To change this, proceed to .
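For illustration, the gathered parameters plug into the boot command shown in this hunk as follows; FLAVOR_ID, IMAGE_ID, KEY_NAME, NAME_OF_SEC_GROUP, and NAME_FOR_INSTANCE are the placeholders used above:

$ nova boot --flavor FLAVOR_ID --image IMAGE_ID --key_name KEY_NAME \
    --security_group NAME_OF_SEC_GROUP NAME_FOR_INSTANCE
$ nova list    # the status moves from BUILD to ACTIVE once the instance is online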
diff --git a/doc/src/docbkx/openstack-user/src/section_cli_manage_volumes.xml b/doc/src/docbkx/openstack-user/src/section_cli_manage_volumes.xml
index a35b72ede1..aeb27ac024 100644
--- a/doc/src/docbkx/openstack-user/src/section_cli_manage_volumes.xml
+++ b/doc/src/docbkx/openstack-user/src/section_cli_manage_volumes.xml
@@ -87,7 +87,7 @@
 If your volume was created successfully, its status is
 available. If its status is
 error, you might have tried to
- create a volume outside of your quota.
+ create a volume outside of your quota.
 Attach your volume to a server:
diff --git a/doc/src/docbkx/openstack-user/src/section_cli_nova.xml b/doc/src/docbkx/openstack-user/src/section_cli_nova.xml
index d800a6bea3..d8a6fa201a 100644
--- a/doc/src/docbkx/openstack-user/src/section_cli_nova.xml
+++ b/doc/src/docbkx/openstack-user/src/section_cli_nova.xml
@@ -143,7 +143,7 @@
 $ nova reboot <server> --hard
 Where server is the
- server ID or name.
+ server ID or name.
 Omit the option to perform a soft reboot.
@@ -171,7 +171,7 @@
 Where server is the
 server ID or name and
 flavor is the ID or
- name of the new flavor.
+ name of the new flavor.
 Specify the optional option to
 block while the instance resizes so that progress can be
 reported.
@@ -497,7 +497,7 @@
 . Optional.
 Opts out of attaching
- PublicNet to your server.
+ PublicNet to your server.
 RackConnect and Managed
 Cloud customers will receive an error if
@@ -540,7 +540,7 @@
 is written to
 /meta.js on the new
 server. Can be specified
- multiple times.
+ multiple times.
@@ -565,19 +565,19 @@
 user-data. User data file,
 which is exposed by the
- metadata server.
+ metadata server.
 availability-zone.
 The availability zone for instance
- placement.
+ placement.
 security_groups. A
 comma-separated list of security
- group names.
+ group names.
@@ -602,15 +602,15 @@
 NIC with a specified UUID to a network, specify
 the =private-net-id
- parameter.
+ parameter.
 Optionally, specify the
 ip-addr parameter to
 specify an IPv4 fixed
- address for NIC.
+ address for NIC.
 If you do not specify any networks on the
 parameter, the Internet and ServiceNet
- are attached to your server.
+ are attached to your server.
 ServiceNet is labeled as
 private and the
@@ -641,12 +641,12 @@
 value.
 Enables
- a configuration drive.
+ a configuration drive.
 .
 Blocks while the instance builds so progress
- can be reported.
+ can be reported.
 For example, you might issue the following
diff --git a/doc/src/docbkx/openstack-user/src/section_cli_swift.xml b/doc/src/docbkx/openstack-user/src/section_cli_swift.xml
index 76000a4ef5..df69200c42 100644
--- a/doc/src/docbkx/openstack-user/src/section_cli_swift.xml
+++ b/doc/src/docbkx/openstack-user/src/section_cli_swift.xml
@@ -150,7 +150,7 @@
 $ swift reboot <server> --hard
 Where server is the
- server ID or name.
+ server ID or name.
 Omit the option to perform a soft reboot.
@@ -178,7 +178,7 @@
 Where server is the
 server ID or name and
 flavor is the ID or
- name of the new flavor.
+ name of the new flavor.
 Specify the optional option to
 block while the instance resizes so that progress can be
 reported.
@@ -504,7 +504,7 @@
 . Optional.
 Opts out of attaching
- PublicNet to your server.
+ PublicNet to your server.
 RackConnect and Managed
 Cloud customers will receive an error if
@@ -547,7 +547,7 @@
 is written to
 /meta.js on the new
 server. Can be specified
- multiple times.
+ multiple times.
@@ -572,19 +572,19 @@
 user-data. User data file,
 which is exposed by the
- metadata server.
+ metadata server.
 availability-zone.
 The availability zone for instance
- placement.
+ placement.
 security_groups. A
 comma-separated list of security
- group names.
+ group names.
@@ -609,15 +609,15 @@
 NIC with a specified UUID to a network, specify
 the =private-net-id
- parameter.
+ parameter.
 Optionally, specify the
 ip-addr parameter to
 specify an IPv4 fixed
- address for NIC.
+ address for NIC.
 If you do not specify any networks on the
 parameter, the Internet and ServiceNet
- are attached to your server.
+ are attached to your server.
 ServiceNet is labeled as
 private and the
@@ -648,12 +648,12 @@
 value.
 Enables
- a configuration drive.
+ a configuration drive.
 .
 Blocks while the instance builds so progress
- can be reported.
+ can be reported.
 For example, you might issue the following
diff --git a/doc/src/docbkx/openstack-user/src/section_dashboard_access_and_security.xml b/doc/src/docbkx/openstack-user/src/section_dashboard_access_and_security.xml
index b68f6281ea..0e965fb9e8 100644
--- a/doc/src/docbkx/openstack-user/src/section_dashboard_access_and_security.xml
+++ b/doc/src/docbkx/openstack-user/src/section_dashboard_access_and_security.xml
@@ -31,13 +31,13 @@
 If you are a member of multiple projects, select a
 project from the drop-down list at the top of
- the Project tab.
+ the Project tab.
 Click the Access &
- Security category.
+ Security category.
 The dashboard shows the security groups that are
- available for this project.
+ available for this project.
 OpenStack dashboard – Security Groups
@@ -52,7 +52,7 @@
 Select the default security group and click
- Edit Rules.
+ Edit Rules.
 The Security Group Rules page appears:
@@ -69,9 +69,9 @@
 Add a TCP rule
- Click Add Rule.
+ Click Add Rule.
 The Add Rule window
- appears.
+ appears.
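The rule added by this dashboard procedure can also be created with the secgroup commands covered earlier; a minimal sketch, assuming the default group and SSH (TCP port 22) opened to any IPv4 source, which are typical values rather than ones mandated by the text:

$ nova secgroup-add-rule default tcp 22 22 0.0.0.0/0    # allow inbound TCP 22 from anywhere
$ nova secgroup-list-rules default                      # verify that the new rule is present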