diff --git a/doc/install-guide/source/cinder-backup-install-debian.rst b/doc/install-guide/source/cinder-backup-install-debian.rst
new file mode 100644
index 0000000000..1bb115500c
--- /dev/null
+++ b/doc/install-guide/source/cinder-backup-install-debian.rst
@@ -0,0 +1,71 @@
+:orphan:
+
+Install and configure the backup service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Optionally, install and configure the backup service. For simplicity,
+this configuration uses the Block Storage node and the Object Storage
+(swift) driver, thus depending on the
+`Object Storage service <https://docs.openstack.org/project-install-guide/object-storage/draft/>`_.
+
+.. note::
+
+   You must :ref:`install and configure a storage node <cinder-storage>` prior
+ to installing and configuring the backup service.
+
+Install and configure components
+--------------------------------
+
+.. note::
+
+ Perform these steps on the Block Storage node.
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install cinder-backup
+
+ .. end
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[DEFAULT]`` section, configure backup options:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ backup_driver = cinder.backup.drivers.swift
+ backup_swift_url = SWIFT_URL
+
+ .. end
+
+ Replace ``SWIFT_URL`` with the URL of the Object Storage service. The
+ URL can be found by showing the object-store API endpoints:
+
+ .. code-block:: console
+
+ $ openstack catalog show object-store
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+
+Restart the Block Storage backup service:
+
+.. code-block:: console
+
+ # service cinder-backup restart
+
+.. end
+
diff --git a/doc/install-guide/source/cinder-backup-install-obs.rst b/doc/install-guide/source/cinder-backup-install-obs.rst
new file mode 100644
index 0000000000..b8c9191b8e
--- /dev/null
+++ b/doc/install-guide/source/cinder-backup-install-obs.rst
@@ -0,0 +1,73 @@
+:orphan:
+
+Install and configure the backup service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Optionally, install and configure the backup service. For simplicity,
+this configuration uses the Block Storage node and the Object Storage
+(swift) driver, thus depending on the
+`Object Storage service <https://docs.openstack.org/project-install-guide/object-storage/draft/>`_.
+
+.. note::
+
+   You must :ref:`install and configure a storage node <cinder-storage>` prior
+ to installing and configuring the backup service.
+
+Install and configure components
+--------------------------------
+
+.. note::
+
+ Perform these steps on the Block Storage node.
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-cinder-backup
+
+ .. end
+
+
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[DEFAULT]`` section, configure backup options:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ backup_driver = cinder.backup.drivers.swift
+ backup_swift_url = SWIFT_URL
+
+ .. end
+
+ Replace ``SWIFT_URL`` with the URL of the Object Storage service. The
+ URL can be found by showing the object-store API endpoints:
+
+ .. code-block:: console
+
+ $ openstack catalog show object-store
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+Start the Block Storage backup service and configure it to
+start when the system boots:
+
+.. code-block:: console
+
+ # systemctl enable openstack-cinder-backup.service
+ # systemctl start openstack-cinder-backup.service
+
+.. end
+
+
diff --git a/doc/install-guide/source/cinder-backup-install-rdo.rst b/doc/install-guide/source/cinder-backup-install-rdo.rst
new file mode 100644
index 0000000000..d7ccfc152f
--- /dev/null
+++ b/doc/install-guide/source/cinder-backup-install-rdo.rst
@@ -0,0 +1,73 @@
+:orphan:
+
+Install and configure the backup service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Optionally, install and configure the backup service. For simplicity,
+this configuration uses the Block Storage node and the Object Storage
+(swift) driver, thus depending on the
+`Object Storage service <https://docs.openstack.org/project-install-guide/object-storage/draft/>`_.
+
+.. note::
+
+   You must :ref:`install and configure a storage node <cinder-storage>` prior
+ to installing and configuring the backup service.
+
+Install and configure components
+--------------------------------
+
+.. note::
+
+ Perform these steps on the Block Storage node.
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-cinder
+
+ .. end
+
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[DEFAULT]`` section, configure backup options:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ backup_driver = cinder.backup.drivers.swift
+ backup_swift_url = SWIFT_URL
+
+ .. end
+
+ Replace ``SWIFT_URL`` with the URL of the Object Storage service. The
+ URL can be found by showing the object-store API endpoints:
+
+ .. code-block:: console
+
+ $ openstack catalog show object-store
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+Start the Block Storage backup service and configure it to
+start when the system boots:
+
+.. code-block:: console
+
+ # systemctl enable openstack-cinder-backup.service
+ # systemctl start openstack-cinder-backup.service
+
+.. end
+
+
diff --git a/doc/install-guide/source/cinder-backup-install-ubuntu.rst b/doc/install-guide/source/cinder-backup-install-ubuntu.rst
new file mode 100644
index 0000000000..1bb115500c
--- /dev/null
+++ b/doc/install-guide/source/cinder-backup-install-ubuntu.rst
@@ -0,0 +1,71 @@
+:orphan:
+
+Install and configure the backup service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Optionally, install and configure the backup service. For simplicity,
+this configuration uses the Block Storage node and the Object Storage
+(swift) driver, thus depending on the
+`Object Storage service <https://docs.openstack.org/project-install-guide/object-storage/draft/>`_.
+
+.. note::
+
+   You must :ref:`install and configure a storage node <cinder-storage>` prior
+ to installing and configuring the backup service.
+
+Install and configure components
+--------------------------------
+
+.. note::
+
+ Perform these steps on the Block Storage node.
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install cinder-backup
+
+ .. end
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[DEFAULT]`` section, configure backup options:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ backup_driver = cinder.backup.drivers.swift
+ backup_swift_url = SWIFT_URL
+
+ .. end
+
+ Replace ``SWIFT_URL`` with the URL of the Object Storage service. The
+ URL can be found by showing the object-store API endpoints:
+
+ .. code-block:: console
+
+ $ openstack catalog show object-store
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+
+Restart the Block Storage backup service:
+
+.. code-block:: console
+
+ # service cinder-backup restart
+
+.. end
+
diff --git a/doc/install-guide/source/cinder-backup-install.rst b/doc/install-guide/source/cinder-backup-install.rst
index 949f7f5e9a..77de40223a 100644
--- a/doc/install-guide/source/cinder-backup-install.rst
+++ b/doc/install-guide/source/cinder-backup-install.rst
@@ -5,108 +5,7 @@
Install and configure the backup service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Optionally, install and configure the backup service. For simplicity,
-this configuration uses the Block Storage node and the Object Storage
-(swift) driver, thus depending on the
-`Object Storage service `_.
+.. toctree::
+ :glob:
-.. note::
-
- You must :ref:`install and configure a storage node ` prior
- to installing and configuring the backup service.
-
-Install and configure components
---------------------------------
-
-.. note::
-
- Perform these steps on the Block Storage node.
-
-.. only:: obs
-
- #. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-cinder-backup
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-cinder
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install cinder-backup
-
- .. end
-
-.. endonly
-
-2. Edit the ``/etc/cinder/cinder.conf`` file
- and complete the following actions:
-
- * In the ``[DEFAULT]`` section, configure backup options:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- backup_driver = cinder.backup.drivers.swift
- backup_swift_url = SWIFT_URL
-
- .. end
-
- Replace ``SWIFT_URL`` with the URL of the Object Storage service. The
- URL can be found by showing the object-store API endpoints:
-
- .. code-block:: console
-
- $ openstack catalog show object-store
-
- .. end
-
-Finalize installation
----------------------
-
-.. only:: obs or rdo
-
- Start the Block Storage backup service and configure it to
- start when the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-cinder-backup.service
- # systemctl start openstack-cinder-backup.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- Restart the Block Storage backup service:
-
- .. code-block:: console
-
- # service cinder-backup restart
-
- .. end
-
-.. endonly
+ cinder-backup-install-*
diff --git a/doc/install-guide/source/cinder-controller-install-debian.rst b/doc/install-guide/source/cinder-controller-install-debian.rst
new file mode 100644
index 0000000000..d9e141a265
--- /dev/null
+++ b/doc/install-guide/source/cinder-controller-install-debian.rst
@@ -0,0 +1,394 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Block
+Storage service, code-named cinder, on the controller node. This
+service requires at least one additional storage node that provides
+volumes to instances.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE cinder;
+
+ .. end
+
+ * Grant proper access to the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only
+ CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create a ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt cinder
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 9d7e33de3e1a498390353819bc7d245d |
+ | name | cinder |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user cinder admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``cinderv2`` and ``cinderv3`` service entities:
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv2 \
+ --description "OpenStack Block Storage" volumev2
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | eb9fd245bdbc414695952e93f29fe3ac |
+ | name | cinderv2 |
+ | type | volumev2 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv3 \
+ --description "OpenStack Block Storage" volumev3
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | ab3bbbef780845a1a283490d281e7fda |
+ | name | cinderv3 |
+ | type | volumev3 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require two service entities.
+
+#. Create the Block Storage service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 public http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 513e73819e14460fb904163f41ef3759 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 internal http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 6436a8a23d014cfdb69c586eff146a32 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 admin http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | e652cf84dd334f359ae9b045a2c91d96 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 public http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 03fa2c90153546c295bf30ca86b1344b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 internal http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 94f684395d1b41068c70e4ecb11364b2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 admin http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 4511c28a0f9840c78bacb25f10f62c98 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require endpoints for each service
+ entity.
+
+Install and configure components
+--------------------------------
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install cinder-api cinder-scheduler
+
+ .. end
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for the
+ Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for
+ the ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+
+3. Populate the Block Storage database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "cinder-manage db sync" cinder
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+
+Configure Compute to use Block Storage
+--------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and add the following
+ to it:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [cinder]
+ os_region_name = RegionOne
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # service nova-api restart
+
+ .. end
+
+#. Restart the Block Storage services:
+
+ .. code-block:: console
+
+ # service cinder-scheduler restart
+ # service apache2 restart
+
+ .. end
+
diff --git a/doc/install-guide/source/cinder-controller-install-obs.rst b/doc/install-guide/source/cinder-controller-install-obs.rst
new file mode 100644
index 0000000000..11cb0b8a2e
--- /dev/null
+++ b/doc/install-guide/source/cinder-controller-install-obs.rst
@@ -0,0 +1,394 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Block
+Storage service, code-named cinder, on the controller node. This
+service requires at least one additional storage node that provides
+volumes to instances.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE cinder;
+
+ .. end
+
+ * Grant proper access to the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only
+ CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create a ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt cinder
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 9d7e33de3e1a498390353819bc7d245d |
+ | name | cinder |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user cinder admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``cinderv2`` and ``cinderv3`` service entities:
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv2 \
+ --description "OpenStack Block Storage" volumev2
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | eb9fd245bdbc414695952e93f29fe3ac |
+ | name | cinderv2 |
+ | type | volumev2 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv3 \
+ --description "OpenStack Block Storage" volumev3
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | ab3bbbef780845a1a283490d281e7fda |
+ | name | cinderv3 |
+ | type | volumev3 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require two service entities.
+
+#. Create the Block Storage service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 public http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 513e73819e14460fb904163f41ef3759 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 internal http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 6436a8a23d014cfdb69c586eff146a32 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 admin http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | e652cf84dd334f359ae9b045a2c91d96 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 public http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 03fa2c90153546c295bf30ca86b1344b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 internal http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 94f684395d1b41068c70e4ecb11364b2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 admin http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 4511c28a0f9840c78bacb25f10f62c98 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require endpoints for each service
+ entity.
+
+Install and configure components
+--------------------------------
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-cinder-api openstack-cinder-scheduler
+
+ .. end
+
+
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for the
+ Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for
+ the ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+   * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+     .. path /etc/cinder/cinder.conf
+     .. code-block:: ini
+
+        [oslo_concurrency]
+        # ...
+        lock_path = /var/lib/cinder/tmp
+
+     .. end
+
+
+
+Configure Compute to use Block Storage
+--------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and add the following
+ to it:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [cinder]
+ os_region_name = RegionOne
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # systemctl restart openstack-nova-api.service
+
+ .. end
+
+#. Start the Block Storage services and configure them to start when
+ the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
+ # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/cinder-controller-install-rdo.rst b/doc/install-guide/source/cinder-controller-install-rdo.rst
new file mode 100644
index 0000000000..9ac1723e8c
--- /dev/null
+++ b/doc/install-guide/source/cinder-controller-install-rdo.rst
@@ -0,0 +1,407 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Block
+Storage service, code-named cinder, on the controller node. This
+service requires at least one additional storage node that provides
+volumes to instances.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE cinder;
+
+ .. end
+
+ * Grant proper access to the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only
+ CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create a ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt cinder
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 9d7e33de3e1a498390353819bc7d245d |
+ | name | cinder |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user cinder admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``cinderv2`` and ``cinderv3`` service entities:
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv2 \
+ --description "OpenStack Block Storage" volumev2
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | eb9fd245bdbc414695952e93f29fe3ac |
+ | name | cinderv2 |
+ | type | volumev2 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv3 \
+ --description "OpenStack Block Storage" volumev3
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | ab3bbbef780845a1a283490d281e7fda |
+ | name | cinderv3 |
+ | type | volumev3 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require two service entities.
+
+#. Create the Block Storage service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 public http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 513e73819e14460fb904163f41ef3759 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 internal http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 6436a8a23d014cfdb69c586eff146a32 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 admin http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | e652cf84dd334f359ae9b045a2c91d96 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 public http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 03fa2c90153546c295bf30ca86b1344b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 internal http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 94f684395d1b41068c70e4ecb11364b2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 admin http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 4511c28a0f9840c78bacb25f10f62c98 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require endpoints for each service
+ entity.
+
+Install and configure components
+--------------------------------
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-cinder
+
+ .. end
+
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for the
+ Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for
+ the ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+   * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+     .. path /etc/cinder/cinder.conf
+     .. code-block:: ini
+
+        [oslo_concurrency]
+        # ...
+        lock_path = /var/lib/cinder/tmp
+
+     .. end
+
+
+
+3. Populate the Block Storage database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "cinder-manage db sync" cinder
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+
+Configure Compute to use Block Storage
+--------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and add the following
+ to it:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [cinder]
+ os_region_name = RegionOne
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # systemctl restart openstack-nova-api.service
+
+ .. end
+
+#. Start the Block Storage services and configure them to start when
+ the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
+ # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/cinder-controller-install-ubuntu.rst b/doc/install-guide/source/cinder-controller-install-ubuntu.rst
new file mode 100644
index 0000000000..313b5ea10c
--- /dev/null
+++ b/doc/install-guide/source/cinder-controller-install-ubuntu.rst
@@ -0,0 +1,406 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Block
+Storage service, code-named cinder, on the controller node. This
+service requires at least one additional storage node that provides
+volumes to instances.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        # mysql
+
+     .. end
+
+
+
+ * Create the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE cinder;
+
+ .. end
+
+ * Grant proper access to the ``cinder`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
+ IDENTIFIED BY 'CINDER_DBPASS';
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only
+ CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create a ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt cinder
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 9d7e33de3e1a498390353819bc7d245d |
+ | name | cinder |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``cinder`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user cinder admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``cinderv2`` and ``cinderv3`` service entities:
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv2 \
+ --description "OpenStack Block Storage" volumev2
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | eb9fd245bdbc414695952e93f29fe3ac |
+ | name | cinderv2 |
+ | type | volumev2 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack service create --name cinderv3 \
+ --description "OpenStack Block Storage" volumev3
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Block Storage |
+ | enabled | True |
+ | id | ab3bbbef780845a1a283490d281e7fda |
+ | name | cinderv3 |
+ | type | volumev3 |
+ +-------------+----------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require two service entities.
+
+#. Create the Block Storage service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 public http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 513e73819e14460fb904163f41ef3759 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 internal http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 6436a8a23d014cfdb69c586eff146a32 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev2 admin http://controller:8776/v2/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | e652cf84dd334f359ae9b045a2c91d96 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | eb9fd245bdbc414695952e93f29fe3ac |
+ | service_name | cinderv2 |
+ | service_type | volumev2 |
+ | url | http://controller:8776/v2/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 public http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 03fa2c90153546c295bf30ca86b1344b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 internal http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 94f684395d1b41068c70e4ecb11364b2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ volumev3 admin http://controller:8776/v3/%\(project_id\)s
+
+ +--------------+------------------------------------------+
+ | Field | Value |
+ +--------------+------------------------------------------+
+ | enabled | True |
+ | id | 4511c28a0f9840c78bacb25f10f62c98 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | ab3bbbef780845a1a283490d281e7fda |
+ | service_name | cinderv3 |
+ | service_type | volumev3 |
+ | url | http://controller:8776/v3/%(project_id)s |
+ +--------------+------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ The Block Storage services require endpoints for each service
+ entity.
+
+Install and configure components
+--------------------------------
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install cinder-api cinder-scheduler
+
+ .. end
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for the
+ Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for
+ the ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+   * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+     .. path /etc/cinder/cinder.conf
+     .. code-block:: ini
+
+        [oslo_concurrency]
+        # ...
+        lock_path = /var/lib/cinder/tmp
+
+     .. end
+
+
+
+3. Populate the Block Storage database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "cinder-manage db sync" cinder
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+
+Configure Compute to use Block Storage
+--------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and add the following
+ to it:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [cinder]
+ os_region_name = RegionOne
+
+ .. end
+
+Finalize installation
+---------------------
+
+
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # service nova-api restart
+
+ .. end
+
+#. Restart the Block Storage services:
+
+ .. code-block:: console
+
+ # service cinder-scheduler restart
+ # service apache2 restart
+
+ .. end
+
diff --git a/doc/install-guide/source/cinder-controller-install.rst b/doc/install-guide/source/cinder-controller-install.rst
index 7927bcbebe..8b0abfd484 100644
--- a/doc/install-guide/source/cinder-controller-install.rst
+++ b/doc/install-guide/source/cinder-controller-install.rst
@@ -3,471 +3,7 @@
Install and configure controller node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This section describes how to install and configure the Block
-Storage service, code-named cinder, on the controller node. This
-service requires at least one additional storage node that provides
-volumes to instances.
+.. toctree::
+ :glob:
-Prerequisites
--------------
-
-Before you install and configure the Block Storage service, you
-must create a database, service credentials, and API endpoints.
-
-#. To create the database, complete these steps:
-
- .. only:: ubuntu
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- # mysql
-
- .. end
-
- .. endonly
-
- .. only:: rdo or debian or obs
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- $ mysql -u root -p
-
- .. end
-
- .. endonly
-
- * Create the ``cinder`` database:
-
- .. code-block:: console
-
- MariaDB [(none)]> CREATE DATABASE cinder;
-
- .. end
-
- * Grant proper access to the ``cinder`` database:
-
- .. code-block:: console
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
- IDENTIFIED BY 'CINDER_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
- IDENTIFIED BY 'CINDER_DBPASS';
-
- .. end
-
- Replace ``CINDER_DBPASS`` with a suitable password.
-
- * Exit the database access client.
-
-#. Source the ``admin`` credentials to gain access to admin-only
- CLI commands:
-
- .. code-block:: console
-
- $ . admin-openrc
-
- .. end
-
-#. To create the service credentials, complete these steps:
-
- * Create a ``cinder`` user:
-
- .. code-block:: console
-
- $ openstack user create --domain default --password-prompt cinder
-
- User Password:
- Repeat User Password:
- +---------------------+----------------------------------+
- | Field | Value |
- +---------------------+----------------------------------+
- | domain_id | default |
- | enabled | True |
- | id | 9d7e33de3e1a498390353819bc7d245d |
- | name | cinder |
- | options | {} |
- | password_expires_at | None |
- +---------------------+----------------------------------+
-
- .. end
-
- * Add the ``admin`` role to the ``cinder`` user:
-
- .. code-block:: console
-
- $ openstack role add --project service --user cinder admin
-
- .. end
-
- .. note::
-
- This command provides no output.
-
- * Create the ``cinderv2`` and ``cinderv3`` service entities:
-
- .. code-block:: console
-
- $ openstack service create --name cinderv2 \
- --description "OpenStack Block Storage" volumev2
-
- +-------------+----------------------------------+
- | Field | Value |
- +-------------+----------------------------------+
- | description | OpenStack Block Storage |
- | enabled | True |
- | id | eb9fd245bdbc414695952e93f29fe3ac |
- | name | cinderv2 |
- | type | volumev2 |
- +-------------+----------------------------------+
-
- .. end
-
- .. code-block:: console
-
- $ openstack service create --name cinderv3 \
- --description "OpenStack Block Storage" volumev3
-
- +-------------+----------------------------------+
- | Field | Value |
- +-------------+----------------------------------+
- | description | OpenStack Block Storage |
- | enabled | True |
- | id | ab3bbbef780845a1a283490d281e7fda |
- | name | cinderv3 |
- | type | volumev3 |
- +-------------+----------------------------------+
-
- .. end
-
- .. note::
-
- The Block Storage services require two service entities.
-
-#. Create the Block Storage service API endpoints:
-
- .. code-block:: console
-
- $ openstack endpoint create --region RegionOne \
- volumev2 public http://controller:8776/v2/%\(project_id\)s
-
- +--------------+------------------------------------------+
- | Field | Value |
- +--------------+------------------------------------------+
- | enabled | True |
- | id | 513e73819e14460fb904163f41ef3759 |
- | interface | public |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | eb9fd245bdbc414695952e93f29fe3ac |
- | service_name | cinderv2 |
- | service_type | volumev2 |
- | url | http://controller:8776/v2/%(project_id)s |
- +--------------+------------------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- volumev2 internal http://controller:8776/v2/%\(project_id\)s
-
- +--------------+------------------------------------------+
- | Field | Value |
- +--------------+------------------------------------------+
- | enabled | True |
- | id | 6436a8a23d014cfdb69c586eff146a32 |
- | interface | internal |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | eb9fd245bdbc414695952e93f29fe3ac |
- | service_name | cinderv2 |
- | service_type | volumev2 |
- | url | http://controller:8776/v2/%(project_id)s |
- +--------------+------------------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- volumev2 admin http://controller:8776/v2/%\(project_id\)s
-
- +--------------+------------------------------------------+
- | Field | Value |
- +--------------+------------------------------------------+
- | enabled | True |
- | id | e652cf84dd334f359ae9b045a2c91d96 |
- | interface | admin |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | eb9fd245bdbc414695952e93f29fe3ac |
- | service_name | cinderv2 |
- | service_type | volumev2 |
- | url | http://controller:8776/v2/%(project_id)s |
- +--------------+------------------------------------------+
-
- .. end
-
- .. code-block:: console
-
- $ openstack endpoint create --region RegionOne \
- volumev3 public http://controller:8776/v3/%\(project_id\)s
-
- +--------------+------------------------------------------+
- | Field | Value |
- +--------------+------------------------------------------+
- | enabled | True |
- | id | 03fa2c90153546c295bf30ca86b1344b |
- | interface | public |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | ab3bbbef780845a1a283490d281e7fda |
- | service_name | cinderv3 |
- | service_type | volumev3 |
- | url | http://controller:8776/v3/%(project_id)s |
- +--------------+------------------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- volumev3 internal http://controller:8776/v3/%\(project_id\)s
-
- +--------------+------------------------------------------+
- | Field | Value |
- +--------------+------------------------------------------+
- | enabled | True |
- | id | 94f684395d1b41068c70e4ecb11364b2 |
- | interface | internal |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | ab3bbbef780845a1a283490d281e7fda |
- | service_name | cinderv3 |
- | service_type | volumev3 |
- | url | http://controller:8776/v3/%(project_id)s |
- +--------------+------------------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- volumev3 admin http://controller:8776/v3/%\(project_id\)s
-
- +--------------+------------------------------------------+
- | Field | Value |
- +--------------+------------------------------------------+
- | enabled | True |
- | id | 4511c28a0f9840c78bacb25f10f62c98 |
- | interface | admin |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | ab3bbbef780845a1a283490d281e7fda |
- | service_name | cinderv3 |
- | service_type | volumev3 |
- | url | http://controller:8776/v3/%(project_id)s |
- +--------------+------------------------------------------+
-
- .. end
-
- .. note::
-
- The Block Storage services require endpoints for each service
- entity.
-
-Install and configure components
---------------------------------
-
-.. only:: obs
-
- #. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-cinder-api openstack-cinder-scheduler
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-cinder
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install cinder-api cinder-scheduler
-
- .. end
-
-.. endonly
-
-2. Edit the ``/etc/cinder/cinder.conf`` file and complete the
- following actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
-
- .. end
-
- Replace ``CINDER_DBPASS`` with the password you chose for the
- Block Storage database.
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for the
- ``openstack`` account in ``RabbitMQ``.
-
- * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
- configure Identity service access:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = cinder
- password = CINDER_PASS
-
- .. end
-
- Replace ``CINDER_PASS`` with the password you chose for
- the ``cinder`` user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
- use the management interface IP address of the controller node:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- my_ip = 10.0.0.11
-
- .. end
-
- .. only:: obs or rdo or ubuntu
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/cinder/tmp
-
- .. end
-
- .. endonly
-
-.. only:: rdo or ubuntu or debian
-
- 3. Populate the Block Storage database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "cinder-manage db sync" cinder
-
- .. end
-
- .. note::
-
- Ignore any deprecation messages in this output.
-
-.. endonly
-
-Configure Compute to use Block Storage
---------------------------------------
-
-* Edit the ``/etc/nova/nova.conf`` file and add the following
- to it:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [cinder]
- os_region_name = RegionOne
-
- .. end
-
-Finalize installation
----------------------
-
-.. only:: obs or rdo
-
- #. Restart the Compute API service:
-
- .. code-block:: console
-
- # systemctl restart openstack-nova-api.service
-
- .. end
-
- #. Start the Block Storage services and configure them to start when
- the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
- # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Restart the Compute API service:
-
- .. code-block:: console
-
- # service nova-api restart
-
- .. end
-
- #. Restart the Block Storage services:
-
- .. code-block:: console
-
- # service cinder-scheduler restart
- # service apache2 restart
-
- .. end
-
-.. endonly
+ cinder-controller-install-*
diff --git a/doc/install-guide/source/cinder-storage-install-debian.rst b/doc/install-guide/source/cinder-storage-install-debian.rst
new file mode 100644
index 0000000000..af64b986c5
--- /dev/null
+++ b/doc/install-guide/source/cinder-storage-install-debian.rst
@@ -0,0 +1,263 @@
+Install and configure a storage node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure storage nodes
+for the Block Storage service. For simplicity, this configuration
+references one storage node with an empty local block storage device.
+The instructions use ``/dev/sdb``, but you can substitute a different
+value for your particular node.
+
+The service provisions logical volumes on this device using the
+:term:`LVM <Logical Volume Manager (LVM)>` driver and provides them
+to instances via :term:`iSCSI <Internet Small Computer System Interface (iSCSI)>` transport.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional storage nodes.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service on the
+storage node, you must prepare the storage device.
+
+.. note::
+
+ Perform these steps on the storage node.
+
+#. Install the supporting utility packages:
+
+
+
+
+ .. note::
+
+ Some distributions include LVM by default.
+
+#. Create the LVM physical volume ``/dev/sdb``:
+
+ .. code-block:: console
+
+ # pvcreate /dev/sdb
+
+ Physical volume "/dev/sdb" successfully created
+
+ .. end
+
+#. Create the LVM volume group ``cinder-volumes``:
+
+ .. code-block:: console
+
+ # vgcreate cinder-volumes /dev/sdb
+
+ Volume group "cinder-volumes" successfully created
+
+ .. end
+
+ The Block Storage service creates logical volumes in this volume group.
+
+#. Only instances can access Block Storage volumes. However, the
+ underlying operating system manages the devices associated with
+ the volumes. By default, the LVM volume scanning tool scans the
+ ``/dev`` directory for block storage devices that
+ contain volumes. If projects use LVM on their volumes, the scanning
+ tool detects these volumes and attempts to cache them which can cause
+ a variety of problems with both the underlying operating system
+ and project volumes. You must reconfigure LVM to scan only the devices
+ that contain the ``cinder-volumes`` volume group. Edit the
+ ``/etc/lvm/lvm.conf`` file and complete the following actions:
+
+ * In the ``devices`` section, add a filter that accepts the
+ ``/dev/sdb`` device and rejects all other devices:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: none
+
+ devices {
+ ...
+ filter = [ "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Each item in the filter array begins with ``a`` for **accept** or
+ ``r`` for **reject** and includes a regular expression for the
+ device name. The array must end with ``r/.*/`` to reject any
+ remaining devices. You can use the :command:`vgs -vvvv` command
+ to test filters.
+
+ .. warning::
+
+ If your storage nodes use LVM on the operating system disk, you
+ must also add the associated device to the filter. For example,
+ if the ``/dev/sda`` device contains the operating system:
+
+ .. ignore_path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Similarly, if your compute nodes use LVM on the operating
+ system disk, you must also modify the filter in the
+ ``/etc/lvm/lvm.conf`` file on those nodes to include only
+ the operating system disk. For example, if the ``/dev/sda``
+ device contains the operating system:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "r/.*/"]
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install cinder-volume
+
+ .. end
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for
+ the Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for the
+ ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your storage node,
+ typically 10.0.0.41 for the first node in the
+ :ref:`example architecture <overview-example-architectures>`.
+
+
+
+ * In the ``[DEFAULT]`` section, enable the LVM back end:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_backends = lvm
+
+ .. end
+
+ .. note::
+
+ Back-end names are arbitrary. As an example, this guide
+ uses the name of the driver as the name of the back end.
+
+ * In the ``[DEFAULT]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ glance_api_servers = http://controller:9292
+
+ .. end
+
+ * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/cinder/tmp
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+
+#. Restart the Block Storage volume service including its dependencies:
+
+ .. code-block:: console
+
+ # service tgt restart
+ # service cinder-volume restart
+
+ .. end
+
diff --git a/doc/install-guide/source/cinder-storage-install-obs.rst b/doc/install-guide/source/cinder-storage-install-obs.rst
new file mode 100644
index 0000000000..7ba542cfaa
--- /dev/null
+++ b/doc/install-guide/source/cinder-storage-install-obs.rst
@@ -0,0 +1,309 @@
+Install and configure a storage node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure storage nodes
+for the Block Storage service. For simplicity, this configuration
+references one storage node with an empty local block storage device.
+The instructions use ``/dev/sdb``, but you can substitute a different
+value for your particular node.
+
+The service provisions logical volumes on this device using the
+:term:`LVM <Logical Volume Manager (LVM)>` driver and provides them
+to instances via :term:`iSCSI <Internet Small Computer System Interface (iSCSI)>` transport.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional storage nodes.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service on the
+storage node, you must prepare the storage device.
+
+.. note::
+
+ Perform these steps on the storage node.
+
+#. Install the supporting utility packages:
+
+
+   * Install the LVM packages:
+
+     .. code-block:: console
+
+        # zypper install lvm2
+
+     .. end
+
+   * (Optional) If you intend to use non-raw image types such as QCOW2
+     and VMDK, install the QEMU package:
+
+     .. code-block:: console
+
+        # zypper install qemu
+
+     .. end
+
+
+
+
+ .. note::
+
+ Some distributions include LVM by default.
+
+#. Create the LVM physical volume ``/dev/sdb``:
+
+ .. code-block:: console
+
+ # pvcreate /dev/sdb
+
+ Physical volume "/dev/sdb" successfully created
+
+ .. end
+
+#. Create the LVM volume group ``cinder-volumes``:
+
+ .. code-block:: console
+
+ # vgcreate cinder-volumes /dev/sdb
+
+ Volume group "cinder-volumes" successfully created
+
+ .. end
+
+ The Block Storage service creates logical volumes in this volume group.
+
+#. Only instances can access Block Storage volumes. However, the
+ underlying operating system manages the devices associated with
+ the volumes. By default, the LVM volume scanning tool scans the
+ ``/dev`` directory for block storage devices that
+ contain volumes. If projects use LVM on their volumes, the scanning
+ tool detects these volumes and attempts to cache them which can cause
+ a variety of problems with both the underlying operating system
+ and project volumes. You must reconfigure LVM to scan only the devices
+ that contain the ``cinder-volumes`` volume group. Edit the
+ ``/etc/lvm/lvm.conf`` file and complete the following actions:
+
+ * In the ``devices`` section, add a filter that accepts the
+ ``/dev/sdb`` device and rejects all other devices:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: none
+
+ devices {
+ ...
+ filter = [ "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Each item in the filter array begins with ``a`` for **accept** or
+ ``r`` for **reject** and includes a regular expression for the
+ device name. The array must end with ``r/.*/`` to reject any
+ remaining devices. You can use the :command:`vgs -vvvv` command
+ to test filters.
+
+ .. warning::
+
+ If your storage nodes use LVM on the operating system disk, you
+ must also add the associated device to the filter. For example,
+ if the ``/dev/sda`` device contains the operating system:
+
+ .. ignore_path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Similarly, if your compute nodes use LVM on the operating
+ system disk, you must also modify the filter in the
+ ``/etc/lvm/lvm.conf`` file on those nodes to include only
+ the operating system disk. For example, if the ``/dev/sda``
+ device contains the operating system:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "r/.*/"]
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-cinder-volume tgt
+
+ .. end
+
+
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for
+ the Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for the
+ ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your storage node,
+ typically 10.0.0.41 for the first node in the
+ :ref:`example architecture <overview-example-architectures>`.
+
+
+   * In the ``[lvm]`` section, configure the LVM back end with the
+     LVM driver, ``cinder-volumes`` volume group, iSCSI protocol,
+     and appropriate iSCSI service:
+
+     .. path /etc/cinder/cinder.conf
+     .. code-block:: ini
+
+        [lvm]
+        # ...
+        volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+        volume_group = cinder-volumes
+        iscsi_protocol = iscsi
+        iscsi_helper = tgtadm
+
+     .. end
+
+
+ * In the ``[DEFAULT]`` section, enable the LVM back end:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_backends = lvm
+
+ .. end
+
+ .. note::
+
+ Back-end names are arbitrary. As an example, this guide
+ uses the name of the driver as the name of the back end.
+
+ * In the ``[DEFAULT]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ glance_api_servers = http://controller:9292
+
+ .. end
+
+ * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/cinder/tmp
+
+ .. end
+
+
+3. Create the ``/etc/tgt/conf.d/cinder.conf`` file
+ with the following data:
+
+ .. code-block:: shell
+
+ include /var/lib/cinder/volumes/*
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+* Start the Block Storage volume service including its dependencies
+ and configure them to start when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-cinder-volume.service tgtd.service
+ # systemctl start openstack-cinder-volume.service tgtd.service
+
+ .. end
+
+
+
diff --git a/doc/install-guide/source/cinder-storage-install-rdo.rst b/doc/install-guide/source/cinder-storage-install-rdo.rst
new file mode 100644
index 0000000000..f79f7c6cc8
--- /dev/null
+++ b/doc/install-guide/source/cinder-storage-install-rdo.rst
@@ -0,0 +1,300 @@
+Install and configure a storage node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure storage nodes
+for the Block Storage service. For simplicity, this configuration
+references one storage node with an empty local block storage device.
+The instructions use ``/dev/sdb``, but you can substitute a different
+value for your particular node.
+
+The service provisions logical volumes on this device using the
+:term:`LVM <Logical Volume Manager (LVM)>` driver and provides them
+to instances via :term:`iSCSI <Internet Small Computer System Interface (iSCSI)>` transport.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional storage nodes.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service on the
+storage node, you must prepare the storage device.
+
+.. note::
+
+ Perform these steps on the storage node.
+
+#. Install the supporting utility packages:
+
+
+
+   * Install the LVM packages:
+
+     .. code-block:: console
+
+        # yum install lvm2
+
+     .. end
+
+   * Start the LVM metadata service and configure it to start when the
+     system boots:
+
+     .. code-block:: console
+
+        # systemctl enable lvm2-lvmetad.service
+        # systemctl start lvm2-lvmetad.service
+
+     .. end
+
+
+
+ .. note::
+
+ Some distributions include LVM by default.
+
+#. Create the LVM physical volume ``/dev/sdb``:
+
+ .. code-block:: console
+
+ # pvcreate /dev/sdb
+
+ Physical volume "/dev/sdb" successfully created
+
+ .. end
+
+#. Create the LVM volume group ``cinder-volumes``:
+
+ .. code-block:: console
+
+ # vgcreate cinder-volumes /dev/sdb
+
+ Volume group "cinder-volumes" successfully created
+
+ .. end
+
+ The Block Storage service creates logical volumes in this volume group.
+
+#. Only instances can access Block Storage volumes. However, the
+ underlying operating system manages the devices associated with
+ the volumes. By default, the LVM volume scanning tool scans the
+ ``/dev`` directory for block storage devices that
+ contain volumes. If projects use LVM on their volumes, the scanning
+ tool detects these volumes and attempts to cache them which can cause
+ a variety of problems with both the underlying operating system
+ and project volumes. You must reconfigure LVM to scan only the devices
+ that contain the ``cinder-volumes`` volume group. Edit the
+ ``/etc/lvm/lvm.conf`` file and complete the following actions:
+
+ * In the ``devices`` section, add a filter that accepts the
+ ``/dev/sdb`` device and rejects all other devices:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: none
+
+ devices {
+ ...
+ filter = [ "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Each item in the filter array begins with ``a`` for **accept** or
+ ``r`` for **reject** and includes a regular expression for the
+ device name. The array must end with ``r/.*/`` to reject any
+ remaining devices. You can use the :command:`vgs -vvvv` command
+ to test filters.
+
+ .. warning::
+
+ If your storage nodes use LVM on the operating system disk, you
+ must also add the associated device to the filter. For example,
+ if the ``/dev/sda`` device contains the operating system:
+
+ .. ignore_path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Similarly, if your compute nodes use LVM on the operating
+ system disk, you must also modify the filter in the
+ ``/etc/lvm/lvm.conf`` file on those nodes to include only
+ the operating system disk. For example, if the ``/dev/sda``
+ device contains the operating system:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "r/.*/"]
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-cinder targetcli python-keystone
+
+ .. end
+
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for
+ the Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for the
+ ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your storage node,
+ typically 10.0.0.41 for the first node in the
+ :ref:`example architecture <overview-example-architectures>`.
+
+
+
+   * In the ``[lvm]`` section, configure the LVM back end with the
+     LVM driver, ``cinder-volumes`` volume group, iSCSI protocol,
+     and appropriate iSCSI service. If the ``[lvm]`` section does not
+     exist, create it:
+
+     .. path /etc/cinder/cinder.conf
+     .. code-block:: ini
+
+        [lvm]
+        volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+        volume_group = cinder-volumes
+        iscsi_protocol = iscsi
+        iscsi_helper = lioadm
+
+     .. end
+
+
+ * In the ``[DEFAULT]`` section, enable the LVM back end:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_backends = lvm
+
+ .. end
+
+ .. note::
+
+ Back-end names are arbitrary. As an example, this guide
+ uses the name of the driver as the name of the back end.
+
+ * In the ``[DEFAULT]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ glance_api_servers = http://controller:9292
+
+ .. end
+
+ * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/cinder/tmp
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+* Start the Block Storage volume service including its dependencies
+ and configure them to start when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-cinder-volume.service target.service
+ # systemctl start openstack-cinder-volume.service target.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/cinder-storage-install-ubuntu.rst b/doc/install-guide/source/cinder-storage-install-ubuntu.rst
new file mode 100644
index 0000000000..783d86f740
--- /dev/null
+++ b/doc/install-guide/source/cinder-storage-install-ubuntu.rst
@@ -0,0 +1,287 @@
+Install and configure a storage node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure storage nodes
+for the Block Storage service. For simplicity, this configuration
+references one storage node with an empty local block storage device.
+The instructions use ``/dev/sdb``, but you can substitute a different
+value for your particular node.
+
+The service provisions logical volumes on this device using the
+:term:`LVM <Logical Volume Manager (LVM)>` driver and provides them
+to instances via :term:`iSCSI <Internet Small Computer System Interface (iSCSI)>` transport.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional storage nodes.
+
+Prerequisites
+-------------
+
+Before you install and configure the Block Storage service on the
+storage node, you must prepare the storage device.
+
+.. note::
+
+ Perform these steps on the storage node.
+
+#. Install the supporting utility packages:
+
+
+
+
+   .. code-block:: console
+
+      # apt install lvm2
+
+   .. end
+
+
+ .. note::
+
+ Some distributions include LVM by default.
+
+#. Create the LVM physical volume ``/dev/sdb``:
+
+ .. code-block:: console
+
+ # pvcreate /dev/sdb
+
+ Physical volume "/dev/sdb" successfully created
+
+ .. end
+
+#. Create the LVM volume group ``cinder-volumes``:
+
+ .. code-block:: console
+
+ # vgcreate cinder-volumes /dev/sdb
+
+ Volume group "cinder-volumes" successfully created
+
+ .. end
+
+ The Block Storage service creates logical volumes in this volume group.
+
+#. Only instances can access Block Storage volumes. However, the
+ underlying operating system manages the devices associated with
+ the volumes. By default, the LVM volume scanning tool scans the
+ ``/dev`` directory for block storage devices that
+ contain volumes. If projects use LVM on their volumes, the scanning
+ tool detects these volumes and attempts to cache them which can cause
+ a variety of problems with both the underlying operating system
+ and project volumes. You must reconfigure LVM to scan only the devices
+ that contain the ``cinder-volumes`` volume group. Edit the
+ ``/etc/lvm/lvm.conf`` file and complete the following actions:
+
+ * In the ``devices`` section, add a filter that accepts the
+ ``/dev/sdb`` device and rejects all other devices:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: none
+
+ devices {
+ ...
+ filter = [ "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Each item in the filter array begins with ``a`` for **accept** or
+ ``r`` for **reject** and includes a regular expression for the
+ device name. The array must end with ``r/.*/`` to reject any
+ remaining devices. You can use the :command:`vgs -vvvv` command
+ to test filters.
+
+ .. warning::
+
+ If your storage nodes use LVM on the operating system disk, you
+ must also add the associated device to the filter. For example,
+ if the ``/dev/sda`` device contains the operating system:
+
+ .. ignore_path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "a/sdb/", "r/.*/"]
+
+ .. end
+
+ Similarly, if your compute nodes use LVM on the operating
+ system disk, you must also modify the filter in the
+ ``/etc/lvm/lvm.conf`` file on those nodes to include only
+ the operating system disk. For example, if the ``/dev/sda``
+ device contains the operating system:
+
+ .. path /etc/lvm/lvm.conf
+ .. code-block:: ini
+
+ filter = [ "a/sda/", "r/.*/"]
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install cinder-volume
+
+ .. end
+
+
+2. Edit the ``/etc/cinder/cinder.conf`` file
+ and complete the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
+
+ .. end
+
+ Replace ``CINDER_DBPASS`` with the password you chose for
+ the Block Storage database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = cinder
+ password = CINDER_PASS
+
+ .. end
+
+ Replace ``CINDER_PASS`` with the password you chose for the
+ ``cinder`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your storage node,
+ typically 10.0.0.41 for the first node in the
+ :ref:`example architecture <overview-example-architectures>`.
+
+
+   * In the ``[lvm]`` section, configure the LVM back end with the
+     LVM driver, ``cinder-volumes`` volume group, iSCSI protocol,
+     and appropriate iSCSI service:
+
+     .. path /etc/cinder/cinder.conf
+     .. code-block:: ini
+
+        [lvm]
+        # ...
+        volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+        volume_group = cinder-volumes
+        iscsi_protocol = iscsi
+        iscsi_helper = tgtadm
+
+     .. end
+
+
+ * In the ``[DEFAULT]`` section, enable the LVM back end:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_backends = lvm
+
+ .. end
+
+ .. note::
+
+ Back-end names are arbitrary. As an example, this guide
+ uses the name of the driver as the name of the back end.
+
+ * In the ``[DEFAULT]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ glance_api_servers = http://controller:9292
+
+ .. end
+
+ * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/cinder/cinder.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/cinder/tmp
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+
+#. Restart the Block Storage volume service including its dependencies:
+
+ .. code-block:: console
+
+ # service tgt restart
+ # service cinder-volume restart
+
+ .. end
+
diff --git a/doc/install-guide/source/cinder-storage-install.rst b/doc/install-guide/source/cinder-storage-install.rst
index 344f944bfb..539c25ed8b 100644
--- a/doc/install-guide/source/cinder-storage-install.rst
+++ b/doc/install-guide/source/cinder-storage-install.rst
@@ -3,415 +3,7 @@
Install and configure a storage node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This section describes how to install and configure storage nodes
-for the Block Storage service. For simplicity, this configuration
-references one storage node with an empty local block storage device.
-The instructions use ``/dev/sdb``, but you can substitute a different
-value for your particular node.
+.. toctree::
+ :glob:
-The service provisions logical volumes on this device using the
-:term:`LVM ` driver and provides them
-to instances via :term:`iSCSI ` transport.
-You can follow these instructions with minor modifications to horizontally
-scale your environment with additional storage nodes.
-
-Prerequisites
--------------
-
-Before you install and configure the Block Storage service on the
-storage node, you must prepare the storage device.
-
-.. note::
-
- Perform these steps on the storage node.
-
-#. Install the supporting utility packages:
-
- .. only:: obs
-
- * Install the LVM packages:
-
- .. code-block:: console
-
- # zypper install lvm2
-
- .. end
-
- * (Optional) If you intend to use non-raw image types such as QCOW2
- and VMDK, install the QEMU package:
-
- .. code-block:: console
-
- # zypper install qemu
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- * Install the LVM packages:
-
- .. code-block:: console
-
- # yum install lvm2
-
- .. end
-
- * Start the LVM metadata service and configure it to start when the
- system boots:
-
- .. code-block:: console
-
- # systemctl enable lvm2-lvmetad.service
- # systemctl start lvm2-lvmetad.service
-
- .. end
-
- .. endonly
-
- .. only:: ubuntu
-
- .. code-block:: console
-
- # apt install lvm2
-
- .. end
-
- .. endonly
-
- .. note::
-
- Some distributions include LVM by default.
-
-#. Create the LVM physical volume ``/dev/sdb``:
-
- .. code-block:: console
-
- # pvcreate /dev/sdb
-
- Physical volume "/dev/sdb" successfully created
-
- .. end
-
-#. Create the LVM volume group ``cinder-volumes``:
-
- .. code-block:: console
-
- # vgcreate cinder-volumes /dev/sdb
-
- Volume group "cinder-volumes" successfully created
-
- .. end
-
- The Block Storage service creates logical volumes in this volume group.
-
-#. Only instances can access Block Storage volumes. However, the
- underlying operating system manages the devices associated with
- the volumes. By default, the LVM volume scanning tool scans the
- ``/dev`` directory for block storage devices that
- contain volumes. If projects use LVM on their volumes, the scanning
- tool detects these volumes and attempts to cache them which can cause
- a variety of problems with both the underlying operating system
- and project volumes. You must reconfigure LVM to scan only the devices
- that contain the ``cinder-volumes`` volume group. Edit the
- ``/etc/lvm/lvm.conf`` file and complete the following actions:
-
- * In the ``devices`` section, add a filter that accepts the
- ``/dev/sdb`` device and rejects all other devices:
-
- .. path /etc/lvm/lvm.conf
- .. code-block:: none
-
- devices {
- ...
- filter = [ "a/sdb/", "r/.*/"]
-
- .. end
-
- Each item in the filter array begins with ``a`` for **accept** or
- ``r`` for **reject** and includes a regular expression for the
- device name. The array must end with ``r/.*/`` to reject any
- remaining devices. You can use the :command:`vgs -vvvv` command
- to test filters.
-
- .. warning::
-
- If your storage nodes use LVM on the operating system disk, you
- must also add the associated device to the filter. For example,
- if the ``/dev/sda`` device contains the operating system:
-
- .. ignore_path /etc/lvm/lvm.conf
- .. code-block:: ini
-
- filter = [ "a/sda/", "a/sdb/", "r/.*/"]
-
- .. end
-
- Similarly, if your compute nodes use LVM on the operating
- system disk, you must also modify the filter in the
- ``/etc/lvm/lvm.conf`` file on those nodes to include only
- the operating system disk. For example, if the ``/dev/sda``
- device contains the operating system:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: ini
-
- filter = [ "a/sda/", "r/.*/"]
-
- .. end
-
-Install and configure components
---------------------------------
-
-.. only:: obs
-
- #. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-cinder-volume tgt
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-cinder targetcli python-keystone
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install cinder-volume
-
- .. end
-
-.. endonly
-
-2. Edit the ``/etc/cinder/cinder.conf`` file
- and complete the following actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
-
- .. end
-
- Replace ``CINDER_DBPASS`` with the password you chose for
- the Block Storage database.
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for
- the ``openstack`` account in ``RabbitMQ``.
-
- * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
- configure Identity service access:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = cinder
- password = CINDER_PASS
-
- .. end
-
- Replace ``CINDER_PASS`` with the password you chose for the
- ``cinder`` user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- * In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
-
- .. end
-
- Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
- of the management network interface on your storage node,
- typically 10.0.0.41 for the first node in the
- :ref:`example architecture `.
-
- .. only:: obs or ubuntu
-
- * In the ``[lvm]`` section, configure the LVM back end with the
- LVM driver, ``cinder-volumes`` volume group, iSCSI protocol,
- and appropriate iSCSI service:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [lvm]
- # ...
- volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_group = cinder-volumes
- iscsi_protocol = iscsi
- iscsi_helper = tgtadm
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- * In the ``[lvm]`` section, configure the LVM back end with the
- LVM driver, ``cinder-volumes`` volume group, iSCSI protocol,
- and appropriate iSCSI service. If the ``[lvm]`` section does not exist,
- create it:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [lvm]
- volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_group = cinder-volumes
- iscsi_protocol = iscsi
- iscsi_helper = lioadm
-
- .. end
-
- .. endonly
-
- * In the ``[DEFAULT]`` section, enable the LVM back end:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- enabled_backends = lvm
-
- .. end
-
- .. note::
-
- Back-end names are arbitrary. As an example, this guide
- uses the name of the driver as the name of the back end.
-
- * In the ``[DEFAULT]`` section, configure the location of the
- Image service API:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- glance_api_servers = http://controller:9292
-
- .. end
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/cinder/cinder.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/cinder/tmp
-
- .. end
-
-.. only:: obs
-
- 3. Create the ``/etc/tgt/conf.d/cinder.conf`` file
- with the following data:
-
- .. code-block:: shell
-
- include /var/lib/cinder/volumes/*
-
- .. end
-
-.. endonly
-
-Finalize installation
----------------------
-
-.. only:: obs
-
- * Start the Block Storage volume service including its dependencies
- and configure them to start when the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-cinder-volume.service tgtd.service
- # systemctl start openstack-cinder-volume.service tgtd.service
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- * Start the Block Storage volume service including its dependencies
- and configure them to start when the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-cinder-volume.service target.service
- # systemctl start openstack-cinder-volume.service target.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Restart the Block Storage volume service including its dependencies:
-
- .. code-block:: console
-
- # service tgt restart
- # service cinder-volume restart
-
- .. end
-
-.. endonly
+ cinder-storage-install-*
diff --git a/doc/install-guide/source/environment-debian.rst b/doc/install-guide/source/environment-debian.rst
new file mode 100644
index 0000000000..2b7f561a0c
--- /dev/null
+++ b/doc/install-guide/source/environment-debian.rst
@@ -0,0 +1,76 @@
+===========
+Environment
+===========
+
+This section explains how to configure the controller node and one compute
+node using the example architecture.
+
+Although most environments include Identity, Image service, Compute, at least
+one networking service, and the Dashboard, the Object Storage service can
+operate independently. If your use case only involves Object Storage, you can
+skip to `Object Storage Installation Guide
+<https://docs.openstack.org/project-install-guide/object-storage/draft/>`_
+after configuring the appropriate nodes for it.
+
+You must use an account with administrative privileges to configure each node.
+Either run the commands as the ``root`` user or configure the ``sudo``
+utility.
+
+
+For best performance, we recommend that your environment meets or exceeds
+the hardware requirements in :ref:`figure-hwreqs`.
+
+The following minimum requirements should support a proof-of-concept
+environment with core services and several :term:`CirrOS` instances:
+
+* Controller Node: 1 processor, 4 GB memory, and 5 GB storage
+
+* Compute Node: 1 processor, 2 GB memory, and 10 GB storage
+
+As the number of OpenStack services and virtual machines increase, so do the
+hardware requirements for the best performance. If performance degrades after
+enabling additional services or virtual machines, consider adding hardware
+resources to your environment.
+
+To minimize clutter and provide more resources for OpenStack, we recommend
+a minimal installation of your Linux distribution. Also, you must install a
+64-bit version of your distribution on each node.
+
+A single disk partition on each node works for most basic installations.
+However, you should consider :term:`Logical Volume Manager (LVM)` for
+installations with optional services such as Block Storage.
+
+For first-time installation and testing purposes, many users select to build
+each host as a :term:`virtual machine (VM)`. The primary benefits of VMs
+include the following:
+
+* One physical server can support multiple nodes, each with almost any
+ number of network interfaces.
+
+* Ability to take periodic "snap shots" throughout the installation
+ process and "roll back" to a working configuration in the event of a
+ problem.
+
+However, VMs will reduce performance of your instances, particularly if
+your hypervisor and/or processor lacks support for hardware acceleration
+of nested VMs.
+
+.. note::
+
+ If you choose to install on VMs, make sure your hypervisor provides
+ a way to disable MAC address filtering on the provider network
+ interface.
+
+For more information about system requirements, see the `OpenStack
+Operations Guide <https://docs.openstack.org/ops-guide/>`_.
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-security.rst
+ environment-networking.rst
+ environment-ntp.rst
+ environment-packages.rst
+ environment-sql-database.rst
+ environment-messaging.rst
+ environment-memcached.rst
diff --git a/doc/install-guide/source/environment-memcached-debian.rst b/doc/install-guide/source/environment-memcached-debian.rst
new file mode 100644
index 0000000000..588c1e3182
--- /dev/null
+++ b/doc/install-guide/source/environment-memcached-debian.rst
@@ -0,0 +1,54 @@
+Memcached
+~~~~~~~~~
+
+The Identity service authentication mechanism for services uses Memcached
+to cache tokens. The memcached service typically runs on the controller
+node. For production deployments, we recommend enabling a combination of
+firewalling, authentication, and encryption to secure it.
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install memcached python-memcache
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/memcached.conf`` file and configure the
+ service to use the management IP address of the controller node.
+ This is to enable access by other nodes via the management network:
+
+ .. code-block:: none
+
+ -l 10.0.0.11
+
+ .. end
+
+ .. note::
+
+ Change the existing line that had ``-l 127.0.0.1``.
+
+
+
+
+Finalize installation
+---------------------
+
+
+* Restart the Memcached service:
+
+ .. code-block:: console
+
+ # service memcached restart
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-memcached-obs.rst b/doc/install-guide/source/environment-memcached-obs.rst
new file mode 100644
index 0000000000..55c93c2842
--- /dev/null
+++ b/doc/install-guide/source/environment-memcached-obs.rst
@@ -0,0 +1,59 @@
+Memcached
+~~~~~~~~~
+
+The Identity service authentication mechanism for services uses Memcached
+to cache tokens. The memcached service typically runs on the controller
+node. For production deployments, we recommend enabling a combination of
+firewalling, authentication, and encryption to secure it.
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+
+
+.. code-block:: console
+
+ # zypper install memcached python-python-memcached
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/sysconfig/memcached`` file and complete the
+ following actions:
+
+ * Configure the service to use the management IP address of the
+ controller node. This is to enable access by other nodes via
+ the management network:
+
+ .. code-block:: none
+
+        MEMCACHED_PARAMS="-l 10.0.0.11"
+
+ .. end
+
+ .. note::
+
+ Change the existing line ``MEMCACHED_PARAMS="-l 127.0.0.1,::1"``.
+
+
+Finalize installation
+---------------------
+
+
+
+* Start the Memcached service and configure it to start when the system
+ boots:
+
+ .. code-block:: console
+
+ # systemctl enable memcached.service
+ # systemctl start memcached.service
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-memcached-rdo.rst b/doc/install-guide/source/environment-memcached-rdo.rst
new file mode 100644
index 0000000000..9bea64542e
--- /dev/null
+++ b/doc/install-guide/source/environment-memcached-rdo.rst
@@ -0,0 +1,59 @@
+Memcached
+~~~~~~~~~
+
+The Identity service authentication mechanism for services uses Memcached
+to cache tokens. The memcached service typically runs on the controller
+node. For production deployments, we recommend enabling a combination of
+firewalling, authentication, and encryption to secure it.
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+
+.. code-block:: console
+
+ # yum install memcached python-memcached
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/sysconfig/memcached`` file and complete the
+ following actions:
+
+ * Configure the service to use the management IP address of the
+ controller node. This is to enable access by other nodes via
+ the management network:
+
+ .. code-block:: none
+
+ OPTIONS="-l 127.0.0.1,::1,controller"
+
+ .. end
+
+ .. note::
+
+ Change the existing line ``OPTIONS="-l 127.0.0.1,::1"``.
+
+
+
+Finalize installation
+---------------------
+
+
+
+* Start the Memcached service and configure it to start when the system
+ boots:
+
+ .. code-block:: console
+
+ # systemctl enable memcached.service
+ # systemctl start memcached.service
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-memcached-ubuntu.rst b/doc/install-guide/source/environment-memcached-ubuntu.rst
new file mode 100644
index 0000000000..588c1e3182
--- /dev/null
+++ b/doc/install-guide/source/environment-memcached-ubuntu.rst
@@ -0,0 +1,54 @@
+Memcached
+~~~~~~~~~
+
+The Identity service authentication mechanism for services uses Memcached
+to cache tokens. The memcached service typically runs on the controller
+node. For production deployments, we recommend enabling a combination of
+firewalling, authentication, and encryption to secure it.
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install memcached python-memcache
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/memcached.conf`` file and configure the
+ service to use the management IP address of the controller node.
+ This is to enable access by other nodes via the management network:
+
+ .. code-block:: none
+
+ -l 10.0.0.11
+
+ .. end
+
+ .. note::
+
+ Change the existing line that had ``-l 127.0.0.1``.
+
+
+
+
+Finalize installation
+---------------------
+
+
+* Restart the Memcached service:
+
+ .. code-block:: console
+
+ # service memcached restart
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-memcached.rst b/doc/install-guide/source/environment-memcached.rst
index 80edfac066..7de38deea4 100644
--- a/doc/install-guide/source/environment-memcached.rst
+++ b/doc/install-guide/source/environment-memcached.rst
@@ -6,126 +6,7 @@ to cache tokens. The memcached service typically runs on the controller
node. For production deployments, we recommend enabling a combination of
firewalling, authentication, and encryption to secure it.
-Install and configure components
---------------------------------
+.. toctree::
+ :glob:
-#. Install the packages:
-
- .. only:: ubuntu or debian
-
- .. code-block:: console
-
- # apt install memcached python-memcache
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum install memcached python-memcached
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper install memcached python-python-memcached
-
- .. end
-
- .. endonly
-
-.. only:: ubuntu or debian
-
- 2. Edit the ``/etc/memcached.conf`` file and configure the
- service to use the management IP address of the controller node.
- This is to enable access by other nodes via the management network:
-
- .. code-block:: none
-
- -l 10.0.0.11
-
- .. end
-
- .. note::
-
- Change the existing line that had ``-l 127.0.0.1``.
-
-.. endonly
-
-.. only:: rdo
-
- 2. Edit the ``/etc/sysconfig/memcached`` file and complete the
- following actions:
-
- * Configure the service to use the management IP address of the
- controller node. This is to enable access by other nodes via
- the management network:
-
- .. code-block:: none
-
- OPTIONS="-l 127.0.0.1,::1,controller"
-
- .. end
-
- .. note::
-
- Change the existing line ``OPTIONS="-l 127.0.0.1,::1"``.
-
-.. endonly
-
-.. only:: obs
-
- 2. Edit the ``/etc/sysconfig/memcached`` file and complete the
- following actions:
-
- * Configure the service to use the management IP address of the
- controller node. This is to enable access by other nodes via
- the management network:
-
- .. code-block:: none
-
- MEMCACHED_PARAMS="-l 127.0.0.1"
-
- .. end
-
- .. note::
-
- Change the existing line ``MEMCACHED_PARAMS="-l 127.0.0.1,::1"``.
-
-.. endonly
-
-Finalize installation
----------------------
-
-.. only:: ubuntu or debian
-
- * Restart the Memcached service:
-
- .. code-block:: console
-
- # service memcached restart
-
- .. end
-
-.. endonly
-
-.. only:: rdo or obs
-
- * Start the Memcached service and configure it to start when the system
- boots:
-
- .. code-block:: console
-
- # systemctl enable memcached.service
- # systemctl start memcached.service
-
- .. end
-
-.. endonly
+ environment-memcached-*
diff --git a/doc/install-guide/source/environment-messaging-debian.rst b/doc/install-guide/source/environment-messaging-debian.rst
new file mode 100644
index 0000000000..a72c1d74f5
--- /dev/null
+++ b/doc/install-guide/source/environment-messaging-debian.rst
@@ -0,0 +1,56 @@
+Message queue
+~~~~~~~~~~~~~
+
+OpenStack uses a :term:`message queue` to coordinate operations and
+status information among services. The message queue service typically
+runs on the controller node. OpenStack supports several message queue
+services including `RabbitMQ <https://www.rabbitmq.com>`__,
+`Qpid <https://qpid.apache.org>`__, and `ZeroMQ <http://zeromq.org>`__.
+However, most distributions that package OpenStack support a particular
+message queue service. This guide implements the RabbitMQ message queue
+service because most distributions support it. If you prefer to
+implement a different message queue service, consult the documentation
+associated with it.
+
+The message queue runs on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the package:
+
+
+.. code-block:: console
+
+ # apt install rabbitmq-server
+
+.. end
+
+
+
+
+
+
+2. Add the ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl add_user openstack RABBIT_PASS
+
+ Creating user "openstack" ...
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with a suitable password.
+
+3. Permit configuration, write, and read access for the
+ ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+
+ Setting permissions for user "openstack" in vhost "/" ...
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-messaging-obs.rst b/doc/install-guide/source/environment-messaging-obs.rst
new file mode 100644
index 0000000000..f8887f6215
--- /dev/null
+++ b/doc/install-guide/source/environment-messaging-obs.rst
@@ -0,0 +1,66 @@
+Message queue
+~~~~~~~~~~~~~
+
+OpenStack uses a :term:`message queue` to coordinate operations and
+status information among services. The message queue service typically
+runs on the controller node. OpenStack supports several message queue
+services including `RabbitMQ <https://www.rabbitmq.com>`__,
+`Qpid <https://qpid.apache.org>`__, and `ZeroMQ <http://zeromq.org>`__.
+However, most distributions that package OpenStack support a particular
+message queue service. This guide implements the RabbitMQ message queue
+service because most distributions support it. If you prefer to
+implement a different message queue service, consult the documentation
+associated with it.
+
+The message queue runs on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the package:
+
+
+
+
+.. code-block:: console
+
+ # zypper install rabbitmq-server
+
+.. end
+
+
+
+2. Start the message queue service and configure it to start when the
+ system boots:
+
+ .. code-block:: console
+
+ # systemctl enable rabbitmq-server.service
+ # systemctl start rabbitmq-server.service
+
+ .. end
+
+3. Add the ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl add_user openstack RABBIT_PASS
+
+ Creating user "openstack" ...
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with a suitable password.
+
+4. Permit configuration, write, and read access for the
+ ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+
+ Setting permissions for user "openstack" in vhost "/" ...
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-messaging-rdo.rst b/doc/install-guide/source/environment-messaging-rdo.rst
new file mode 100644
index 0000000000..626f3d0492
--- /dev/null
+++ b/doc/install-guide/source/environment-messaging-rdo.rst
@@ -0,0 +1,66 @@
+Message queue
+~~~~~~~~~~~~~
+
+OpenStack uses a :term:`message queue` to coordinate operations and
+status information among services. The message queue service typically
+runs on the controller node. OpenStack supports several message queue
+services including `RabbitMQ <https://www.rabbitmq.com>`__,
+`Qpid <https://qpid.apache.org>`__, and `ZeroMQ <http://zeromq.org>`__.
+However, most distributions that package OpenStack support a particular
+message queue service. This guide implements the RabbitMQ message queue
+service because most distributions support it. If you prefer to
+implement a different message queue service, consult the documentation
+associated with it.
+
+The message queue runs on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the package:
+
+
+
+.. code-block:: console
+
+ # yum install rabbitmq-server
+
+.. end
+
+
+
+
+2. Start the message queue service and configure it to start when the
+ system boots:
+
+ .. code-block:: console
+
+ # systemctl enable rabbitmq-server.service
+ # systemctl start rabbitmq-server.service
+
+ .. end
+
+3. Add the ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl add_user openstack RABBIT_PASS
+
+ Creating user "openstack" ...
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with a suitable password.
+
+4. Permit configuration, write, and read access for the
+ ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+
+ Setting permissions for user "openstack" in vhost "/" ...
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-messaging-ubuntu.rst b/doc/install-guide/source/environment-messaging-ubuntu.rst
new file mode 100644
index 0000000000..a72c1d74f5
--- /dev/null
+++ b/doc/install-guide/source/environment-messaging-ubuntu.rst
@@ -0,0 +1,56 @@
+Message queue
+~~~~~~~~~~~~~
+
+OpenStack uses a :term:`message queue` to coordinate operations and
+status information among services. The message queue service typically
+runs on the controller node. OpenStack supports several message queue
+services including `RabbitMQ <https://www.rabbitmq.com>`__,
+`Qpid <https://qpid.apache.org>`__, and `ZeroMQ <http://zeromq.org>`__.
+However, most distributions that package OpenStack support a particular
+message queue service. This guide implements the RabbitMQ message queue
+service because most distributions support it. If you prefer to
+implement a different message queue service, consult the documentation
+associated with it.
+
+The message queue runs on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the package:
+
+
+.. code-block:: console
+
+ # apt install rabbitmq-server
+
+.. end
+
+
+
+
+
+
+2. Add the ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl add_user openstack RABBIT_PASS
+
+ Creating user "openstack" ...
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with a suitable password.
+
+3. Permit configuration, write, and read access for the
+ ``openstack`` user:
+
+ .. code-block:: console
+
+ # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+
+ Setting permissions for user "openstack" in vhost "/" ...
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-messaging.rst b/doc/install-guide/source/environment-messaging.rst
index e3ce51c9f4..ca5755d41e 100644
--- a/doc/install-guide/source/environment-messaging.rst
+++ b/doc/install-guide/source/environment-messaging.rst
@@ -14,101 +14,7 @@ associated with it.
The message queue runs on the controller node.
-Install and configure components
---------------------------------
+.. toctree::
+ :glob:
-1. Install the package:
-
- .. only:: ubuntu or debian
-
- .. code-block:: console
-
- # apt install rabbitmq-server
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum install rabbitmq-server
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper install rabbitmq-server
-
- .. end
-
- .. endonly
-
-.. only:: rdo or obs
-
- 2. Start the message queue service and configure it to start when the
- system boots:
-
- .. code-block:: console
-
- # systemctl enable rabbitmq-server.service
- # systemctl start rabbitmq-server.service
-
- .. end
-
- 3. Add the ``openstack`` user:
-
- .. code-block:: console
-
- # rabbitmqctl add_user openstack RABBIT_PASS
-
- Creating user "openstack" ...
-
- .. end
-
- Replace ``RABBIT_PASS`` with a suitable password.
-
- 4. Permit configuration, write, and read access for the
- ``openstack`` user:
-
- .. code-block:: console
-
- # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
-
- Setting permissions for user "openstack" in vhost "/" ...
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- 2. Add the ``openstack`` user:
-
- .. code-block:: console
-
- # rabbitmqctl add_user openstack RABBIT_PASS
-
- Creating user "openstack" ...
-
- .. end
-
- Replace ``RABBIT_PASS`` with a suitable password.
-
- 3. Permit configuration, write, and read access for the
- ``openstack`` user:
-
- .. code-block:: console
-
- # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
-
- Setting permissions for user "openstack" in vhost "/" ...
-
- .. end
-
-.. endonly
+ environment-messaging-*
diff --git a/doc/install-guide/source/environment-networking-compute-debian.rst b/doc/install-guide/source/environment-networking-compute-debian.rst
new file mode 100644
index 0000000000..dd01fd5d62
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-compute-debian.rst
@@ -0,0 +1,50 @@
+Compute node
+~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.31
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+ .. note::
+
+ Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on.
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+* Edit the ``/etc/network/interfaces`` file to contain the following:
+
+ .. path /etc/network/interfaces
+ .. code-block:: bash
+
+ # The provider network interface
+ auto INTERFACE_NAME
+ iface INTERFACE_NAME inet manual
+ up ip link set dev $IFACE up
+ down ip link set dev $IFACE down
+
+ .. end
+
+
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``compute1``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-compute-obs.rst b/doc/install-guide/source/environment-networking-compute-obs.rst
new file mode 100644
index 0000000000..ac66fc32d7
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-compute-obs.rst
@@ -0,0 +1,48 @@
+Compute node
+~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.31
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+ .. note::
+
+ Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on.
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+
+
+* Edit the ``/etc/sysconfig/network/ifcfg-INTERFACE_NAME`` file to
+ contain the following:
+
+ .. path /etc/sysconfig/network/ifcfg-INTERFACE_NAME
+ .. code-block:: bash
+
+ STARTMODE='auto'
+ BOOTPROTO='static'
+
+ .. end
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``compute1``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-compute-rdo.rst b/doc/install-guide/source/environment-networking-compute-rdo.rst
new file mode 100644
index 0000000000..a7c1e6cbc2
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-compute-rdo.rst
@@ -0,0 +1,52 @@
+Compute node
+~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.31
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+ .. note::
+
+ Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on.
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+
+* Edit the ``/etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME`` file
+ to contain the following:
+
+ Do not change the ``HWADDR`` and ``UUID`` keys.
+
+ .. path /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME
+ .. code-block:: bash
+
+ DEVICE=INTERFACE_NAME
+ TYPE=Ethernet
+ ONBOOT="yes"
+ BOOTPROTO="none"
+
+ .. end
+
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``compute1``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-compute-ubuntu.rst b/doc/install-guide/source/environment-networking-compute-ubuntu.rst
new file mode 100644
index 0000000000..dd01fd5d62
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-compute-ubuntu.rst
@@ -0,0 +1,50 @@
+Compute node
+~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.31
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+ .. note::
+
+ Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on.
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+* Edit the ``/etc/network/interfaces`` file to contain the following:
+
+ .. path /etc/network/interfaces
+ .. code-block:: bash
+
+ # The provider network interface
+ auto INTERFACE_NAME
+ iface INTERFACE_NAME inet manual
+ up ip link set dev $IFACE up
+ down ip link set dev $IFACE down
+
+ .. end
+
+
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``compute1``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-compute.rst b/doc/install-guide/source/environment-networking-compute.rst
index c5f9ae89e6..d514d7546f 100644
--- a/doc/install-guide/source/environment-networking-compute.rst
+++ b/doc/install-guide/source/environment-networking-compute.rst
@@ -1,84 +1,7 @@
Compute node
~~~~~~~~~~~~
-Configure network interfaces
-----------------------------
+.. toctree::
+ :glob:
-#. Configure the first interface as the management interface:
-
- IP address: 10.0.0.31
-
- Network mask: 255.255.255.0 (or /24)
-
- Default gateway: 10.0.0.1
-
- .. note::
-
- Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on.
-
-#. The provider interface uses a special configuration without an IP
- address assigned to it. Configure the second interface as the provider
- interface:
-
- Replace ``INTERFACE_NAME`` with the actual interface name. For example,
- *eth1* or *ens224*.
-
- .. only:: ubuntu or debian
-
- * Edit the ``/etc/network/interfaces`` file to contain the following:
-
- .. path /etc/network/interfaces
- .. code-block:: bash
-
- # The provider network interface
- auto INTERFACE_NAME
- iface INTERFACE_NAME inet manual
- up ip link set dev $IFACE up
- down ip link set dev $IFACE down
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- * Edit the ``/etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME`` file
- to contain the following:
-
- Do not change the ``HWADDR`` and ``UUID`` keys.
-
- .. path /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME
- .. code-block:: bash
-
- DEVICE=INTERFACE_NAME
- TYPE=Ethernet
- ONBOOT="yes"
- BOOTPROTO="none"
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- * Edit the ``/etc/sysconfig/network/ifcfg-INTERFACE_NAME`` file to
- contain the following:
-
- .. path /etc/sysconfig/network/ifcfg-INTERFACE_NAME
- .. code-block:: bash
-
- STARTMODE='auto'
- BOOTPROTO='static'
-
- .. end
-
- .. endonly
-
-#. Reboot the system to activate the changes.
-
-Configure name resolution
--------------------------
-
-#. Set the hostname of the node to ``compute1``.
-
-#. .. include:: shared/edit_hosts_file.txt
+ environment-networking-compute-*
diff --git a/doc/install-guide/source/environment-networking-controller-debian.rst b/doc/install-guide/source/environment-networking-controller-debian.rst
new file mode 100644
index 0000000000..6b578edc96
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-controller-debian.rst
@@ -0,0 +1,46 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.11
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+* Edit the ``/etc/network/interfaces`` file to contain the following:
+
+ .. path /etc/network/interfaces
+ .. code-block:: bash
+
+ # The provider network interface
+ auto INTERFACE_NAME
+ iface INTERFACE_NAME inet manual
+ up ip link set dev $IFACE up
+ down ip link set dev $IFACE down
+
+ .. end
+
+
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``controller``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-controller-obs.rst b/doc/install-guide/source/environment-networking-controller-obs.rst
new file mode 100644
index 0000000000..3118969ec1
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-controller-obs.rst
@@ -0,0 +1,44 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.11
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+
+
+* Edit the ``/etc/sysconfig/network/ifcfg-INTERFACE_NAME`` file to
+ contain the following:
+
+ .. path /etc/sysconfig/network/ifcfg-INTERFACE_NAME
+ .. code-block:: ini
+
+ STARTMODE='auto'
+ BOOTPROTO='static'
+
+ .. end
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``controller``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-controller-rdo.rst b/doc/install-guide/source/environment-networking-controller-rdo.rst
new file mode 100644
index 0000000000..5f05c9adfd
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-controller-rdo.rst
@@ -0,0 +1,48 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.11
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+
+* Edit the ``/etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME`` file
+ to contain the following:
+
+ Do not change the ``HWADDR`` and ``UUID`` keys.
+
+ .. path /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME
+ .. code-block:: ini
+
+ DEVICE=INTERFACE_NAME
+ TYPE=Ethernet
+ ONBOOT="yes"
+ BOOTPROTO="none"
+
+ .. end
+
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``controller``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-controller-ubuntu.rst b/doc/install-guide/source/environment-networking-controller-ubuntu.rst
new file mode 100644
index 0000000000..6b578edc96
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-controller-ubuntu.rst
@@ -0,0 +1,46 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Configure network interfaces
+----------------------------
+
+#. Configure the first interface as the management interface:
+
+ IP address: 10.0.0.11
+
+ Network mask: 255.255.255.0 (or /24)
+
+ Default gateway: 10.0.0.1
+
+#. The provider interface uses a special configuration without an IP
+ address assigned to it. Configure the second interface as the provider
+ interface:
+
+ Replace ``INTERFACE_NAME`` with the actual interface name. For example,
+ *eth1* or *ens224*.
+
+
+* Edit the ``/etc/network/interfaces`` file to contain the following:
+
+ .. path /etc/network/interfaces
+ .. code-block:: bash
+
+ # The provider network interface
+ auto INTERFACE_NAME
+ iface INTERFACE_NAME inet manual
+ up ip link set dev $IFACE up
+ down ip link set dev $IFACE down
+
+ .. end
+
+
+
+
+#. Reboot the system to activate the changes.
+
+Configure name resolution
+-------------------------
+
+#. Set the hostname of the node to ``controller``.
+
+#. .. include:: shared/edit_hosts_file.txt
diff --git a/doc/install-guide/source/environment-networking-controller.rst b/doc/install-guide/source/environment-networking-controller.rst
index eb4cb0181e..a9e95add5d 100644
--- a/doc/install-guide/source/environment-networking-controller.rst
+++ b/doc/install-guide/source/environment-networking-controller.rst
@@ -1,80 +1,7 @@
Controller node
~~~~~~~~~~~~~~~
-Configure network interfaces
-----------------------------
+.. toctree::
+ :glob:
-#. Configure the first interface as the management interface:
-
- IP address: 10.0.0.11
-
- Network mask: 255.255.255.0 (or /24)
-
- Default gateway: 10.0.0.1
-
-#. The provider interface uses a special configuration without an IP
- address assigned to it. Configure the second interface as the provider
- interface:
-
- Replace ``INTERFACE_NAME`` with the actual interface name. For example,
- *eth1* or *ens224*.
-
- .. only:: ubuntu or debian
-
- * Edit the ``/etc/network/interfaces`` file to contain the following:
-
- .. path /etc/network/interfaces
- .. code-block:: bash
-
- # The provider network interface
- auto INTERFACE_NAME
- iface INTERFACE_NAME inet manual
- up ip link set dev $IFACE up
- down ip link set dev $IFACE down
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- * Edit the ``/etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME`` file
- to contain the following:
-
- Do not change the ``HWADDR`` and ``UUID`` keys.
-
- .. path /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME
- .. code-block:: ini
-
- DEVICE=INTERFACE_NAME
- TYPE=Ethernet
- ONBOOT="yes"
- BOOTPROTO="none"
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- * Edit the ``/etc/sysconfig/network/ifcfg-INTERFACE_NAME`` file to
- contain the following:
-
- .. path /etc/sysconfig/network/ifcfg-INTERFACE_NAME
- .. code-block:: ini
-
- STARTMODE='auto'
- BOOTPROTO='static'
-
- .. end
-
- .. endonly
-
-#. Reboot the system to activate the changes.
-
-Configure name resolution
--------------------------
-
-#. Set the hostname of the node to ``controller``.
-
-#. .. include:: shared/edit_hosts_file.txt
+ environment-networking-controller-*
diff --git a/doc/install-guide/source/environment-networking-debian.rst b/doc/install-guide/source/environment-networking-debian.rst
new file mode 100644
index 0000000000..77c4403244
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-debian.rst
@@ -0,0 +1,91 @@
+Host networking
+~~~~~~~~~~~~~~~
+
+
+
+After installing the operating system on each node for the architecture
+that you choose to deploy, you must configure the network interfaces. We
+recommend that you disable any automated network management tools and
+manually edit the appropriate configuration files for your distribution.
+For more information on how to configure networking on your
+distribution, see the `documentation
+<https://wiki.debian.org/NetworkConfiguration>`__.
+
+
+
+
+All nodes require Internet access for administrative purposes such as package
+installation, security updates, :term:`DNS `, and
+:term:`NTP `. In most cases, nodes should obtain
+Internet access through the management network interface.
+To highlight the importance of network separation, the example architectures
+use `private address space <https://tools.ietf.org/html/rfc1918>`__ for the
+management network and assume that the physical network infrastructure
+provides Internet access via :term:`NAT `
+or other methods. The example architectures use routable IP address space for
+the provider (external) network and assume that the physical network
+infrastructure provides direct Internet access.
+
+In the provider networks architecture, all instances attach directly
+to the provider network. In the self-service (private) networks architecture,
+instances can attach to a self-service or provider network. Self-service
+networks can reside entirely within OpenStack or provide some level of external
+network access using :term:`NAT ` through
+the provider network.
+
+.. _figure-networklayout:
+
+.. figure:: figures/networklayout.png
+ :alt: Network layout
+
+The example architectures assume use of the following networks:
+
+* Management on 10.0.0.0/24 with gateway 10.0.0.1
+
+ This network requires a gateway to provide Internet access to all
+ nodes for administrative purposes such as package installation,
+ security updates, :term:`DNS `, and
+ :term:`NTP `.
+
+* Provider on 203.0.113.0/24 with gateway 203.0.113.1
+
+ This network requires a gateway to provide Internet access to
+ instances in your OpenStack environment.
+
+You can modify these ranges and gateways to work with your particular
+network infrastructure.
+
+Network interface names vary by distribution. Traditionally,
+interfaces use ``eth`` followed by a sequential number. To cover all
+variations, this guide refers to the first interface as the
+interface with the lowest number and the second interface as the
+interface with the highest number.
+
+Unless you intend to use the exact configuration provided in this
+example architecture, you must modify the networks in this procedure to
+match your environment. Each node must resolve the other nodes by
+name in addition to IP address. For example, the ``controller`` name must
+resolve to ``10.0.0.11``, the IP address of the management interface on
+the controller node.
+
+.. warning::
+
+ Reconfiguring network interfaces will interrupt network
+ connectivity. We recommend using a local terminal session for these
+ procedures.
+
+.. note::
+
+ Your distribution does not enable a restrictive :term:`firewall` by
+ default. For more information about securing your environment,
+ refer to the `OpenStack Security Guide
+ <https://docs.openstack.org/security-guide/>`_.
+
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-networking-controller.rst
+ environment-networking-compute.rst
+ environment-networking-storage-cinder.rst
+ environment-networking-verify.rst
diff --git a/doc/install-guide/source/environment-networking-obs.rst b/doc/install-guide/source/environment-networking-obs.rst
new file mode 100644
index 0000000000..9931877950
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-obs.rst
@@ -0,0 +1,96 @@
+Host networking
+~~~~~~~~~~~~~~~
+
+
+
+
+
+After installing the operating system on each node for the architecture
+that you choose to deploy, you must configure the network interfaces. We
+recommend that you disable any automated network management tools and
+manually edit the appropriate configuration files for your distribution.
+For more information on how to configure networking on your
+distribution, see the `SLES 12
+<https://www.suse.com/documentation/sles-12/book_sle_admin/data/sec_basicnet_manconf.html>`__
+or `openSUSE
+<https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha.basicnet.html>`__
+documentation.
+
+
+All nodes require Internet access for administrative purposes such as package
+installation, security updates, :term:`DNS `, and
+:term:`NTP `. In most cases, nodes should obtain
+Internet access through the management network interface.
+To highlight the importance of network separation, the example architectures
+use `private address space <https://tools.ietf.org/html/rfc1918>`__ for the
+management network and assume that the physical network infrastructure
+provides Internet access via :term:`NAT `
+or other methods. The example architectures use routable IP address space for
+the provider (external) network and assume that the physical network
+infrastructure provides direct Internet access.
+
+In the provider networks architecture, all instances attach directly
+to the provider network. In the self-service (private) networks architecture,
+instances can attach to a self-service or provider network. Self-service
+networks can reside entirely within OpenStack or provide some level of external
+network access using :term:`NAT ` through
+the provider network.
+
+.. _figure-networklayout:
+
+.. figure:: figures/networklayout.png
+ :alt: Network layout
+
+The example architectures assume use of the following networks:
+
+* Management on 10.0.0.0/24 with gateway 10.0.0.1
+
+ This network requires a gateway to provide Internet access to all
+ nodes for administrative purposes such as package installation,
+ security updates, :term:`DNS `, and
+ :term:`NTP `.
+
+* Provider on 203.0.113.0/24 with gateway 203.0.113.1
+
+ This network requires a gateway to provide Internet access to
+ instances in your OpenStack environment.
+
+You can modify these ranges and gateways to work with your particular
+network infrastructure.
+
+Network interface names vary by distribution. Traditionally,
+interfaces use ``eth`` followed by a sequential number. To cover all
+variations, this guide refers to the first interface as the
+interface with the lowest number and the second interface as the
+interface with the highest number.
+
+Unless you intend to use the exact configuration provided in this
+example architecture, you must modify the networks in this procedure to
+match your environment. Each node must resolve the other nodes by
+name in addition to IP address. For example, the ``controller`` name must
+resolve to ``10.0.0.11``, the IP address of the management interface on
+the controller node.
+
+.. warning::
+
+ Reconfiguring network interfaces will interrupt network
+ connectivity. We recommend using a local terminal session for these
+ procedures.
+
+.. note::
+
+ Your distribution enables a restrictive :term:`firewall` by
+ default. During the installation process, certain steps will fail
+ unless you alter or disable the firewall. For more information
+ about securing your environment, refer to the `OpenStack Security
+ Guide <https://docs.openstack.org/security-guide/>`_.
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-networking-controller.rst
+ environment-networking-compute.rst
+ environment-networking-storage-cinder.rst
+ environment-networking-verify.rst
diff --git a/doc/install-guide/source/environment-networking-rdo.rst b/doc/install-guide/source/environment-networking-rdo.rst
new file mode 100644
index 0000000000..0148f5bb8f
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-rdo.rst
@@ -0,0 +1,93 @@
+Host networking
+~~~~~~~~~~~~~~~
+
+
+
+
+After installing the operating system on each node for the architecture
+that you choose to deploy, you must configure the network interfaces. We
+recommend that you disable any automated network management tools and
+manually edit the appropriate configuration files for your distribution.
+For more information on how to configure networking on your
+distribution, see the `documentation
+<https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/index.html>`__.
+
+
+
+All nodes require Internet access for administrative purposes such as package
+installation, security updates, :term:`DNS `, and
+:term:`NTP `. In most cases, nodes should obtain
+Internet access through the management network interface.
+To highlight the importance of network separation, the example architectures
+use `private address space <https://tools.ietf.org/html/rfc1918>`__ for the
+management network and assume that the physical network infrastructure
+provides Internet access via :term:`NAT `
+or other methods. The example architectures use routable IP address space for
+the provider (external) network and assume that the physical network
+infrastructure provides direct Internet access.
+
+In the provider networks architecture, all instances attach directly
+to the provider network. In the self-service (private) networks architecture,
+instances can attach to a self-service or provider network. Self-service
+networks can reside entirely within OpenStack or provide some level of external
+network access using :term:`NAT ` through
+the provider network.
+
+.. _figure-networklayout:
+
+.. figure:: figures/networklayout.png
+ :alt: Network layout
+
+The example architectures assume use of the following networks:
+
+* Management on 10.0.0.0/24 with gateway 10.0.0.1
+
+ This network requires a gateway to provide Internet access to all
+ nodes for administrative purposes such as package installation,
+ security updates, :term:`DNS `, and
+ :term:`NTP `.
+
+* Provider on 203.0.113.0/24 with gateway 203.0.113.1
+
+ This network requires a gateway to provide Internet access to
+ instances in your OpenStack environment.
+
+You can modify these ranges and gateways to work with your particular
+network infrastructure.
+
+Network interface names vary by distribution. Traditionally,
+interfaces use ``eth`` followed by a sequential number. To cover all
+variations, this guide refers to the first interface as the
+interface with the lowest number and the second interface as the
+interface with the highest number.
+
+Unless you intend to use the exact configuration provided in this
+example architecture, you must modify the networks in this procedure to
+match your environment. Each node must resolve the other nodes by
+name in addition to IP address. For example, the ``controller`` name must
+resolve to ``10.0.0.11``, the IP address of the management interface on
+the controller node.
+
+.. warning::
+
+ Reconfiguring network interfaces will interrupt network
+ connectivity. We recommend using a local terminal session for these
+ procedures.
+
+.. note::
+
+ Your distribution enables a restrictive :term:`firewall` by
+ default. During the installation process, certain steps will fail
+ unless you alter or disable the firewall. For more information
+ about securing your environment, refer to the `OpenStack Security
+ Guide <https://docs.openstack.org/security-guide/>`_.
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-networking-controller.rst
+ environment-networking-compute.rst
+ environment-networking-storage-cinder.rst
+ environment-networking-verify.rst
diff --git a/doc/install-guide/source/environment-networking-ubuntu.rst b/doc/install-guide/source/environment-networking-ubuntu.rst
new file mode 100644
index 0000000000..57291d4636
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-ubuntu.rst
@@ -0,0 +1,90 @@
+Host networking
+~~~~~~~~~~~~~~~
+
+
+After installing the operating system on each node for the architecture
+that you choose to deploy, you must configure the network interfaces. We
+recommend that you disable any automated network management tools and
+manually edit the appropriate configuration files for your distribution.
+For more information on how to configure networking on your
+distribution, see the `documentation <https://help.ubuntu.com/lts/serverguide/network-configuration.html>`_.
+
+
+
+
+
+All nodes require Internet access for administrative purposes such as package
+installation, security updates, :term:`DNS `, and
+:term:`NTP `. In most cases, nodes should obtain
+Internet access through the management network interface.
+To highlight the importance of network separation, the example architectures
+use `private address space <https://tools.ietf.org/html/rfc1918>`__ for the
+management network and assume that the physical network infrastructure
+provides Internet access via :term:`NAT `
+or other methods. The example architectures use routable IP address space for
+the provider (external) network and assume that the physical network
+infrastructure provides direct Internet access.
+
+In the provider networks architecture, all instances attach directly
+to the provider network. In the self-service (private) networks architecture,
+instances can attach to a self-service or provider network. Self-service
+networks can reside entirely within OpenStack or provide some level of external
+network access using :term:`NAT ` through
+the provider network.
+
+.. _figure-networklayout:
+
+.. figure:: figures/networklayout.png
+ :alt: Network layout
+
+The example architectures assume use of the following networks:
+
+* Management on 10.0.0.0/24 with gateway 10.0.0.1
+
+ This network requires a gateway to provide Internet access to all
+ nodes for administrative purposes such as package installation,
+ security updates, :term:`DNS `, and
+ :term:`NTP `.
+
+* Provider on 203.0.113.0/24 with gateway 203.0.113.1
+
+ This network requires a gateway to provide Internet access to
+ instances in your OpenStack environment.
+
+You can modify these ranges and gateways to work with your particular
+network infrastructure.
+
+Network interface names vary by distribution. Traditionally,
+interfaces use ``eth`` followed by a sequential number. To cover all
+variations, this guide refers to the first interface as the
+interface with the lowest number and the second interface as the
+interface with the highest number.
+
+Unless you intend to use the exact configuration provided in this
+example architecture, you must modify the networks in this procedure to
+match your environment. Each node must resolve the other nodes by
+name in addition to IP address. For example, the ``controller`` name must
+resolve to ``10.0.0.11``, the IP address of the management interface on
+the controller node.
+
+.. warning::
+
+ Reconfiguring network interfaces will interrupt network
+ connectivity. We recommend using a local terminal session for these
+ procedures.
+
+.. note::
+
+ Your distribution does not enable a restrictive :term:`firewall` by
+ default. For more information about securing your environment,
+ refer to the `OpenStack Security Guide
+ <https://docs.openstack.org/security-guide/>`_.
+
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-networking-controller.rst
+ environment-networking-compute.rst
+ environment-networking-storage-cinder.rst
+ environment-networking-verify.rst
diff --git a/doc/install-guide/source/environment-networking-verify-debian.rst b/doc/install-guide/source/environment-networking-verify-debian.rst
new file mode 100644
index 0000000000..3da1e6204e
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-verify-debian.rst
@@ -0,0 +1,87 @@
+Verify connectivity
+-------------------
+
+We recommend that you verify network connectivity to the Internet and
+among the nodes before proceeding further.
+
+#. From the *controller* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *controller* node, test access to the management interface on the
+ *compute* node:
+
+ .. code-block:: console
+
+ # ping -c 4 compute1
+
+ PING compute1 (10.0.0.31) 56(84) bytes of data.
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- compute1 ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+#. From the *compute* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *compute* node, test access to the management interface on the
+ *controller* node:
+
+ .. code-block:: console
+
+ # ping -c 4 controller
+
+ PING controller (10.0.0.11) 56(84) bytes of data.
+ 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- controller ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+.. note::
+
+ Your distribution does not enable a restrictive :term:`firewall` by
+ default. For more information about securing your environment,
+ refer to the `OpenStack Security Guide
+ <https://docs.openstack.org/security-guide/>`_.
+
diff --git a/doc/install-guide/source/environment-networking-verify-obs.rst b/doc/install-guide/source/environment-networking-verify-obs.rst
new file mode 100644
index 0000000000..ee192d8f63
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-verify-obs.rst
@@ -0,0 +1,89 @@
+Verify connectivity
+-------------------
+
+We recommend that you verify network connectivity to the Internet and
+among the nodes before proceeding further.
+
+#. From the *controller* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *controller* node, test access to the management interface on the
+ *compute* node:
+
+ .. code-block:: console
+
+ # ping -c 4 compute1
+
+ PING compute1 (10.0.0.31) 56(84) bytes of data.
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- compute1 ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+#. From the *compute* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *compute* node, test access to the management interface on the
+ *controller* node:
+
+ .. code-block:: console
+
+ # ping -c 4 controller
+
+ PING controller (10.0.0.11) 56(84) bytes of data.
+ 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- controller ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+.. note::
+
+ Your distribution enables a restrictive :term:`firewall` by
+ default. During the installation process, certain steps will fail
+ unless you alter or disable the firewall. For more information
+ about securing your environment, refer to the `OpenStack Security
+ Guide <https://docs.openstack.org/security-guide/>`_.
+
+
diff --git a/doc/install-guide/source/environment-networking-verify-rdo.rst b/doc/install-guide/source/environment-networking-verify-rdo.rst
new file mode 100644
index 0000000000..ee192d8f63
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-verify-rdo.rst
@@ -0,0 +1,89 @@
+Verify connectivity
+-------------------
+
+We recommend that you verify network connectivity to the Internet and
+among the nodes before proceeding further.
+
+#. From the *controller* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *controller* node, test access to the management interface on the
+ *compute* node:
+
+ .. code-block:: console
+
+ # ping -c 4 compute1
+
+ PING compute1 (10.0.0.31) 56(84) bytes of data.
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- compute1 ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+#. From the *compute* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *compute* node, test access to the management interface on the
+ *controller* node:
+
+ .. code-block:: console
+
+ # ping -c 4 controller
+
+ PING controller (10.0.0.11) 56(84) bytes of data.
+ 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- controller ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+.. note::
+
+ Your distribution enables a restrictive :term:`firewall` by
+ default. During the installation process, certain steps will fail
+ unless you alter or disable the firewall. For more information
+ about securing your environment, refer to the `OpenStack Security
+ Guide `_.
+
+
diff --git a/doc/install-guide/source/environment-networking-verify-ubuntu.rst b/doc/install-guide/source/environment-networking-verify-ubuntu.rst
new file mode 100644
index 0000000000..3da1e6204e
--- /dev/null
+++ b/doc/install-guide/source/environment-networking-verify-ubuntu.rst
@@ -0,0 +1,87 @@
+Verify connectivity
+-------------------
+
+We recommend that you verify network connectivity to the Internet and
+among the nodes before proceeding further.
+
+#. From the *controller* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *controller* node, test access to the management interface on the
+ *compute* node:
+
+ .. code-block:: console
+
+ # ping -c 4 compute1
+
+ PING compute1 (10.0.0.31) 56(84) bytes of data.
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- compute1 ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+#. From the *compute* node, test access to the Internet:
+
+ .. code-block:: console
+
+ # ping -c 4 openstack.org
+
+ PING openstack.org (174.143.194.225) 56(84) bytes of data.
+ 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
+ 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
+ 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
+
+ --- openstack.org ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
+ rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
+
+ .. end
+
+#. From the *compute* node, test access to the management interface on the
+ *controller* node:
+
+ .. code-block:: console
+
+ # ping -c 4 controller
+
+ PING controller (10.0.0.11) 56(84) bytes of data.
+ 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms
+ 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms
+
+ --- controller ping statistics ---
+ 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
+ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
+
+ .. end
+
+.. note::
+
+ Your distribution does not enable a restrictive :term:`firewall` by
+ default. For more information about securing your environment,
+ refer to the `OpenStack Security Guide
+ `_.
+
diff --git a/doc/install-guide/source/environment-networking-verify.rst b/doc/install-guide/source/environment-networking-verify.rst
index 7ed187fca9..4c08c9ffb7 100644
--- a/doc/install-guide/source/environment-networking-verify.rst
+++ b/doc/install-guide/source/environment-networking-verify.rst
@@ -1,100 +1,7 @@
Verify connectivity
-------------------
-We recommend that you verify network connectivity to the Internet and
-among the nodes before proceeding further.
+.. toctree::
+ :glob:
-#. From the *controller* node, test access to the Internet:
-
- .. code-block:: console
-
- # ping -c 4 openstack.org
-
- PING openstack.org (174.143.194.225) 56(84) bytes of data.
- 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
- 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
- 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
- 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
-
- --- openstack.org ping statistics ---
- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
- rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
-
- .. end
-
-#. From the *controller* node, test access to the management interface on the
- *compute* node:
-
- .. code-block:: console
-
- # ping -c 4 compute1
-
- PING compute1 (10.0.0.31) 56(84) bytes of data.
- 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms
- 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms
- 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms
- 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms
-
- --- compute1 ping statistics ---
- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
- rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
-
- .. end
-
-#. From the *compute* node, test access to the Internet:
-
- .. code-block:: console
-
- # ping -c 4 openstack.org
-
- PING openstack.org (174.143.194.225) 56(84) bytes of data.
- 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms
- 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms
- 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms
- 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms
-
- --- openstack.org ping statistics ---
- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms
- rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms
-
- .. end
-
-#. From the *compute* node, test access to the management interface on the
- *controller* node:
-
- .. code-block:: console
-
- # ping -c 4 controller
-
- PING controller (10.0.0.11) 56(84) bytes of data.
- 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms
- 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms
- 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms
- 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms
-
- --- controller ping statistics ---
- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
- rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms
-
- .. end
-
-.. note::
-
- .. only:: rdo or obs
-
- Your distribution enables a restrictive :term:`firewall` by
- default. During the installation process, certain steps will
- fail unless you alter or disable the firewall. For more
- information about securing your environment, refer to the
- `OpenStack Security Guide `_.
-
- .. endonly
-
- .. only:: ubuntu or debian
-
- Your distribution does not enable a restrictive :term:`firewall`
- by default. For more information about securing your environment,
- refer to the
- `OpenStack Security Guide `_.
-
- .. endonly
+ environment-networking-verify-*
diff --git a/doc/install-guide/source/environment-networking.rst b/doc/install-guide/source/environment-networking.rst
index 0667c29e4b..4d263f5a5b 100644
--- a/doc/install-guide/source/environment-networking.rst
+++ b/doc/install-guide/source/environment-networking.rst
@@ -3,141 +3,9 @@
Host networking
~~~~~~~~~~~~~~~
-.. only:: ubuntu
-
- After installing the operating system on each node for the architecture
- that you choose to deploy, you must configure the network interfaces. We
- recommend that you disable any automated network management tools and
- manually edit the appropriate configuration files for your distribution.
- For more information on how to configure networking on your
- distribution, see the `documentation `_.
-
-.. endonly
-
-.. only:: debian
-
- After installing the operating system on each node for the architecture
- that you choose to deploy, you must configure the network interfaces. We
- recommend that you disable any automated network management tools and
- manually edit the appropriate configuration files for your distribution.
- For more information on how to configure networking on your
- distribution, see the `documentation
- `__ .
-
-.. endonly
-
-.. only:: rdo
-
- After installing the operating system on each node for the architecture
- that you choose to deploy, you must configure the network interfaces. We
- recommend that you disable any automated network management tools and
- manually edit the appropriate configuration files for your distribution.
- For more information on how to configure networking on your
- distribution, see the `documentation
- `__ .
-
-.. endonly
-
-.. only:: obs
-
- After installing the operating system on each node for the architecture
- that you choose to deploy, you must configure the network interfaces. We
- recommend that you disable any automated network management tools and
- manually edit the appropriate configuration files for your distribution.
- For more information on how to configure networking on your
- distribution, see the `SLES 12
- `__
- or `openSUSE
- `__
- documentation.
-
-.. endonly
-
-All nodes require Internet access for administrative purposes such as package
-installation, security updates, :term:`DNS `, and
-:term:`NTP `. In most cases, nodes should obtain
-Internet access through the management network interface.
-To highlight the importance of network separation, the example architectures
-use `private address space `__ for the
-management network and assume that the physical network infrastructure
-provides Internet access via :term:`NAT `
-or other methods. The example architectures use routable IP address space for
-the provider (external) network and assume that the physical network
-infrastructure provides direct Internet access.
-
-In the provider networks architecture, all instances attach directly
-to the provider network. In the self-service (private) networks architecture,
-instances can attach to a self-service or provider network. Self-service
-networks can reside entirely within OpenStack or provide some level of external
-network access using :term:`NAT ` through
-the provider network.
-
-.. _figure-networklayout:
-
-.. figure:: figures/networklayout.png
- :alt: Network layout
-
-The example architectures assume use of the following networks:
-
-* Management on 10.0.0.0/24 with gateway 10.0.0.1
-
- This network requires a gateway to provide Internet access to all
- nodes for administrative purposes such as package installation,
- security updates, :term:`DNS `, and
- :term:`NTP `.
-
-* Provider on 203.0.113.0/24 with gateway 203.0.113.1
-
- This network requires a gateway to provide Internet access to
- instances in your OpenStack environment.
-
-You can modify these ranges and gateways to work with your particular
-network infrastructure.
-
-Network interface names vary by distribution. Traditionally,
-interfaces use ``eth`` followed by a sequential number. To cover all
-variations, this guide refers to the first interface as the
-interface with the lowest number and the second interface as the
-interface with the highest number.
-
-Unless you intend to use the exact configuration provided in this
-example architecture, you must modify the networks in this procedure to
-match your environment. Each node must resolve the other nodes by
-name in addition to IP address. For example, the ``controller`` name must
-resolve to ``10.0.0.11``, the IP address of the management interface on
-the controller node.
-
-.. warning::
-
- Reconfiguring network interfaces will interrupt network
- connectivity. We recommend using a local terminal session for these
- procedures.
-
-.. note::
-
- .. only:: rdo or obs
-
- Your distribution enables a restrictive :term:`firewall` by
- default. During the installation process, certain steps will
- fail unless you alter or disable the firewall. For more
- information about securing your environment, refer to the
- `OpenStack Security Guide `_.
-
- .. endonly
-
- .. only:: ubuntu or debian
-
- Your distribution does not enable a restrictive :term:`firewall`
- by default. For more information about securing your environment,
- refer to the
- `OpenStack Security Guide `_.
-
- .. endonly
-
.. toctree::
- :maxdepth: 1
- environment-networking-controller.rst
- environment-networking-compute.rst
- environment-networking-storage-cinder.rst
- environment-networking-verify.rst
+ environment-networking-debian
+ environment-networking-obs
+ environment-networking-rdo
+ environment-networking-ubuntu
diff --git a/doc/install-guide/source/environment-ntp-controller-debian.rst b/doc/install-guide/source/environment-ntp-controller-debian.rst
new file mode 100644
index 0000000000..0461b5dbe4
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-controller-debian.rst
@@ -0,0 +1,59 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Perform these steps on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install chrony
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/chrony/chrony.conf`` file and add, change, or remove
+ these keys as necessary for your environment:
+
+ .. code-block:: shell
+
+ server NTP_SERVER iburst
+
+ .. end
+
+ Replace ``NTP_SERVER`` with the hostname or IP address of a suitable, more
+ accurate (lower stratum) NTP server. The configuration supports multiple
+ ``server`` keys.
+
+ .. note::
+
+ By default, the controller node synchronizes the time via a pool of
+ public servers. However, you can optionally configure alternative
+ servers such as those provided by your organization.
+
+3. To enable other nodes to connect to the chrony daemon on the
+ controller node, add this key to the ``/etc/chrony/chrony.conf``
+ file:
+
+ .. code-block:: shell
+
+ allow 10.0.0.0/24
+
+ .. end
+
+4. Restart the NTP service:
+
+ .. code-block:: console
+
+ # service chrony restart
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-ntp-controller-obs.rst b/doc/install-guide/source/environment-ntp-controller-obs.rst
new file mode 100644
index 0000000000..8d09c7d7f3
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-controller-obs.rst
@@ -0,0 +1,61 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Perform these steps on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+
+
+.. code-block:: console
+
+ # zypper install chrony
+
+.. end
+
+
+
+
+2. Edit the ``/etc/chrony.conf`` file and add, change, or remove these
+ keys as necessary for your environment:
+
+ .. code-block:: shell
+
+ server NTP_SERVER iburst
+
+ .. end
+
+ Replace ``NTP_SERVER`` with the hostname or IP address of a suitable, more
+ accurate (lower stratum) NTP server. The configuration supports multiple
+ ``server`` keys.
+
+ .. note::
+
+ By default, the controller node synchronizes the time via a pool of
+ public servers. However, you can optionally configure alternative
+ servers such as those provided by your organization.
+
+3. To enable other nodes to connect to the chrony daemon on the
+ controller node, add this key to the ``/etc/chrony.conf`` file:
+
+ .. code-block:: shell
+
+ allow 10.0.0.0/24
+
+ .. end
+
+ If necessary, replace ``10.0.0.0/24`` with a description of your subnet.
+
+4. Start the NTP service and configure it to start when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable chronyd.service
+ # systemctl start chronyd.service
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-ntp-controller-rdo.rst b/doc/install-guide/source/environment-ntp-controller-rdo.rst
new file mode 100644
index 0000000000..7dad985713
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-controller-rdo.rst
@@ -0,0 +1,61 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Perform these steps on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+
+.. code-block:: console
+
+ # yum install chrony
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/chrony.conf`` file and add, change, or remove these
+ keys as necessary for your environment:
+
+ .. code-block:: shell
+
+ server NTP_SERVER iburst
+
+ .. end
+
+ Replace ``NTP_SERVER`` with the hostname or IP address of a suitable, more
+ accurate (lower stratum) NTP server. The configuration supports multiple
+ ``server`` keys.
+
+ .. note::
+
+ By default, the controller node synchronizes the time via a pool of
+ public servers. However, you can optionally configure alternative
+ servers such as those provided by your organization.
+
+3. To enable other nodes to connect to the chrony daemon on the
+ controller node, add this key to the ``/etc/chrony.conf`` file:
+
+ .. code-block:: shell
+
+ allow 10.0.0.0/24
+
+ .. end
+
+ If necessary, replace ``10.0.0.0/24`` with a description of your subnet.
+
+4. Start the NTP service and configure it to start when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable chronyd.service
+ # systemctl start chronyd.service
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-ntp-controller-ubuntu.rst b/doc/install-guide/source/environment-ntp-controller-ubuntu.rst
new file mode 100644
index 0000000000..0461b5dbe4
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-controller-ubuntu.rst
@@ -0,0 +1,59 @@
+Controller node
+~~~~~~~~~~~~~~~
+
+Perform these steps on the controller node.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install chrony
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/chrony/chrony.conf`` file and add, change, or remove
+ these keys as necessary for your environment:
+
+ .. code-block:: shell
+
+ server NTP_SERVER iburst
+
+ .. end
+
+ Replace ``NTP_SERVER`` with the hostname or IP address of a suitable, more
+ accurate (lower stratum) NTP server. The configuration supports multiple
+ ``server`` keys.
+
+ .. note::
+
+ By default, the controller node synchronizes the time via a pool of
+ public servers. However, you can optionally configure alternative
+ servers such as those provided by your organization.
+
+3. To enable other nodes to connect to the chrony daemon on the
+ controller node, add this key to the ``/etc/chrony/chrony.conf``
+ file:
+
+ .. code-block:: shell
+
+ allow 10.0.0.0/24
+
+ .. end
+
+4. Restart the NTP service:
+
+ .. code-block:: console
+
+ # service chrony restart
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-ntp-controller.rst b/doc/install-guide/source/environment-ntp-controller.rst
index 7744660337..3a4e0a126d 100644
--- a/doc/install-guide/source/environment-ntp-controller.rst
+++ b/doc/install-guide/source/environment-ntp-controller.rst
@@ -3,122 +3,7 @@
Controller node
~~~~~~~~~~~~~~~
-Perform these steps on the controller node.
+.. toctree::
+ :glob:
-Install and configure components
---------------------------------
-
-1. Install the packages:
-
- .. only:: ubuntu or debian
-
- .. code-block:: console
-
- # apt install chrony
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum install chrony
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper install chrony
-
- .. end
-
- .. endonly
-
-.. only:: ubuntu or debian
-
- 2. Edit the ``/etc/chrony/chrony.conf`` file and add, change, or remove
- these keys as necessary for your environment:
-
- .. code-block:: shell
-
- server NTP_SERVER iburst
-
- .. end
-
- Replace ``NTP_SERVER`` with the hostname or IP address of a suitable more
- accurate (lower stratum) NTP server. The configuration supports multiple
- ``server`` keys.
-
- .. note::
-
- By default, the controller node synchronizes the time via a pool of
- public servers. However, you can optionally configure alternative
- servers such as those provided by your organization.
-
- 3. To enable other nodes to connect to the chrony daemon on the controller node,
- add this key to the ``/etc/chrony/chrony.conf`` file:
-
- .. code-block:: shell
-
- allow 10.0.0.0/24
-
- .. end
-
- 4. Restart the NTP service:
-
- .. code-block:: console
-
- # service chrony restart
-
- .. end
-
-.. endonly
-
-.. only:: rdo or obs
-
- 2. Edit the ``/etc/chrony.conf`` file and add, change, or remove these
- keys as necessary for your environment:
-
- .. code-block:: shell
-
- server NTP_SERVER iburst
-
- .. end
-
- Replace ``NTP_SERVER`` with the hostname or IP address of a suitable more
- accurate (lower stratum) NTP server. The configuration supports multiple
- ``server`` keys.
-
- .. note::
-
- By default, the controller node synchronizes the time via a pool of
- public servers. However, you can optionally configure alternative
- servers such as those provided by your organization.
-
- 3. To enable other nodes to connect to the chrony daemon on the controller node,
- add this key to the ``/etc/chrony.conf`` file:
-
- .. code-block:: shell
-
- allow 10.0.0.0/24
-
- .. end
-
- If necessary, replace ``10.0.0.0/24`` with a description of your subnet.
-
- 4. Start the NTP service and configure it to start when the system boots:
-
- .. code-block:: console
-
- # systemctl enable chronyd.service
- # systemctl start chronyd.service
-
- .. end
-
-.. endonly
+ environment-ntp-controller-*
diff --git a/doc/install-guide/source/environment-ntp-other-debian.rst b/doc/install-guide/source/environment-ntp-other-debian.rst
new file mode 100644
index 0000000000..aca7e8b7e3
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-other-debian.rst
@@ -0,0 +1,43 @@
+Other nodes
+~~~~~~~~~~~
+
+Other nodes reference the controller node for clock synchronization.
+Perform these steps on all other nodes.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install chrony
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/chrony/chrony.conf`` file and comment out or remove all
+ but one ``server`` key. Change it to reference the controller node:
+
+ .. path /etc/chrony/chrony.conf
+ .. code-block:: shell
+
+ server controller iburst
+
+ .. end
+
+3. Comment out the ``pool 2.debian.pool.ntp.org offline iburst`` line.
+
+4. Restart the NTP service:
+
+ .. code-block:: console
+
+ # service chrony restart
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-ntp-other-obs.rst b/doc/install-guide/source/environment-ntp-other-obs.rst
new file mode 100644
index 0000000000..11e5401fda
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-other-obs.rst
@@ -0,0 +1,42 @@
+Other nodes
+~~~~~~~~~~~
+
+Other nodes reference the controller node for clock synchronization.
+Perform these steps on all other nodes.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+
+
+.. code-block:: console
+
+ # zypper install chrony
+
+.. end
+
+
+
+
+2. Edit the ``/etc/chrony.conf`` file and comment out or remove all but one
+ ``server`` key. Change it to reference the controller node:
+
+ .. path /etc/chrony.conf
+ .. code-block:: shell
+
+ server controller iburst
+
+ .. end
+
+3. Start the NTP service and configure it to start when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable chronyd.service
+ # systemctl start chronyd.service
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-ntp-other-rdo.rst b/doc/install-guide/source/environment-ntp-other-rdo.rst
new file mode 100644
index 0000000000..c123bdbe66
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-other-rdo.rst
@@ -0,0 +1,42 @@
+Other nodes
+~~~~~~~~~~~
+
+Other nodes reference the controller node for clock synchronization.
+Perform these steps on all other nodes.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+
+.. code-block:: console
+
+ # yum install chrony
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/chrony.conf`` file and comment out or remove all but one
+ ``server`` key. Change it to reference the controller node:
+
+ .. path /etc/chrony.conf
+ .. code-block:: shell
+
+ server controller iburst
+
+ .. end
+
+3. Start the NTP service and configure it to start when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable chronyd.service
+ # systemctl start chronyd.service
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-ntp-other-ubuntu.rst b/doc/install-guide/source/environment-ntp-other-ubuntu.rst
new file mode 100644
index 0000000000..aca7e8b7e3
--- /dev/null
+++ b/doc/install-guide/source/environment-ntp-other-ubuntu.rst
@@ -0,0 +1,43 @@
+Other nodes
+~~~~~~~~~~~
+
+Other nodes reference the controller node for clock synchronization.
+Perform these steps on all other nodes.
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install chrony
+
+.. end
+
+
+
+
+
+2. Edit the ``/etc/chrony/chrony.conf`` file and comment out or remove all
+ but one ``server`` key. Change it to reference the controller node:
+
+ .. path /etc/chrony/chrony.conf
+ .. code-block:: shell
+
+ server controller iburst
+
+ .. end
+
+3. Comment out the ``pool 2.debian.pool.ntp.org offline iburst`` line.
+
+4. Restart the NTP service:
+
+ .. code-block:: console
+
+ # service chrony restart
+
+ .. end
+
+
diff --git a/doc/install-guide/source/environment-ntp-other.rst b/doc/install-guide/source/environment-ntp-other.rst
index a2b5d255a2..4d3d991831 100644
--- a/doc/install-guide/source/environment-ntp-other.rst
+++ b/doc/install-guide/source/environment-ntp-other.rst
@@ -6,84 +6,7 @@ Other nodes
Other nodes reference the controller node for clock synchronization.
Perform these steps on all other nodes.
-Install and configure components
---------------------------------
+.. toctree::
+ :glob:
-1. Install the packages:
-
- .. only:: ubuntu or debian
-
- .. code-block:: console
-
- # apt install chrony
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum install chrony
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper install chrony
-
- .. end
-
- .. endonly
-
-.. only:: ubuntu or debian
-
- 2. Edit the ``/etc/chrony/chrony.conf`` file and comment out or remove all
- but one ``server`` key. Change it to reference the controller node:
-
- .. path /etc/chrony/chrony.conf
- .. code-block:: shell
-
- server controller iburst
-
- .. end
-
- 3. Comment out the ``pool 2.debian.pool.ntp.org offline iburst`` line.
-
- 4. Restart the NTP service:
-
- .. code-block:: console
-
- # service chrony restart
-
- .. end
-
-.. endonly
-
-.. only:: rdo or obs
-
- 2. Edit the ``/etc/chrony.conf`` file and comment out or remove all but one
- ``server`` key. Change it to reference the controller node:
-
- .. path /etc/chrony.conf
- .. code-block:: shell
-
- server controller iburst
-
- .. end
-
- 3. Start the NTP service and configure it to start when the system boots:
-
- .. code-block:: console
-
- # systemctl enable chronyd.service
- # systemctl start chronyd.service
-
- .. end
-
-.. endonly
+ environment-ntp-other-*
diff --git a/doc/install-guide/source/environment-obs.rst b/doc/install-guide/source/environment-obs.rst
new file mode 100644
index 0000000000..30c6b30960
--- /dev/null
+++ b/doc/install-guide/source/environment-obs.rst
@@ -0,0 +1,81 @@
+===========
+Environment
+===========
+
+This section explains how to configure the controller node and one compute
+node using the example architecture.
+
+Although most environments include Identity, Image service, Compute, at least
+one networking service, and the Dashboard, the Object Storage service can
+operate independently. If your use case only involves Object Storage, you can
+skip to `Object Storage Installation Guide
+`_
+after configuring the appropriate nodes for it.
+
+You must use an account with administrative privileges to configure each node.
+Either run the commands as the ``root`` user or configure the ``sudo``
+utility.
+
+
+The :command:`systemctl enable` call on openSUSE outputs a warning message
+when the service uses SysV Init scripts instead of native systemd files. This
+warning can be ignored.
+
+
+For best performance, we recommend that your environment meets or exceeds
+the hardware requirements in :ref:`figure-hwreqs`.
+
+The following minimum requirements should support a proof-of-concept
+environment with core services and several :term:`CirrOS` instances:
+
+* Controller Node: 1 processor, 4 GB memory, and 5 GB storage
+
+* Compute Node: 1 processor, 2 GB memory, and 10 GB storage
+
+As the number of OpenStack services and virtual machines increase, so do the
+hardware requirements for the best performance. If performance degrades after
+enabling additional services or virtual machines, consider adding hardware
+resources to your environment.
+
+To minimize clutter and provide more resources for OpenStack, we recommend
+a minimal installation of your Linux distribution. Also, you must install a
+64-bit version of your distribution on each node.
+
+A single disk partition on each node works for most basic installations.
+However, you should consider :term:`Logical Volume Manager (LVM)` for
+installations with optional services such as Block Storage.
+
+For first-time installation and testing purposes, many users select to build
+each host as a :term:`virtual machine (VM)`. The primary benefits of VMs
+include the following:
+
+* One physical server can support multiple nodes, each with almost any
+ number of network interfaces.
+
+* Ability to take periodic "snapshots" throughout the installation
+ process and "roll back" to a working configuration in the event of a
+ problem.
+
+However, VMs will reduce performance of your instances, particularly if
+your hypervisor and/or processor lacks support for hardware acceleration
+of nested VMs.
+
+.. note::
+
+ If you choose to install on VMs, make sure your hypervisor provides
+ a way to disable MAC address filtering on the provider network
+ interface.
+
+For more information about system requirements, see the `OpenStack
+Operations Guide `_.
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-security.rst
+ environment-networking.rst
+ environment-ntp.rst
+ environment-packages.rst
+ environment-sql-database.rst
+ environment-messaging.rst
+ environment-memcached.rst
diff --git a/doc/install-guide/source/environment-packages-debian.rst b/doc/install-guide/source/environment-packages-debian.rst
new file mode 100644
index 0000000000..9c39774ae4
--- /dev/null
+++ b/doc/install-guide/source/environment-packages-debian.rst
@@ -0,0 +1,88 @@
+OpenStack packages
+~~~~~~~~~~~~~~~~~~
+
+Distributions release OpenStack packages as part of the distribution or
+using other methods because of differing release schedules. Perform
+these procedures on all nodes.
+
+.. note::
+
+   The setup of OpenStack packages described here needs to be done on
+ all nodes: controller, compute, and Block Storage nodes.
+
+.. warning::
+
+ Your hosts must contain the latest versions of base installation
+ packages available for your distribution before proceeding further.
+
+.. note::
+
+ Disable or remove any automatic update services because they can
+ impact your OpenStack environment.
+
+
+
+
+
+Enable the backports repository
+-------------------------------
+
+The Newton release is available directly through the official
+Debian backports repository. To use this repository, follow
+the instructions from the official
+`Debian website `_,
+which basically suggest performing the following steps:
+
+#. On all nodes, add the Debian 8 (Jessie) backports repository to
+ the source list:
+
+ .. code-block:: console
+
+ # echo "deb http://http.debian.net/debian jessie-backports main" \
+ >>/etc/apt/sources.list
+
+ .. end
+
+ .. note::
+
+ Later you can use the following command to install a package:
+
+ .. code-block:: console
+
+         # apt -t jessie-backports install PACKAGE
+
+ .. end
+
+
+Finalize the installation
+-------------------------
+
+1. Upgrade the packages on all nodes:
+
+
+.. code-block:: console
+
+ # apt update && apt dist-upgrade
+
+.. end
+
+
+
+
+ .. note::
+
+ If the upgrade process includes a new kernel, reboot your host
+ to activate it.
+
+2. Install the OpenStack client:
+
+
+.. code-block:: console
+
+ # apt install python-openstackclient
+
+.. end
+
+
+
+
diff --git a/doc/install-guide/source/environment-packages-obs.rst b/doc/install-guide/source/environment-packages-obs.rst
new file mode 100644
index 0000000000..d89c143ef2
--- /dev/null
+++ b/doc/install-guide/source/environment-packages-obs.rst
@@ -0,0 +1,110 @@
+OpenStack packages
+~~~~~~~~~~~~~~~~~~
+
+Distributions release OpenStack packages as part of the distribution or
+using other methods because of differing release schedules. Perform
+these procedures on all nodes.
+
+.. note::
+
+   The setup of OpenStack packages described here needs to be done on
+ all nodes: controller, compute, and Block Storage nodes.
+
+.. warning::
+
+ Your hosts must contain the latest versions of base installation
+ packages available for your distribution before proceeding further.
+
+.. note::
+
+ Disable or remove any automatic update services because they can
+ impact your OpenStack environment.
+
+
+
+
+Enable the OpenStack repository
+-------------------------------
+
+* Enable the Open Build Service repositories based on your openSUSE or
+ SLES version:
+
+ **On openSUSE:**
+
+ .. code-block:: console
+
+ # zypper addrepo -f obs://Cloud:OpenStack:Ocata/openSUSE_Leap_42.2 Ocata
+
+ .. end
+
+ .. note::
+
+ The openSUSE distribution uses the concept of patterns to
+ represent collections of packages. If you selected 'Minimal
+ Server Selection (Text Mode)' during the initial installation,
+ you may be presented with a dependency conflict when you
+ attempt to install the OpenStack packages. To avoid this,
+ remove the minimal\_base-conflicts package:
+
+ .. code-block:: console
+
+ # zypper rm patterns-openSUSE-minimal_base-conflicts
+
+ .. end
+
+ **On SLES:**
+
+ .. code-block:: console
+
+ # zypper addrepo -f obs://Cloud:OpenStack:Ocata/SLE_12_SP2 Ocata
+
+ .. end
+
+ .. note::
+
+ The packages are signed by GPG key ``D85F9316``. You should
+ verify the fingerprint of the imported GPG key before using it.
+
+ .. code-block:: console
+
+ Key Name: Cloud:OpenStack OBS Project
+ Key Fingerprint: 35B34E18 ABC1076D 66D5A86B 893A90DA D85F9316
+ Key Created: 2015-12-16T16:48:37 CET
+ Key Expires: 2018-02-23T16:48:37 CET
+
+ .. end
+
+
+
+Finalize the installation
+-------------------------
+
+1. Upgrade the packages on all nodes:
+
+
+
+
+.. code-block:: console
+
+ # zypper refresh && zypper dist-upgrade
+
+.. end
+
+
+ .. note::
+
+ If the upgrade process includes a new kernel, reboot your host
+ to activate it.
+
+2. Install the OpenStack client:
+
+
+
+
+.. code-block:: console
+
+ # zypper install python-openstackclient
+
+.. end
+
+
diff --git a/doc/install-guide/source/environment-packages-rdo.rst b/doc/install-guide/source/environment-packages-rdo.rst
new file mode 100644
index 0000000000..6d5359caab
--- /dev/null
+++ b/doc/install-guide/source/environment-packages-rdo.rst
@@ -0,0 +1,127 @@
+OpenStack packages
+~~~~~~~~~~~~~~~~~~
+
+Distributions release OpenStack packages as part of the distribution or
+using other methods because of differing release schedules. Perform
+these procedures on all nodes.
+
+.. note::
+
+   The setup of OpenStack packages described here needs to be done on
+ all nodes: controller, compute, and Block Storage nodes.
+
+.. warning::
+
+ Your hosts must contain the latest versions of base installation
+ packages available for your distribution before proceeding further.
+
+.. note::
+
+ Disable or remove any automatic update services because they can
+ impact your OpenStack environment.
+
+
+
+Prerequisites
+-------------
+
+.. warning::
+
+ We recommend disabling EPEL when using RDO packages due to updates
+ in EPEL breaking backwards compatibility. Or, preferably pin package
+ versions using the ``yum-versionlock`` plugin.
+
+.. note::
+
+ The following steps apply to RHEL only. CentOS does not require these
+ steps.
+
+#. When using RHEL, it is assumed that you have registered your system using
+ Red Hat Subscription Management and that you have the
+ ``rhel-7-server-rpms`` repository enabled by default.
+
+ For more information on registering the system, see the
+ `Red Hat Enterprise Linux 7 System Administrator's Guide
+ `_.
+
+#. In addition to ``rhel-7-server-rpms``, you also need to have the
+ ``rhel-7-server-optional-rpms``, ``rhel-7-server-extras-rpms``, and
+ ``rhel-7-server-rh-common-rpms`` repositories enabled:
+
+ .. code-block:: console
+
+ # subscription-manager repos --enable=rhel-7-server-optional-rpms \
+ --enable=rhel-7-server-extras-rpms --enable=rhel-7-server-rh-common-rpms
+
+ .. end
+
+
+
+Enable the OpenStack repository
+-------------------------------
+
+* On CentOS, the ``extras`` repository provides the RPM that enables the
+ OpenStack repository. CentOS includes the ``extras`` repository by
+ default, so you can simply install the package to enable the OpenStack
+ repository.
+
+ .. code-block:: console
+
+ # yum install centos-release-openstack-ocata
+
+ .. end
+
+* On RHEL, download and install the RDO repository RPM to enable the
+ OpenStack repository.
+
+ .. code-block:: console
+
+ # yum install https://rdoproject.org/repos/rdo-release.rpm
+
+ .. end
+
+
+
+Finalize the installation
+-------------------------
+
+1. Upgrade the packages on all nodes:
+
+
+
+.. code-block:: console
+
+ # yum upgrade
+
+.. end
+
+
+
+ .. note::
+
+ If the upgrade process includes a new kernel, reboot your host
+ to activate it.
+
+2. Install the OpenStack client:
+
+
+
+.. code-block:: console
+
+ # yum install python-openstackclient
+
+.. end
+
+
+
+
+3. RHEL and CentOS enable :term:`SELinux` by default. Install the
+ ``openstack-selinux`` package to automatically manage security
+ policies for OpenStack services:
+
+ .. code-block:: console
+
+ # yum install openstack-selinux
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-packages-ubuntu.rst b/doc/install-guide/source/environment-packages-ubuntu.rst
new file mode 100644
index 0000000000..aa94b0229a
--- /dev/null
+++ b/doc/install-guide/source/environment-packages-ubuntu.rst
@@ -0,0 +1,69 @@
+OpenStack packages
+~~~~~~~~~~~~~~~~~~
+
+Distributions release OpenStack packages as part of the distribution or
+using other methods because of differing release schedules. Perform
+these procedures on all nodes.
+
+.. note::
+
+   The setup of OpenStack packages described here needs to be done on
+ all nodes: controller, compute, and Block Storage nodes.
+
+.. warning::
+
+ Your hosts must contain the latest versions of base installation
+ packages available for your distribution before proceeding further.
+
+.. note::
+
+ Disable or remove any automatic update services because they can
+ impact your OpenStack environment.
+
+
+Enable the OpenStack repository
+-------------------------------
+
+.. code-block:: console
+
+ # apt install software-properties-common
+ # add-apt-repository cloud-archive:ocata
+
+.. end
+
+
+
+
+
+Finalize the installation
+-------------------------
+
+1. Upgrade the packages on all nodes:
+
+
+.. code-block:: console
+
+ # apt update && apt dist-upgrade
+
+.. end
+
+
+
+
+ .. note::
+
+ If the upgrade process includes a new kernel, reboot your host
+ to activate it.
+
+2. Install the OpenStack client:
+
+
+.. code-block:: console
+
+ # apt install python-openstackclient
+
+.. end
+
+
+
+
diff --git a/doc/install-guide/source/environment-packages.rst b/doc/install-guide/source/environment-packages.rst
index 2ed6279e55..86a44ab1d9 100644
--- a/doc/install-guide/source/environment-packages.rst
+++ b/doc/install-guide/source/environment-packages.rst
@@ -20,252 +20,7 @@ these procedures on all nodes.
Disable or remove any automatic update services because they can
impact your OpenStack environment.
-.. only:: ubuntu
+.. toctree::
+ :glob:
- Enable the OpenStack repository
- -------------------------------
-
- .. code-block:: console
-
- # apt install software-properties-common
- # add-apt-repository cloud-archive:ocata
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- Prerequisites
- -------------
-
- .. warning::
-
- We recommend disabling EPEL when using RDO packages due to updates
- in EPEL breaking backwards compatibility. Or, preferably pin package
- versions using the ``yum-versionlock`` plugin.
-
- .. note::
-
- The following steps apply to RHEL only. CentOS does not require these
- steps.
-
- #. When using RHEL, it is assumed that you have registered your system using
- Red Hat Subscription Management and that you have the
- ``rhel-7-server-rpms`` repository enabled by default.
-
- For more information on registering the system, see the
- `Red Hat Enterprise Linux 7 System Administrator's Guide
- `_.
-
- #. In addition to ``rhel-7-server-rpms``, you also need to have the
- ``rhel-7-server-optional-rpms``, ``rhel-7-server-extras-rpms``, and
- ``rhel-7-server-rh-common-rpms`` repositories enabled:
-
- .. code-block:: console
-
- # subscription-manager repos --enable=rhel-7-server-optional-rpms \
- --enable=rhel-7-server-extras-rpms --enable=rhel-7-server-rh-common-rpms
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- Enable the OpenStack repository
- -------------------------------
-
- * On CentOS, the ``extras`` repository provides the RPM that enables the
- OpenStack repository. CentOS includes the ``extras`` repository by
- default, so you can simply install the package to enable the OpenStack
- repository.
-
- .. code-block:: console
-
- # yum install centos-release-openstack-ocata
-
- .. end
-
- * On RHEL, download and install the RDO repository RPM to enable the
- OpenStack repository.
-
- .. code-block:: console
-
- # yum install https://rdoproject.org/repos/rdo-release.rpm
-
- .. end
-
-.. only:: obs
-
- Enable the OpenStack repository
- -------------------------------
-
- * Enable the Open Build Service repositories based on your openSUSE or
- SLES version:
-
- **On openSUSE:**
-
- .. code-block:: console
-
- # zypper addrepo -f obs://Cloud:OpenStack:Ocata/openSUSE_Leap_42.2 Ocata
-
- .. end
-
- .. note::
-
- The openSUSE distribution uses the concept of patterns to
- represent collections of packages. If you selected 'Minimal
- Server Selection (Text Mode)' during the initial installation,
- you may be presented with a dependency conflict when you
- attempt to install the OpenStack packages. To avoid this,
- remove the minimal\_base-conflicts package:
-
- .. code-block:: console
-
- # zypper rm patterns-openSUSE-minimal_base-conflicts
-
- .. end
-
- **On SLES:**
-
- .. code-block:: console
-
- # zypper addrepo -f obs://Cloud:OpenStack:Ocata/SLE_12_SP2 Ocata
-
- .. end
-
- .. note::
-
- The packages are signed by GPG key ``D85F9316``. You should
- verify the fingerprint of the imported GPG key before using it.
-
- .. code-block:: console
-
- Key Name: Cloud:OpenStack OBS Project
- Key Fingerprint: 35B34E18 ABC1076D 66D5A86B 893A90DA D85F9316
- Key Created: 2015-12-16T16:48:37 CET
- Key Expires: 2018-02-23T16:48:37 CET
-
- .. end
-
-.. endonly
-
-.. only:: debian
-
- Enable the backports repository
- -------------------------------
-
- The Newton release is available directly through the official
- Debian backports repository. To use this repository, follow
- the instruction from the official
- `Debian website `_,
- which basically suggest doing the following steps:
-
- #. On all nodes, adding the Debian 8 (Jessie) backport repository to
- the source list:
-
- .. code-block:: console
-
- # echo "deb http://http.debian.net/debian jessie-backports main" \
- >>/etc/apt/sources.list
-
- .. end
-
- .. note::
-
- Later you can use the following command to install a package:
-
- .. code-block:: console
-
- # apt -t jessie-backports install ``PACKAGE``
-
- .. end
-
-.. endonly
-
-Finalize the installation
--------------------------
-
-1. Upgrade the packages on all nodes:
-
- .. only:: ubuntu or debian
-
- .. code-block:: console
-
- # apt update && apt dist-upgrade
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum upgrade
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper refresh && zypper dist-upgrade
-
- .. end
-
- .. endonly
-
- .. note::
-
- If the upgrade process includes a new kernel, reboot your host
- to activate it.
-
-2. Install the OpenStack client:
-
- .. only:: debian or ubuntu
-
- .. code-block:: console
-
- # apt install python-openstackclient
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum install python-openstackclient
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper install python-openstackclient
-
- .. end
-
- .. endonly
-
-.. only:: rdo
-
- 3. RHEL and CentOS enable :term:`SELinux` by default. Install the
- ``openstack-selinux`` package to automatically manage security
- policies for OpenStack services:
-
- .. code-block:: console
-
- # yum install openstack-selinux
-
- .. end
-
-.. endonly
+ environment-packages-*
diff --git a/doc/install-guide/source/environment-rdo.rst b/doc/install-guide/source/environment-rdo.rst
new file mode 100644
index 0000000000..2b7f561a0c
--- /dev/null
+++ b/doc/install-guide/source/environment-rdo.rst
@@ -0,0 +1,76 @@
+===========
+Environment
+===========
+
+This section explains how to configure the controller node and one compute
+node using the example architecture.
+
+Although most environments include Identity, Image service, Compute, at least
+one networking service, and the Dashboard, the Object Storage service can
+operate independently. If your use case only involves Object Storage, you can
+skip to the `Object Storage Installation Guide
+`_
+after configuring the appropriate nodes for it.
+
+You must use an account with administrative privileges to configure each node.
+Either run the commands as the ``root`` user or configure the ``sudo``
+utility.
+
+
+For best performance, we recommend that your environment meets or exceeds
+the hardware requirements in :ref:`figure-hwreqs`.
+
+The following minimum requirements should support a proof-of-concept
+environment with core services and several :term:`CirrOS` instances:
+
+* Controller Node: 1 processor, 4 GB memory, and 5 GB storage
+
+* Compute Node: 1 processor, 2 GB memory, and 10 GB storage
+
+As the number of OpenStack services and virtual machines increase, so do the
+hardware requirements for the best performance. If performance degrades after
+enabling additional services or virtual machines, consider adding hardware
+resources to your environment.
+
+To minimize clutter and provide more resources for OpenStack, we recommend
+a minimal installation of your Linux distribution. Also, you must install a
+64-bit version of your distribution on each node.
+
+A single disk partition on each node works for most basic installations.
+However, you should consider :term:`Logical Volume Manager (LVM)` for
+installations with optional services such as Block Storage.
+
+For first-time installation and testing purposes, many users choose to build
+each host as a :term:`virtual machine (VM)`. The primary benefits of VMs
+include the following:
+
+* One physical server can support multiple nodes, each with almost any
+ number of network interfaces.
+
+* Ability to take periodic "snapshots" throughout the installation
+ process and "roll back" to a working configuration in the event of a
+ problem.
+
+However, VMs will reduce performance of your instances, particularly if
+your hypervisor and/or processor lacks support for hardware acceleration
+of nested VMs.
+
+.. note::
+
+ If you choose to install on VMs, make sure your hypervisor provides
+ a way to disable MAC address filtering on the provider network
+ interface.
+
+For more information about system requirements, see the `OpenStack
+Operations Guide `_.
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-security.rst
+ environment-networking.rst
+ environment-ntp.rst
+ environment-packages.rst
+ environment-sql-database.rst
+ environment-messaging.rst
+ environment-memcached.rst
diff --git a/doc/install-guide/source/environment-sql-database-debian.rst b/doc/install-guide/source/environment-sql-database-debian.rst
new file mode 100644
index 0000000000..8acb0633a8
--- /dev/null
+++ b/doc/install-guide/source/environment-sql-database-debian.rst
@@ -0,0 +1,68 @@
+SQL database
+~~~~~~~~~~~~
+
+Most OpenStack services use an SQL database to store information. The
+database typically runs on the controller node. The procedures in this
+guide use MariaDB or MySQL depending on the distribution. OpenStack
+services also support other SQL databases including
+`PostgreSQL `__.
+
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+
+.. code-block:: console
+
+ # apt install mysql-server python-pymysql
+
+.. end
+
+
+
+
+
+2. Create and edit the ``/etc/mysql/conf.d/openstack.cnf`` file
+ and complete the following actions:
+
+ - Create a ``[mysqld]`` section, and set the ``bind-address``
+ key to the management IP address of the controller node to
+ enable access by other nodes via the management network. Set
+ additional keys to enable useful options and the UTF-8
+ character set:
+
+ .. path /etc/mysql/conf.d/openstack.cnf
+ .. code-block:: ini
+
+ [mysqld]
+ bind-address = 10.0.0.11
+
+ default-storage-engine = innodb
+ innodb_file_per_table = on
+ max_connections = 4096
+ collation-server = utf8_general_ci
+ character-set-server = utf8
+
+ .. end
+
+
+
+
+Finalize installation
+---------------------
+
+
+#. Restart the database service:
+
+ .. code-block:: console
+
+ # service mysql restart
+
+ .. end
+
+
+
+
diff --git a/doc/install-guide/source/environment-sql-database-obs.rst b/doc/install-guide/source/environment-sql-database-obs.rst
new file mode 100644
index 0000000000..d91f8abb71
--- /dev/null
+++ b/doc/install-guide/source/environment-sql-database-obs.rst
@@ -0,0 +1,82 @@
+SQL database
+~~~~~~~~~~~~
+
+Most OpenStack services use an SQL database to store information. The
+database typically runs on the controller node. The procedures in this
+guide use MariaDB or MySQL depending on the distribution. OpenStack
+services also support other SQL databases including
+`PostgreSQL `__.
+
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+
+
+
+.. code-block:: console
+
+ # zypper install mariadb-client mariadb python-PyMySQL
+
+.. end
+
+
+
+
+
+2. Create and edit the ``/etc/my.cnf.d/openstack.cnf`` file
+ and complete the following actions:
+
+ - Create a ``[mysqld]`` section, and set the ``bind-address``
+ key to the management IP address of the controller node to
+ enable access by other nodes via the management network. Set
+ additional keys to enable useful options and the UTF-8
+ character set:
+
+ .. path /etc/my.cnf.d/openstack.cnf
+ .. code-block:: ini
+
+ [mysqld]
+ bind-address = 10.0.0.11
+
+ default-storage-engine = innodb
+ innodb_file_per_table = on
+ max_connections = 4096
+ collation-server = utf8_general_ci
+ character-set-server = utf8
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+#. Start the database service and configure it to start when the system
+ boots:
+
+
+
+.. code-block:: console
+
+ # systemctl enable mysql.service
+ # systemctl start mysql.service
+
+.. end
+
+
+
+2. Secure the database service by running the ``mysql_secure_installation``
+ script. In particular, choose a suitable password for the database
+ ``root`` account:
+
+ .. code-block:: console
+
+ # mysql_secure_installation
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-sql-database-rdo.rst b/doc/install-guide/source/environment-sql-database-rdo.rst
new file mode 100644
index 0000000000..92a3f03135
--- /dev/null
+++ b/doc/install-guide/source/environment-sql-database-rdo.rst
@@ -0,0 +1,82 @@
+SQL database
+~~~~~~~~~~~~
+
+Most OpenStack services use an SQL database to store information. The
+database typically runs on the controller node. The procedures in this
+guide use MariaDB or MySQL depending on the distribution. OpenStack
+services also support other SQL databases including
+`PostgreSQL `__.
+
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+
+
+.. code-block:: console
+
+ # yum install mariadb mariadb-server python2-PyMySQL
+
+.. end
+
+
+
+
+
+
+2. Create and edit the ``/etc/my.cnf.d/openstack.cnf`` file
+ and complete the following actions:
+
+ - Create a ``[mysqld]`` section, and set the ``bind-address``
+ key to the management IP address of the controller node to
+ enable access by other nodes via the management network. Set
+ additional keys to enable useful options and the UTF-8
+ character set:
+
+ .. path /etc/my.cnf.d/openstack.cnf
+ .. code-block:: ini
+
+ [mysqld]
+ bind-address = 10.0.0.11
+
+ default-storage-engine = innodb
+ innodb_file_per_table = on
+ max_connections = 4096
+ collation-server = utf8_general_ci
+ character-set-server = utf8
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+#. Start the database service and configure it to start when the system
+ boots:
+
+
+.. code-block:: console
+
+ # systemctl enable mariadb.service
+ # systemctl start mariadb.service
+
+.. end
+
+
+
+
+2. Secure the database service by running the ``mysql_secure_installation``
+ script. In particular, choose a suitable password for the database
+ ``root`` account:
+
+ .. code-block:: console
+
+ # mysql_secure_installation
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-sql-database-ubuntu.rst b/doc/install-guide/source/environment-sql-database-ubuntu.rst
new file mode 100644
index 0000000000..9aa1c30a78
--- /dev/null
+++ b/doc/install-guide/source/environment-sql-database-ubuntu.rst
@@ -0,0 +1,87 @@
+SQL database
+~~~~~~~~~~~~
+
+Most OpenStack services use an SQL database to store information. The
+database typically runs on the controller node. The procedures in this
+guide use MariaDB or MySQL depending on the distribution. OpenStack
+services also support other SQL databases including
+`PostgreSQL `__.
+
+
+.. note::
+
+ As of Ubuntu 16.04, MariaDB was changed to use
+ the "unix_socket Authentication Plugin". Local authentication is
+ now performed using the user credentials (UID), and password
+ authentication is no longer used by default. This means that
+ the root user no longer uses a password for local access to
+ the server.
+
+
+Install and configure components
+--------------------------------
+
+#. Install the packages:
+
+
+.. code-block:: console
+
+ # apt install mariadb-server python-pymysql
+
+.. end
+
+
+
+
+
+
+
+2. Create and edit the ``/etc/mysql/mariadb.conf.d/99-openstack.cnf`` file
+ and complete the following actions:
+
+ - Create a ``[mysqld]`` section, and set the ``bind-address``
+ key to the management IP address of the controller node to
+ enable access by other nodes via the management network. Set
+ additional keys to enable useful options and the UTF-8
+ character set:
+
+ .. code-block:: ini
+
+ [mysqld]
+ bind-address = 10.0.0.11
+
+ default-storage-engine = innodb
+ innodb_file_per_table = on
+ max_connections = 4096
+ collation-server = utf8_general_ci
+ character-set-server = utf8
+ .. end
+
+
+
+Finalize installation
+---------------------
+
+
+#. Restart the database service:
+
+ .. code-block:: console
+
+ # service mysql restart
+
+ .. end
+
+
+
+
+
+2. Secure the database service by running the ``mysql_secure_installation``
+ script. In particular, choose a suitable password for the database
+ ``root`` account:
+
+ .. code-block:: console
+
+ # mysql_secure_installation
+
+ .. end
+
diff --git a/doc/install-guide/source/environment-sql-database.rst b/doc/install-guide/source/environment-sql-database.rst
index d4f7003049..6322ca5061 100644
--- a/doc/install-guide/source/environment-sql-database.rst
+++ b/doc/install-guide/source/environment-sql-database.rst
@@ -7,195 +7,8 @@ guide use MariaDB or MySQL depending on the distribution. OpenStack
services also support other SQL databases including
`PostgreSQL `__.
-.. only:: ubuntu
+.. toctree::
+ :glob:
- .. note::
+ environment-sql-database-*
- As of Ubuntu 16.04, MariaDB was changed to use
- the "unix_socket Authentication Plugin". Local authentication is
- now performed using the user credentials (UID), and password
- authentication is no longer used by default. This means that
- the root user no longer uses a password for local access to
- the server.
-
-.. endonly
-
-Install and configure components
---------------------------------
-
-#. Install the packages:
-
- .. only:: ubuntu
-
- .. code-block:: console
-
- # apt install mariadb-server python-pymysql
-
- .. end
-
- .. endonly
-
- .. only:: debian
-
- .. code-block:: console
-
- # apt install mysql-server python-pymysql
-
- .. end
-
- .. endonly
-
- .. only:: rdo
-
- .. code-block:: console
-
- # yum install mariadb mariadb-server python2-PyMySQL
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # zypper install mariadb-client mariadb python-PyMySQL
-
- .. end
-
- .. endonly
-
-.. only:: debian
-
- 2. Create and edit the ``/etc/mysql/conf.d/openstack.cnf`` file
- and complete the following actions:
-
- - Create a ``[mysqld]`` section, and set the ``bind-address``
- key to the management IP address of the controller node to
- enable access by other nodes via the management network. Set
- additional keys to enable useful options and the UTF-8
- character set:
-
- .. path /etc/mysql/conf.d/openstack.cnf
- .. code-block:: ini
-
- [mysqld]
- bind-address = 10.0.0.11
-
- default-storage-engine = innodb
- innodb_file_per_table = on
- max_connections = 4096
- collation-server = utf8_general_ci
- character-set-server = utf8
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu
-
- 2. Create and edit the ``/etc/mysql/mariadb.conf.d/99-openstack.cnf`` file
- and complete the following actions:
-
- - Create a ``[mysqld]`` section, and set the ``bind-address``
- key to the management IP address of the controller node to
- enable access by other nodes via the management network. Set
- additional keys to enable useful options and the UTF-8
- character set:
-
- .. code-block:: ini
-
- [mysqld]
- bind-address = 10.0.0.11
-
- default-storage-engine = innodb
- innodb_file_per_table = on
- max_connections = 4096
- collation-server = utf8_general_ci
- character-set-server = utf8
- .. end
-
-.. endonly
-
-.. only:: obs or rdo
-
- 2. Create and edit the ``/etc/my.cnf.d/openstack.cnf`` file
- and complete the following actions:
-
- - Create a ``[mysqld]`` section, and set the ``bind-address``
- key to the management IP address of the controller node to
- enable access by other nodes via the management network. Set
- additional keys to enable useful options and the UTF-8
- character set:
-
- .. path /etc/my.cnf.d/openstack.cnf
- .. code-block:: ini
-
- [mysqld]
- bind-address = 10.0.0.11
-
- default-storage-engine = innodb
- innodb_file_per_table = on
- max_connections = 4096
- collation-server = utf8_general_ci
- character-set-server = utf8
-
- .. end
-
-.. endonly
-
-Finalize installation
----------------------
-
-.. only:: ubuntu or debian
-
- #. Restart the database service:
-
- .. code-block:: console
-
- # service mysql restart
-
- .. end
-
-.. endonly
-
-.. only:: rdo or obs
-
- #. Start the database service and configure it to start when the system
- boots:
-
- .. only:: rdo
-
- .. code-block:: console
-
- # systemctl enable mariadb.service
- # systemctl start mariadb.service
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- .. code-block:: console
-
- # systemctl enable mysql.service
- # systemctl start mysql.service
-
- .. end
-
- .. endonly
-
-.. only:: rdo or obs or ubuntu
-
- 2. Secure the database service by running the ``mysql_secure_installation``
- script. In particular, choose a suitable password for the database
- ``root`` account:
-
- .. code-block:: console
-
- # mysql_secure_installation
-
- .. end
-
-.. endonly
diff --git a/doc/install-guide/source/environment-ubuntu.rst b/doc/install-guide/source/environment-ubuntu.rst
new file mode 100644
index 0000000000..2b7f561a0c
--- /dev/null
+++ b/doc/install-guide/source/environment-ubuntu.rst
@@ -0,0 +1,76 @@
+===========
+Environment
+===========
+
+This section explains how to configure the controller node and one compute
+node using the example architecture.
+
+Although most environments include Identity, Image service, Compute, at least
+one networking service, and the Dashboard, the Object Storage service can
+operate independently. If your use case only involves Object Storage, you can
+skip to the `Object Storage Installation Guide
+`_
+after configuring the appropriate nodes for it.
+
+You must use an account with administrative privileges to configure each node.
+Either run the commands as the ``root`` user or configure the ``sudo``
+utility.
+
+
+For best performance, we recommend that your environment meets or exceeds
+the hardware requirements in :ref:`figure-hwreqs`.
+
+The following minimum requirements should support a proof-of-concept
+environment with core services and several :term:`CirrOS` instances:
+
+* Controller Node: 1 processor, 4 GB memory, and 5 GB storage
+
+* Compute Node: 1 processor, 2 GB memory, and 10 GB storage
+
+As the number of OpenStack services and virtual machines increase, so do the
+hardware requirements for the best performance. If performance degrades after
+enabling additional services or virtual machines, consider adding hardware
+resources to your environment.
+
+To minimize clutter and provide more resources for OpenStack, we recommend
+a minimal installation of your Linux distribution. Also, you must install a
+64-bit version of your distribution on each node.
+
+A single disk partition on each node works for most basic installations.
+However, you should consider :term:`Logical Volume Manager (LVM)` for
+installations with optional services such as Block Storage.
+
+For first-time installation and testing purposes, many users choose to build
+each host as a :term:`virtual machine (VM)`. The primary benefits of VMs
+include the following:
+
+* One physical server can support multiple nodes, each with almost any
+ number of network interfaces.
+
+* Ability to take periodic "snapshots" throughout the installation
+ process and "roll back" to a working configuration in the event of a
+ problem.
+
+However, VMs will reduce performance of your instances, particularly if
+your hypervisor and/or processor lacks support for hardware acceleration
+of nested VMs.
+
+.. note::
+
+ If you choose to install on VMs, make sure your hypervisor provides
+ a way to disable MAC address filtering on the provider network
+ interface.
+
+For more information about system requirements, see the `OpenStack
+Operations Guide `_.
+
+.. toctree::
+ :maxdepth: 1
+
+ environment-security.rst
+ environment-networking.rst
+ environment-ntp.rst
+ environment-packages.rst
+ environment-sql-database.rst
+ environment-messaging.rst
+ environment-memcached.rst
diff --git a/doc/install-guide/source/environment.rst b/doc/install-guide/source/environment.rst
index 74c287f49b..46588d6ee1 100644
--- a/doc/install-guide/source/environment.rst
+++ b/doc/install-guide/source/environment.rst
@@ -4,82 +4,9 @@
Environment
===========
-This section explains how to configure the controller node and one compute
-node using the example architecture.
-
-Although most environments include Identity, Image service, Compute, at least
-one networking service, and the Dashboard, the Object Storage service can
-operate independently. If your use case only involves Object Storage, you can
-skip to `Object Storage Installation Guide
-`_
-after configuring the appropriate nodes for it.
-
-You must use an account with administrative privileges to configure each node.
-Either run the commands as the ``root`` user or configure the ``sudo``
-utility.
-
-.. only:: obs
-
- The :command:`systemctl enable` call on openSUSE outputs a warning message
- when the service uses SysV Init scripts instead of native systemd files. This
- warning can be ignored.
-
-.. endonly
-
-For best performance, we recommend that your environment meets or exceeds
-the hardware requirements in :ref:`figure-hwreqs`.
-
-The following minimum requirements should support a proof-of-concept
-environment with core services and several :term:`CirrOS` instances:
-
-* Controller Node: 1 processor, 4 GB memory, and 5 GB storage
-
-* Compute Node: 1 processor, 2 GB memory, and 10 GB storage
-
-As the number of OpenStack services and virtual machines increase, so do the
-hardware requirements for the best performance. If performance degrades after
-enabling additional services or virtual machines, consider adding hardware
-resources to your environment.
-
-To minimize clutter and provide more resources for OpenStack, we recommend
-a minimal installation of your Linux distribution. Also, you must install a
-64-bit version of your distribution on each node.
-
-A single disk partition on each node works for most basic installations.
-However, you should consider :term:`Logical Volume Manager (LVM)` for
-installations with optional services such as Block Storage.
-
-For first-time installation and testing purposes, many users select to build
-each host as a :term:`virtual machine (VM)`. The primary benefits of VMs
-include the following:
-
-* One physical server can support multiple nodes, each with almost any
- number of network interfaces.
-
-* Ability to take periodic "snap shots" throughout the installation
- process and "roll back" to a working configuration in the event of a
- problem.
-
-However, VMs will reduce performance of your instances, particularly if
-your hypervisor and/or processor lacks support for hardware acceleration
-of nested VMs.
-
-.. note::
-
- If you choose to install on VMs, make sure your hypervisor provides
- a way to disable MAC address filtering on the provider network
- interface.
-
-For more information about system requirements, see the `OpenStack
-Operations Guide `_.
-
.. toctree::
- :maxdepth: 1
- environment-security.rst
- environment-networking.rst
- environment-ntp.rst
- environment-packages.rst
- environment-sql-database.rst
- environment-messaging.rst
- environment-memcached.rst
+ environment-debian
+ environment-obs
+ environment-rdo
+ environment-ubuntu
diff --git a/doc/install-guide/source/glance-install-debian.rst b/doc/install-guide/source/glance-install-debian.rst
new file mode 100644
index 0000000000..eaf691267a
--- /dev/null
+++ b/doc/install-guide/source/glance-install-debian.rst
@@ -0,0 +1,329 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Image service,
+code-named glance, on the controller node. For simplicity, this
+configuration stores images on the local file system.
+
+Prerequisites
+-------------
+
+Before you install and configure the Image service, you must
+create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE glance;
+
+ .. end
+
+ * Grant proper access to the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``glance`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt glance
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 3f4e777c4062483ab8d9edd7dff829df |
+ | name | glance |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``glance`` user and
+ ``service`` project:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user glance admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``glance`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name glance \
+ --description "OpenStack Image" image
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Image |
+ | enabled | True |
+ | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | name | glance |
+ | type | image |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Image service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ image public http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 340be3625e9b4239a6415d034e98aace |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image internal http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image admin http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 0c37ed58103f4300a84ff125a539032d |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install glance
+
+ .. end
+
+
+2. Edit the ``/etc/glance/glance-api.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[glance_store]`` section, configure the local file
+ system store and location of image files:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [glance_store]
+ # ...
+ stores = file,http
+ default_store = file
+ filesystem_store_datadir = /var/lib/glance/images/
+
+ .. end
+
+3. Edit the ``/etc/glance/glance-registry.conf`` file and complete
+ the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+4. Populate the Image service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "glance-manage db_sync" glance
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+
+Finalize installation
+---------------------
+
+
+
+#. Restart the Image services:
+
+ .. code-block:: console
+
+ # service glance-registry restart
+ # service glance-api restart
+
+ .. end
+
diff --git a/doc/install-guide/source/glance-install-obs.rst b/doc/install-guide/source/glance-install-obs.rst
new file mode 100644
index 0000000000..6d42fea396
--- /dev/null
+++ b/doc/install-guide/source/glance-install-obs.rst
@@ -0,0 +1,333 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Image service,
+code-named glance, on the controller node. For simplicity, this
+configuration stores images on the local file system.
+
+Prerequisites
+-------------
+
+Before you install and configure the Image service, you must
+create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE glance;
+
+ .. end
+
+ * Grant proper access to the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``glance`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt glance
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 3f4e777c4062483ab8d9edd7dff829df |
+ | name | glance |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``glance`` user and
+ ``service`` project:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user glance admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``glance`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name glance \
+ --description "OpenStack Image" image
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Image |
+ | enabled | True |
+ | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | name | glance |
+ | type | image |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Image service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ image public http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 340be3625e9b4239a6415d034e98aace |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image internal http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image admin http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 0c37ed58103f4300a84ff125a539032d |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+.. note::
+
+ Starting with the Newton release, SUSE OpenStack packages are shipping
+ with the upstream default configuration files. For example
+ ``/etc/glance/glance-api.conf`` or
+ ``/etc/glance/glance-registry.conf``, with customizations in
+ ``/etc/glance/glance-api.conf.d/`` or
+ ``/etc/glance/glance-registry.conf.d/``. While the following
+ instructions modify the default configuration files, adding new files
+ in ``/etc/glance/glance-api.conf.d`` or
+ ``/etc/glance/glance-registry.conf.d`` achieves the same result.
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-glance \
+ openstack-glance-api openstack-glance-registry
+
+ .. end
+
+
+
+
+2. Edit the ``/etc/glance/glance-api.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[glance_store]`` section, configure the local file
+ system store and location of image files:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [glance_store]
+ # ...
+ stores = file,http
+ default_store = file
+ filesystem_store_datadir = /var/lib/glance/images/
+
+ .. end
+
+3. Edit the ``/etc/glance/glance-registry.conf`` file and complete
+ the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+Finalize installation
+---------------------
+
+
+* Start the Image services and configure them to start when
+ the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-glance-api.service \
+ openstack-glance-registry.service
+ # systemctl start openstack-glance-api.service \
+ openstack-glance-registry.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/glance-install-rdo.rst b/doc/install-guide/source/glance-install-rdo.rst
new file mode 100644
index 0000000000..7e9e28951b
--- /dev/null
+++ b/doc/install-guide/source/glance-install-rdo.rst
@@ -0,0 +1,332 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Image service,
+code-named glance, on the controller node. For simplicity, this
+configuration stores images on the local file system.
+
+Prerequisites
+-------------
+
+Before you install and configure the Image service, you must
+create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE glance;
+
+ .. end
+
+ * Grant proper access to the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``glance`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt glance
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 3f4e777c4062483ab8d9edd7dff829df |
+ | name | glance |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``glance`` user and
+ ``service`` project:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user glance admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``glance`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name glance \
+ --description "OpenStack Image" image
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Image |
+ | enabled | True |
+ | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | name | glance |
+ | type | image |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Image service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ image public http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 340be3625e9b4239a6415d034e98aace |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image internal http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image admin http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 0c37ed58103f4300a84ff125a539032d |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-glance
+
+ .. end
+
+
+
+2. Edit the ``/etc/glance/glance-api.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[glance_store]`` section, configure the local file
+ system store and location of image files:
+
+     .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [glance_store]
+ # ...
+ stores = file,http
+ default_store = file
+ filesystem_store_datadir = /var/lib/glance/images/
+
+ .. end
+
+3. Edit the ``/etc/glance/glance-registry.conf`` file and complete
+ the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+4. Populate the Image service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "glance-manage db_sync" glance
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+
+Finalize installation
+---------------------
+
+
+* Start the Image services and configure them to start when
+ the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-glance-api.service \
+ openstack-glance-registry.service
+ # systemctl start openstack-glance-api.service \
+ openstack-glance-registry.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/glance-install-ubuntu.rst b/doc/install-guide/source/glance-install-ubuntu.rst
new file mode 100644
index 0000000000..c84a51dc83
--- /dev/null
+++ b/doc/install-guide/source/glance-install-ubuntu.rst
@@ -0,0 +1,329 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Image service,
+code-named glance, on the controller node. For simplicity, this
+configuration stores images on the local file system.
+
+Prerequisites
+-------------
+
+Before you install and configure the Image service, you must
+create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+* Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ # mysql
+
+ .. end
+
+
+
+ * Create the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE glance;
+
+ .. end
+
+ * Grant proper access to the ``glance`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
+ IDENTIFIED BY 'GLANCE_DBPASS';
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``glance`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt glance
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 3f4e777c4062483ab8d9edd7dff829df |
+ | name | glance |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``glance`` user and
+ ``service`` project:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user glance admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``glance`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name glance \
+ --description "OpenStack Image" image
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Image |
+ | enabled | True |
+ | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | name | glance |
+ | type | image |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Image service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ image public http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 340be3625e9b4239a6415d034e98aace |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image internal http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ image admin http://controller:9292
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 0c37ed58103f4300a84ff125a539032d |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
+ | service_name | glance |
+ | service_type | image |
+ | url | http://controller:9292 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install glance
+
+ .. end
+
+
+2. Edit the ``/etc/glance/glance-api.conf`` file and complete the
+ following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+   .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+   .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[glance_store]`` section, configure the local file
+ system store and location of image files:
+
+   .. path /etc/glance/glance-api.conf
+ .. code-block:: ini
+
+ [glance_store]
+ # ...
+ stores = file,http
+ default_store = file
+ filesystem_store_datadir = /var/lib/glance/images/
+
+ .. end
+
+3. Edit the ``/etc/glance/glance-registry.conf`` file and complete
+ the following actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
+
+ .. end
+
+ Replace ``GLANCE_DBPASS`` with the password you chose for the
+ Image service database.
+
+ * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
+ configure Identity service access:
+
+ .. path /etc/glance/glance-registry.conf
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = glance
+ password = GLANCE_PASS
+
+ [paste_deploy]
+ # ...
+ flavor = keystone
+
+ .. end
+
+ Replace ``GLANCE_PASS`` with the password you chose for the
+ ``glance`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+4. Populate the Image service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "glance-manage db_sync" glance
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+
+Finalize installation
+---------------------
+
+
+
+#. Restart the Image services:
+
+ .. code-block:: console
+
+ # service glance-registry restart
+ # service glance-api restart
+
+ .. end
+
diff --git a/doc/install-guide/source/glance-install.rst b/doc/install-guide/source/glance-install.rst
index e2f3efc8bf..9fa396ffb8 100644
--- a/doc/install-guide/source/glance-install.rst
+++ b/doc/install-guide/source/glance-install.rst
@@ -5,398 +5,7 @@ This section describes how to install and configure the Image service,
code-named glance, on the controller node. For simplicity, this
configuration stores images on the local file system.
-Prerequisites
--------------
+.. toctree::
+ :glob:
-Before you install and configure the Image service, you must
-create a database, service credentials, and API endpoints.
-
-#. To create the database, complete these steps:
-
- .. only:: ubuntu
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- # mysql
-
- .. end
-
- .. endonly
-
- .. only:: rdo or debian or obs
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- $ mysql -u root -p
-
- .. end
-
- .. endonly
-
- * Create the ``glance`` database:
-
- .. code-block:: console
-
- MariaDB [(none)]> CREATE DATABASE glance;
-
- .. end
-
- * Grant proper access to the ``glance`` database:
-
- .. code-block:: console
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
- IDENTIFIED BY 'GLANCE_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
- IDENTIFIED BY 'GLANCE_DBPASS';
-
- .. end
-
- Replace ``GLANCE_DBPASS`` with a suitable password.
-
- * Exit the database access client.
-
-#. Source the ``admin`` credentials to gain access to
- admin-only CLI commands:
-
- .. code-block:: console
-
- $ . admin-openrc
-
- .. end
-
-#. To create the service credentials, complete these steps:
-
- * Create the ``glance`` user:
-
- .. code-block:: console
-
- $ openstack user create --domain default --password-prompt glance
-
- User Password:
- Repeat User Password:
- +---------------------+----------------------------------+
- | Field | Value |
- +---------------------+----------------------------------+
- | domain_id | default |
- | enabled | True |
- | id | 3f4e777c4062483ab8d9edd7dff829df |
- | name | glance |
- | options | {} |
- | password_expires_at | None |
- +---------------------+----------------------------------+
-
- .. end
-
- * Add the ``admin`` role to the ``glance`` user and
- ``service`` project:
-
- .. code-block:: console
-
- $ openstack role add --project service --user glance admin
-
- .. end
-
- .. note::
-
- This command provides no output.
-
- * Create the ``glance`` service entity:
-
- .. code-block:: console
-
- $ openstack service create --name glance \
- --description "OpenStack Image" image
-
- +-------------+----------------------------------+
- | Field | Value |
- +-------------+----------------------------------+
- | description | OpenStack Image |
- | enabled | True |
- | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
- | name | glance |
- | type | image |
- +-------------+----------------------------------+
-
- .. end
-
-#. Create the Image service API endpoints:
-
- .. code-block:: console
-
- $ openstack endpoint create --region RegionOne \
- image public http://controller:9292
-
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 340be3625e9b4239a6415d034e98aace |
- | interface | public |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
- | service_name | glance |
- | service_type | image |
- | url | http://controller:9292 |
- +--------------+----------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- image internal http://controller:9292
-
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 |
- | interface | internal |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
- | service_name | glance |
- | service_type | image |
- | url | http://controller:9292 |
- +--------------+----------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- image admin http://controller:9292
-
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 0c37ed58103f4300a84ff125a539032d |
- | interface | admin |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 |
- | service_name | glance |
- | service_type | image |
- | url | http://controller:9292 |
- +--------------+----------------------------------+
-
- .. end
-
-Install and configure components
---------------------------------
-
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-.. only:: obs
-
- .. note::
-
- Starting with the Newton release, SUSE OpenStack packages are shipping
- with the upstream default configuration files. For example
- ``/etc/glance/glance-api.conf`` or
- ``/etc/glance/glance-registry.conf``, with customizations in
- ``/etc/glance/glance-api.conf.d/`` or
- ``/etc/glance/glance-registry.conf.d/``. While the following
- instructions modify the default configuration files, adding new files
- in ``/etc/glance/glance-api.conf.d`` or
- ``/etc/glance/glance-registry.conf.d`` achieves the same result.
-
-.. endonly
-
-.. only:: obs
-
- #. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-glance \
- openstack-glance-api openstack-glance-registry
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-glance
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install glance
-
- .. end
-
-.. endonly
-
-2. Edit the ``/etc/glance/glance-api.conf`` file and complete the
- following actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/glance/glance.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
-
- .. end
-
- Replace ``GLANCE_DBPASS`` with the password you chose for the
- Image service database.
-
- * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
- configure Identity service access:
-
- .. path /etc/glance/glance.conf
- .. code-block:: ini
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = glance
- password = GLANCE_PASS
-
- [paste_deploy]
- # ...
- flavor = keystone
-
- .. end
-
- Replace ``GLANCE_PASS`` with the password you chose for the
- ``glance`` user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- * In the ``[glance_store]`` section, configure the local file
- system store and location of image files:
-
- .. path /etc/glance/glance.conf
- .. code-block:: ini
-
- [glance_store]
- # ...
- stores = file,http
- default_store = file
- filesystem_store_datadir = /var/lib/glance/images/
-
- .. end
-
-3. Edit the ``/etc/glance/glance-registry.conf`` file and complete
- the following actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/glance/glance-registry.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
-
- .. end
-
- Replace ``GLANCE_DBPASS`` with the password you chose for the
- Image service database.
-
- * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections,
- configure Identity service access:
-
- .. path /etc/glance/glance-registry.conf
- .. code-block:: ini
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = glance
- password = GLANCE_PASS
-
- [paste_deploy]
- # ...
- flavor = keystone
-
- .. end
-
- Replace ``GLANCE_PASS`` with the password you chose for the
- ``glance`` user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
-.. only:: rdo or ubuntu or debian
-
- 4. Populate the Image service database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "glance-manage db_sync" glance
-
- .. end
-
- .. note::
-
- Ignore any deprecation messages in this output.
-
-.. endonly
-
-Finalize installation
----------------------
-
-.. only:: obs or rdo
-
- * Start the Image services and configure them to start when
- the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-glance-api.service \
- openstack-glance-registry.service
- # systemctl start openstack-glance-api.service \
- openstack-glance-registry.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Restart the Image services:
-
- .. code-block:: console
-
- # service glance-registry restart
- # service glance-api restart
-
- .. end
-
-.. endonly
+ glance-install-*
diff --git a/doc/install-guide/source/horizon-install-debian.rst b/doc/install-guide/source/horizon-install-debian.rst
new file mode 100644
index 0000000000..1c6cc80c66
--- /dev/null
+++ b/doc/install-guide/source/horizon-install-debian.rst
@@ -0,0 +1,212 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the dashboard
+on the controller node.
+
+The only core service required by the dashboard is the Identity service.
+You can use the dashboard in combination with other services, such as
+Image service, Compute, and Networking. You can also use the dashboard
+in environments with stand-alone services such as Object Storage.
+
+.. note::
+
+ This section assumes proper installation, configuration, and operation
+ of the Identity service using the Apache HTTP server and Memcached
+ service as described in the :ref:`Install and configure the Identity
+   service <keystone-install>` section.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+
+1. Install the packages:
+
+ .. code-block:: console
+
+ # apt install openstack-dashboard-apache
+
+ .. end
+
+2. Respond to prompts for web server configuration.
+
+ .. note::
+
+ The automatic configuration process generates a self-signed
+ SSL certificate. Consider obtaining an official certificate
+ for production environments.
+
+ .. note::
+
+ There are two modes of installation. One using ``/horizon`` as the URL,
+ keeping your default vhost and only adding an Alias directive: this is
+ the default. The other mode will remove the default Apache vhost and install
+ the dashboard on the webroot. It was the only available option
+ before the Liberty release. If you prefer to set the Apache configuration
+ manually, install the ``openstack-dashboard`` package instead of
+ ``openstack-dashboard-apache``.
+
+
+
+
+
+3. Edit the
+ ``/etc/openstack-dashboard/local_settings.py``
+ file and complete the following actions:
+
+ * Configure the dashboard to use OpenStack services on the
+ ``controller`` node:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_HOST = "controller"
+
+ .. end
+
+ * In the Dashboard configuration section, allow your hosts to access
+ Dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
+
+ .. end
+
+ .. note::
+
+ - Do not edit the ``ALLOWED_HOSTS`` parameter under the Ubuntu
+ configuration section.
+ - ``ALLOWED_HOSTS`` can also be ``['*']`` to accept all hosts. This
+ may be useful for development work, but is potentially insecure
+ and should not be used in production. See the
+ `Django documentation
+        <https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts>`_
+ for further information.
+
+ * Configure the ``memcached`` session storage service:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+
+ CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': 'controller:11211',
+ }
+ }
+
+ .. end
+
+ .. note::
+
+ Comment out any other session storage configuration.
+
+ * Enable the Identity API version 3:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+
+ .. end
+
+ * Enable support for domains:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+
+ .. end
+
+ * Configure API versions:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ "image": 2,
+ "volume": 2,
+ }
+
+ .. end
+
+ * Configure ``Default`` as the default domain for users that you create
+ via the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
+
+ .. end
+
+ * Configure ``user`` as the default role for
+ users that you create via the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
+
+ .. end
+
+ * If you chose networking option 1, disable support for layer-3
+ networking services:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_NEUTRON_NETWORK = {
+ ...
+ 'enable_router': False,
+ 'enable_quotas': False,
+ 'enable_ipv6': False,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': False,
+ 'enable_firewall': False,
+ 'enable_vpn': False,
+ 'enable_fip_topology_check': False,
+ }
+
+ .. end
+
+ * Optionally, configure the time zone:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ TIME_ZONE = "TIME_ZONE"
+
+ .. end
+
+ Replace ``TIME_ZONE`` with an appropriate time zone identifier.
+ For more information, see the `list of time zones
+     <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>`__.
+
+
+Finalize installation
+---------------------
+
+
+* Reload the web server configuration:
+
+ .. code-block:: console
+
+ # service apache2 reload
+
+ .. end
+
+
+
diff --git a/doc/install-guide/source/horizon-install-obs.rst b/doc/install-guide/source/horizon-install-obs.rst
new file mode 100644
index 0000000000..c8d26a7516
--- /dev/null
+++ b/doc/install-guide/source/horizon-install-obs.rst
@@ -0,0 +1,204 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the dashboard
+on the controller node.
+
+The only core service required by the dashboard is the Identity service.
+You can use the dashboard in combination with other services, such as
+Image service, Compute, and Networking. You can also use the dashboard
+in environments with stand-alone services such as Object Storage.
+
+.. note::
+
+ This section assumes proper installation, configuration, and operation
+ of the Identity service using the Apache HTTP server and Memcached
+ service as described in the :ref:`Install and configure the Identity
+   service <keystone-install>` section.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+1. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-dashboard
+
+ .. end
+
+
+
+
+
+
+2. Configure the web server:
+
+ .. code-block:: console
+
+ # cp /etc/apache2/conf.d/openstack-dashboard.conf.sample \
+ /etc/apache2/conf.d/openstack-dashboard.conf
+ # a2enmod rewrite
+
+ .. end
+
+3. Edit the
+ ``/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py``
+ file and complete the following actions:
+
+ * Configure the dashboard to use OpenStack services on the
+ ``controller`` node:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_HOST = "controller"
+
+ .. end
+
+ * Allow your hosts to access the dashboard:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
+
+ .. end
+
+ .. note::
+
+ ``ALLOWED_HOSTS`` can also be ``['*']`` to accept all hosts. This may be
+ useful for development work, but is potentially insecure and should
+ not be used in production. See `Django documentation
+      <https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts>`_
+ for further information.
+
+ * Configure the ``memcached`` session storage service:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+
+ CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': 'controller:11211',
+ }
+ }
+
+ .. end
+
+ .. note::
+
+ Comment out any other session storage configuration.
+
+ * Enable the Identity API version 3:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+
+ .. end
+
+ * Enable support for domains:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+
+ .. end
+
+ * Configure API versions:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ "image": 2,
+ "volume": 2,
+ }
+
+ .. end
+
+ * Configure ``Default`` as the default domain for users that you create
+ via the dashboard:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
+
+ .. end
+
+ * Configure ``user`` as the default role for
+ users that you create via the dashboard:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
+
+ .. end
+
+ * If you chose networking option 1, disable support for layer-3
+ networking services:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_NEUTRON_NETWORK = {
+ ...
+ 'enable_router': False,
+ 'enable_quotas': False,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': False,
+ 'enable_firewall': False,
+ 'enable_vpn': False,
+ 'enable_fip_topology_check': False,
+ }
+
+ .. end
+
+ * Optionally, configure the time zone:
+
+ .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
+ .. code-block:: python
+
+ TIME_ZONE = "TIME_ZONE"
+
+ .. end
+
+ Replace ``TIME_ZONE`` with an appropriate time zone identifier.
+ For more information, see the `list of time zones
+     <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>`__.
+
+
+
+
+Finalize installation
+---------------------
+
+
+
+* Restart the web server and session storage service:
+
+ .. code-block:: console
+
+ # systemctl restart apache2.service memcached.service
+
+ .. end
+
+ .. note::
+
+ The ``systemctl restart`` command starts each service if
+ not currently running.
+
+
diff --git a/doc/install-guide/source/horizon-install-rdo.rst b/doc/install-guide/source/horizon-install-rdo.rst
new file mode 100644
index 0000000000..c6c8961b69
--- /dev/null
+++ b/doc/install-guide/source/horizon-install-rdo.rst
@@ -0,0 +1,194 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the dashboard
+on the controller node.
+
+The only core service required by the dashboard is the Identity service.
+You can use the dashboard in combination with other services, such as
+Image service, Compute, and Networking. You can also use the dashboard
+in environments with stand-alone services such as Object Storage.
+
+.. note::
+
+ This section assumes proper installation, configuration, and operation
+ of the Identity service using the Apache HTTP server and Memcached
+ service as described in the :ref:`Install and configure the Identity
+   service <keystone-install>` section.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+1. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-dashboard
+
+ .. end
+
+
+
+
+
+
+2. Edit the
+ ``/etc/openstack-dashboard/local_settings``
+ file and complete the following actions:
+
+ * Configure the dashboard to use OpenStack services on the
+ ``controller`` node:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_HOST = "controller"
+
+ .. end
+
+ * Allow your hosts to access the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
+
+ .. end
+
+ .. note::
+
+ ALLOWED_HOSTS can also be ['*'] to accept all hosts. This may be
+ useful for development work, but is potentially insecure and should
+ not be used in production. See
+ https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+ for further information.
+
+ * Configure the ``memcached`` session storage service:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+
+ CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': 'controller:11211',
+ }
+ }
+
+ .. end
+
+ .. note::
+
+ Comment out any other session storage configuration.
+
+ * Enable the Identity API version 3:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+
+ .. end
+
+ * Enable support for domains:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+
+ .. end
+
+ * Configure API versions:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ "image": 2,
+ "volume": 2,
+ }
+
+ .. end
+
+ * Configure ``Default`` as the default domain for users that you create
+ via the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
+
+ .. end
+
+ * Configure ``user`` as the default role for
+ users that you create via the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
+
+ .. end
+
+ * If you chose networking option 1, disable support for layer-3
+ networking services:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ OPENSTACK_NEUTRON_NETWORK = {
+ ...
+ 'enable_router': False,
+ 'enable_quotas': False,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': False,
+ 'enable_firewall': False,
+ 'enable_vpn': False,
+ 'enable_fip_topology_check': False,
+ }
+
+ .. end
+
+ * Optionally, configure the time zone:
+
+ .. path /etc/openstack-dashboard/local_settings
+ .. code-block:: python
+
+ TIME_ZONE = "TIME_ZONE"
+
+ .. end
+
+ Replace ``TIME_ZONE`` with an appropriate time zone identifier.
+ For more information, see the `list of time zones
+     <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>`__.
+
+
+
+Finalize installation
+---------------------
+
+
+
+
+* Restart the web server and session storage service:
+
+ .. code-block:: console
+
+ # systemctl restart httpd.service memcached.service
+
+ .. end
+
+ .. note::
+
+ The ``systemctl restart`` command starts each service if
+ not currently running.
+
diff --git a/doc/install-guide/source/horizon-install-ubuntu.rst b/doc/install-guide/source/horizon-install-ubuntu.rst
new file mode 100644
index 0000000000..03dee2e59a
--- /dev/null
+++ b/doc/install-guide/source/horizon-install-ubuntu.rst
@@ -0,0 +1,194 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the dashboard
+on the controller node.
+
+The only core service required by the dashboard is the Identity service.
+You can use the dashboard in combination with other services, such as
+Image service, Compute, and Networking. You can also use the dashboard
+in environments with stand-alone services such as Object Storage.
+
+.. note::
+
+ This section assumes proper installation, configuration, and operation
+ of the Identity service using the Apache HTTP server and Memcached
+ service as described in the :ref:`Install and configure the Identity
+ service ` section.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+1. Install the packages:
+
+ .. code-block:: console
+
+ # apt install openstack-dashboard
+
+ .. end
+
+
+
+
+
+
+2. Edit the
+ ``/etc/openstack-dashboard/local_settings.py``
+ file and complete the following actions:
+
+ * Configure the dashboard to use OpenStack services on the
+ ``controller`` node:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_HOST = "controller"
+
+ .. end
+
+ * In the Dashboard configuration section, allow your hosts to access
+ Dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
+
+ .. end
+
+ .. note::
+
+ - Do not edit the ``ALLOWED_HOSTS`` parameter under the Ubuntu
+ configuration section.
+ - ``ALLOWED_HOSTS`` can also be ``['*']`` to accept all hosts. This
+ may be useful for development work, but is potentially insecure
+ and should not be used in production. See the
+ `Django documentation
+ `_
+ for further information.
+
+ * Configure the ``memcached`` session storage service:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+
+ CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': 'controller:11211',
+ }
+ }
+
+ .. end
+
+ .. note::
+
+ Comment out any other session storage configuration.
+
+ * Enable the Identity API version 3:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+
+ .. end
+
+ * Enable support for domains:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+
+ .. end
+
+ * Configure API versions:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ "image": 2,
+ "volume": 2,
+ }
+
+ .. end
+
+ * Configure ``Default`` as the default domain for users that you create
+ via the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
+
+ .. end
+
+ * Configure ``user`` as the default role for
+ users that you create via the dashboard:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
+
+ .. end
+
+ * If you chose networking option 1, disable support for layer-3
+ networking services:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ OPENSTACK_NEUTRON_NETWORK = {
+ ...
+ 'enable_router': False,
+ 'enable_quotas': False,
+ 'enable_ipv6': False,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': False,
+ 'enable_firewall': False,
+ 'enable_vpn': False,
+ 'enable_fip_topology_check': False,
+ }
+
+ .. end
+
+ * Optionally, configure the time zone:
+
+ .. path /etc/openstack-dashboard/local_settings.py
+ .. code-block:: python
+
+ TIME_ZONE = "TIME_ZONE"
+
+ .. end
+
+ Replace ``TIME_ZONE`` with an appropriate time zone identifier.
+ For more information, see the `list of time zones
+ `__.
+
+
+Finalize installation
+---------------------
+
+
+* Reload the web server configuration:
+
+ .. code-block:: console
+
+ # service apache2 reload
+
+ .. end
+
+
+
diff --git a/doc/install-guide/source/horizon-install.rst b/doc/install-guide/source/horizon-install.rst
index 4c9293dbdc..ac2ca61a6b 100644
--- a/doc/install-guide/source/horizon-install.rst
+++ b/doc/install-guide/source/horizon-install.rst
@@ -16,554 +16,7 @@ in environments with stand-alone services such as Object Storage.
service as described in the :ref:`Install and configure the Identity
service ` section.
-Install and configure components
---------------------------------
+.. toctree::
+ :glob:
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-.. only:: obs
-
- 1. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-dashboard
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- 1. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-dashboard
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu
-
- 1. Install the packages:
-
- .. code-block:: console
-
- # apt install openstack-dashboard
-
- .. end
-
-.. endonly
-
-.. only:: debian
-
- 1. Install the packages:
-
- .. code-block:: console
-
- # apt install openstack-dashboard-apache
-
- .. end
-
- 2. Respond to prompts for web server configuration.
-
- .. note::
-
- The automatic configuration process generates a self-signed
- SSL certificate. Consider obtaining an official certificate
- for production environments.
-
- .. note::
-
- There are two modes of installation. One using ``/horizon`` as the URL,
- keeping your default vhost and only adding an Alias directive: this is
- the default. The other mode will remove the default Apache vhost and install
- the dashboard on the webroot. It was the only available option
- before the Liberty release. If you prefer to set the Apache configuration
- manually, install the ``openstack-dashboard`` package instead of
- ``openstack-dashboard-apache``.
-
-.. endonly
-
-.. only:: obs
-
- 2. Configure the web server:
-
- .. code-block:: console
-
- # cp /etc/apache2/conf.d/openstack-dashboard.conf.sample \
- /etc/apache2/conf.d/openstack-dashboard.conf
- # a2enmod rewrite
-
- .. end
-
- 3. Edit the
- ``/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py``
- file and complete the following actions:
-
- * Configure the dashboard to use OpenStack services on the
- ``controller`` node:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_HOST = "controller"
-
- .. end
-
- * Allow your hosts to access the dashboard:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
-
- .. end
-
- .. note::
-
- ``ALLOWED_HOSTS`` can also be ``['*']`` to accept all hosts. This may be
- useful for development work, but is potentially insecure and should
- not be used in production. See `Django documentation
- `_
- for further information.
-
- * Configure the ``memcached`` session storage service:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
-
- CACHES = {
- 'default': {
- 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION': 'controller:11211',
- }
- }
-
- .. end
-
- .. note::
-
- Comment out any other session storage configuration.
-
- * Enable the Identity API version 3:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
-
- .. end
-
- * Enable support for domains:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
-
- .. end
-
- * Configure API versions:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_API_VERSIONS = {
- "identity": 3,
- "image": 2,
- "volume": 2,
- }
-
- .. end
-
- * Configure ``Default`` as the default domain for users that you create
- via the dashboard:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
-
- .. end
-
- * Configure ``user`` as the default role for
- users that you create via the dashboard:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
-
- .. end
-
- * If you chose networking option 1, disable support for layer-3
- networking services:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- OPENSTACK_NEUTRON_NETWORK = {
- ...
- 'enable_router': False,
- 'enable_quotas': False,
- 'enable_distributed_router': False,
- 'enable_ha_router': False,
- 'enable_lb': False,
- 'enable_firewall': False,
- 'enable_vpn': False,
- 'enable_fip_topology_check': False,
- }
-
- .. end
-
- * Optionally, configure the time zone:
-
- .. path /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py
- .. code-block:: python
-
- TIME_ZONE = "TIME_ZONE"
-
- .. end
-
- Replace ``TIME_ZONE`` with an appropriate time zone identifier.
- For more information, see the `list of time zones
- `__.
-
-.. endonly
-
-.. only:: rdo
-
- 2. Edit the
- ``/etc/openstack-dashboard/local_settings``
- file and complete the following actions:
-
- * Configure the dashboard to use OpenStack services on the
- ``controller`` node:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_HOST = "controller"
-
- .. end
-
- * Allow your hosts to access the dashboard:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
-
- .. end
-
- .. note::
-
- ALLOWED_HOSTS can also be ['*'] to accept all hosts. This may be
- useful for development work, but is potentially insecure and should
- not be used in production. See
- https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
- for further information.
-
- * Configure the ``memcached`` session storage service:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
-
- CACHES = {
- 'default': {
- 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION': 'controller:11211',
- }
- }
-
- .. end
-
- .. note::
-
- Comment out any other session storage configuration.
-
- * Enable the Identity API version 3:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
-
- .. end
-
- * Enable support for domains:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
-
- .. end
-
- * Configure API versions:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_API_VERSIONS = {
- "identity": 3,
- "image": 2,
- "volume": 2,
- }
-
- .. end
-
- * Configure ``Default`` as the default domain for users that you create
- via the dashboard:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
-
- .. end
-
- * Configure ``user`` as the default role for
- users that you create via the dashboard:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
-
- .. end
-
- * If you chose networking option 1, disable support for layer-3
- networking services:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- OPENSTACK_NEUTRON_NETWORK = {
- ...
- 'enable_router': False,
- 'enable_quotas': False,
- 'enable_distributed_router': False,
- 'enable_ha_router': False,
- 'enable_lb': False,
- 'enable_firewall': False,
- 'enable_vpn': False,
- 'enable_fip_topology_check': False,
- }
-
- .. end
-
- * Optionally, configure the time zone:
-
- .. path /etc/openstack-dashboard/local_settings
- .. code-block:: python
-
- TIME_ZONE = "TIME_ZONE"
-
- .. end
-
- Replace ``TIME_ZONE`` with an appropriate time zone identifier.
- For more information, see the `list of time zones
- `__.
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- 2. Edit the
- ``/etc/openstack-dashboard/local_settings.py``
- file and complete the following actions:
-
- * Configure the dashboard to use OpenStack services on the
- ``controller`` node:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_HOST = "controller"
-
- .. end
-
- * In the Dashboard configuration section, allow your hosts to access
- Dashboard:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
-
- .. end
-
- .. note::
-
- - Do not edit the ``ALLOWED_HOSTS`` parameter under the Ubuntu
- configuration section.
- - ``ALLOWED_HOSTS`` can also be ``['*']`` to accept all hosts. This
- may be useful for development work, but is potentially insecure
- and should not be used in production. See the
- `Django documentation
- `_
- for further information.
-
- * Configure the ``memcached`` session storage service:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
-
- CACHES = {
- 'default': {
- 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION': 'controller:11211',
- }
- }
-
- .. end
-
- .. note::
-
- Comment out any other session storage configuration.
-
- * Enable the Identity API version 3:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
-
- .. end
-
- * Enable support for domains:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
-
- .. end
-
- * Configure API versions:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_API_VERSIONS = {
- "identity": 3,
- "image": 2,
- "volume": 2,
- }
-
- .. end
-
- * Configure ``Default`` as the default domain for users that you create
- via the dashboard:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
-
- .. end
-
- * Configure ``user`` as the default role for
- users that you create via the dashboard:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
-
- .. end
-
- * If you chose networking option 1, disable support for layer-3
- networking services:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- OPENSTACK_NEUTRON_NETWORK = {
- ...
- 'enable_router': False,
- 'enable_quotas': False,
- 'enable_ipv6': False,
- 'enable_distributed_router': False,
- 'enable_ha_router': False,
- 'enable_lb': False,
- 'enable_firewall': False,
- 'enable_vpn': False,
- 'enable_fip_topology_check': False,
- }
-
- .. end
-
- * Optionally, configure the time zone:
-
- .. path /etc/openstack-dashboard/local_settings.py
- .. code-block:: python
-
- TIME_ZONE = "TIME_ZONE"
-
- .. end
-
- Replace ``TIME_ZONE`` with an appropriate time zone identifier.
- For more information, see the `list of time zones
- `__.
-
-.. endonly
-
-Finalize installation
----------------------
-
-.. only:: ubuntu or debian
-
- * Reload the web server configuration:
-
- .. code-block:: console
-
- # service apache2 reload
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- * Restart the web server and session storage service:
-
- .. code-block:: console
-
- # systemctl restart apache2.service memcached.service
-
- .. end
-
- .. note::
-
- The ``systemctl restart`` command starts each service if
- not currently running.
-
-.. endonly
-
-.. only:: rdo
-
- * Restart the web server and session storage service:
-
- .. code-block:: console
-
- # systemctl restart httpd.service memcached.service
-
- .. end
-
- .. note::
-
- The ``systemctl restart`` command starts each service if
- not currently running.
-
-.. endonly
+ horizon-install-*
diff --git a/doc/install-guide/source/horizon-verify-debian.rst b/doc/install-guide/source/horizon-verify-debian.rst
new file mode 100644
index 0000000000..536abcbdf1
--- /dev/null
+++ b/doc/install-guide/source/horizon-verify-debian.rst
@@ -0,0 +1,14 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the dashboard.
+
+
+Access the dashboard using a web browser at
+``http://controller/``.
+
+
+
+
+Authenticate using ``admin`` or ``demo`` user
+and ``default`` domain credentials.
diff --git a/doc/install-guide/source/horizon-verify-obs.rst b/doc/install-guide/source/horizon-verify-obs.rst
new file mode 100644
index 0000000000..536abcbdf1
--- /dev/null
+++ b/doc/install-guide/source/horizon-verify-obs.rst
@@ -0,0 +1,14 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the dashboard.
+
+
+Access the dashboard using a web browser at
+``http://controller/``.
+
+
+
+
+Authenticate using ``admin`` or ``demo`` user
+and ``default`` domain credentials.
diff --git a/doc/install-guide/source/horizon-verify-rdo.rst b/doc/install-guide/source/horizon-verify-rdo.rst
new file mode 100644
index 0000000000..43394ed166
--- /dev/null
+++ b/doc/install-guide/source/horizon-verify-rdo.rst
@@ -0,0 +1,14 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the dashboard.
+
+
+
+Access the dashboard using a web browser at
+``http://controller/dashboard``.
+
+
+
+Authenticate using ``admin`` or ``demo`` user
+and ``default`` domain credentials.
diff --git a/doc/install-guide/source/horizon-verify-ubuntu.rst b/doc/install-guide/source/horizon-verify-ubuntu.rst
new file mode 100644
index 0000000000..aad1ccb661
--- /dev/null
+++ b/doc/install-guide/source/horizon-verify-ubuntu.rst
@@ -0,0 +1,14 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the dashboard.
+
+
+
+
+Access the dashboard using a web browser at
+``http://controller/horizon``.
+
+
+Authenticate using ``admin`` or ``demo`` user
+and ``default`` domain credentials.
diff --git a/doc/install-guide/source/horizon-verify.rst b/doc/install-guide/source/horizon-verify.rst
index 10fc4cfea9..e9586fe064 100644
--- a/doc/install-guide/source/horizon-verify.rst
+++ b/doc/install-guide/source/horizon-verify.rst
@@ -1,28 +1,7 @@
Verify operation
~~~~~~~~~~~~~~~~
-Verify operation of the dashboard.
+.. toctree::
+ :glob:
-.. only:: obs or debian
-
- Access the dashboard using a web browser at
- ``http://controller/``.
-
-.. endonly
-
-.. only:: rdo
-
- Access the dashboard using a web browser at
- ``http://controller/dashboard``.
-
-.. endonly
-
-.. only:: ubuntu
-
- Access the dashboard using a web browser at
- ``http://controller/horizon``.
-
-.. endonly
-
-Authenticate using ``admin`` or ``demo`` user
-and ``default`` domain credentials.
+ horizon-verify-*
diff --git a/doc/install-guide/source/index-debian.rst b/doc/install-guide/source/index-debian.rst
new file mode 100644
index 0000000000..dbc6780173
--- /dev/null
+++ b/doc/install-guide/source/index-debian.rst
@@ -0,0 +1,84 @@
+==========================================
+OpenStack Installation Tutorial for Debian
+==========================================
+
+
+Abstract
+~~~~~~~~
+
+The OpenStack system consists of several key services that are separately
+installed. These services work together depending on your cloud
+needs and include the Compute, Identity, Networking, Image, Block Storage,
+Object Storage, Telemetry, Orchestration, and Database services. You
+can install any of these projects separately and configure them stand-alone
+or as connected entities.
+
+
+
+
+
+This guide walks through an installation by using packages
+available through Debian 8 (code name: Jessie).
+
+.. note::
+
+ This guide uses installation with debconf set to non-interactive
+ mode. That is, there will be no debconf prompt. To configure a computer
+ to use this mode, run the following command:
+
+ .. code-block:: console
+
+ # dpkg-reconfigure debconf
+
+ .. end
+
+ If you prefer to use debconf, refer to the debconf
+ install-guide for Debian.
+
+
+Explanations of configuration options and sample configuration files
+are included.
+
+.. note::
+ The Training Labs scripts provide an automated way of deploying the
+ cluster described in this Installation Guide into VirtualBox or KVM
+ VMs. You will need a desktop computer or a laptop with at least 8
+ GB memory and 20 GB free storage running Linux, macOS, or Windows.
+ Please see the
+ `OpenStack Training Labs `_.
+
+This guide documents the OpenStack Ocata release.
+
+.. warning::
+
+ This guide is a work-in-progress and is subject to frequent updates.
+ Pre-release packages have been used for testing, and some instructions
+ may not work with final versions. Please help us make this guide better
+ by reporting any errors you encounter.
+
+Contents
+~~~~~~~~
+
+.. toctree::
+ :maxdepth: 2
+
+ common/conventions.rst
+ overview.rst
+ environment.rst
+ keystone.rst
+ glance.rst
+ nova.rst
+ neutron.rst
+ horizon.rst
+ cinder.rst
+ additional-services.rst
+ launch-instance.rst
+ common/appendix.rst
+
+.. Pseudo only directive for each distribution used by the build tool.
+ This pseudo only directive for toctree only works fine with Tox.
+ When you directly build this guide with Sphinx,
+ some navigation menu may not work properly.
+.. Keep this pseudo only directive not to break translation tool chain
+ at the openstack-doc-tools repo until it is changed.
+.. end of contents
diff --git a/doc/install-guide/source/index-obs.rst b/doc/install-guide/source/index-obs.rst
new file mode 100644
index 0000000000..643cefcb38
--- /dev/null
+++ b/doc/install-guide/source/index-obs.rst
@@ -0,0 +1,72 @@
+======================================================================
+OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise
+======================================================================
+
+
+
+
+Abstract
+~~~~~~~~
+
+The OpenStack system consists of several key services that are separately
+installed. These services work together depending on your cloud
+needs and include the Compute, Identity, Networking, Image, Block Storage,
+Object Storage, Telemetry, Orchestration, and Database services. You
+can install any of these projects separately and configure them stand-alone
+or as connected entities.
+
+
+
+
+This guide will show you how to install OpenStack by using packages
+on openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 - for
+both SP1 and SP2 - through the Open Build Service Cloud repository.
+
+
+
+Explanations of configuration options and sample configuration files
+are included.
+
+.. note::
+ The Training Labs scripts provide an automated way of deploying the
+ cluster described in this Installation Guide into VirtualBox or KVM
+ VMs. You will need a desktop computer or a laptop with at least 8
+ GB memory and 20 GB free storage running Linux, macOS, or Windows.
+ Please see the
+ `OpenStack Training Labs `_.
+
+This guide documents the OpenStack Ocata release.
+
+.. warning::
+
+ This guide is a work-in-progress and is subject to frequent updates.
+ Pre-release packages have been used for testing, and some instructions
+ may not work with final versions. Please help us make this guide better
+ by reporting any errors you encounter.
+
+Contents
+~~~~~~~~
+
+.. toctree::
+ :maxdepth: 2
+
+ common/conventions.rst
+ overview.rst
+ environment.rst
+ keystone.rst
+ glance.rst
+ nova.rst
+ neutron.rst
+ horizon.rst
+ cinder.rst
+ additional-services.rst
+ launch-instance.rst
+ common/appendix.rst
+
+.. Pseudo only directive for each distribution used by the build tool.
+ This pseudo only directive for toctree only works fine with Tox.
+ When you directly build this guide with Sphinx,
+ some navigation menu may not work properly.
+.. Keep this pseudo only directive not to break translation tool chain
+ at the openstack-doc-tools repo until it is changed.
+.. end of contents
diff --git a/doc/install-guide/source/index-rdo.rst b/doc/install-guide/source/index-rdo.rst
new file mode 100644
index 0000000000..ae0e447c48
--- /dev/null
+++ b/doc/install-guide/source/index-rdo.rst
@@ -0,0 +1,73 @@
+=======================================================================
+OpenStack Installation Tutorial for Red Hat Enterprise Linux and CentOS
+=======================================================================
+
+
+
+
+
+Abstract
+~~~~~~~~
+
+The OpenStack system consists of several key services that are separately
+installed. These services work together depending on your cloud
+needs and include the Compute, Identity, Networking, Image, Block Storage,
+Object Storage, Telemetry, Orchestration, and Database services. You
+can install any of these projects separately and configure them stand-alone
+or as connected entities.
+
+
+This guide will show you how to install OpenStack by using packages
+available on Red Hat Enterprise Linux 7 and its derivatives through
+the RDO repository.
+
+
+
+
+
+Explanations of configuration options and sample configuration files
+are included.
+
+.. note::
+ The Training Labs scripts provide an automated way of deploying the
+ cluster described in this Installation Guide into VirtualBox or KVM
+ VMs. You will need a desktop computer or a laptop with at least 8
+ GB memory and 20 GB free storage running Linux, macOS, or Windows.
+ Please see the
+ `OpenStack Training Labs `_.
+
+This guide documents the OpenStack Ocata release.
+
+.. warning::
+
+ This guide is a work-in-progress and is subject to frequent updates.
+ Pre-release packages have been used for testing, and some instructions
+ may not work with final versions. Please help us make this guide better
+ by reporting any errors you encounter.
+
+Contents
+~~~~~~~~
+
+.. toctree::
+ :maxdepth: 2
+
+ common/conventions.rst
+ overview.rst
+ environment.rst
+ keystone.rst
+ glance.rst
+ nova.rst
+ neutron.rst
+ horizon.rst
+ cinder.rst
+ additional-services.rst
+ launch-instance.rst
+ common/appendix.rst
+
+.. Pseudo only directive for each distribution used by the build tool.
+ This pseudo only directive for toctree only works fine with Tox.
+ When you directly build this guide with Sphinx,
+ some navigation menu may not work properly.
+.. Keep this pseudo only directive not to break translation tool chain
+ at the openstack-doc-tools repo until it is changed.
+.. end of contents
diff --git a/doc/install-guide/source/index-ubuntu.rst b/doc/install-guide/source/index-ubuntu.rst
new file mode 100644
index 0000000000..100f88e8f9
--- /dev/null
+++ b/doc/install-guide/source/index-ubuntu.rst
@@ -0,0 +1,71 @@
+==========================================
+OpenStack Installation Tutorial for Ubuntu
+==========================================
+
+
+
+Abstract
+~~~~~~~~
+
+The OpenStack system consists of several key services that are separately
+installed. These services work together depending on your cloud
+needs and include the Compute, Identity, Networking, Image, Block Storage,
+Object Storage, Telemetry, Orchestration, and Database services. You
+can install any of these projects separately and configure them stand-alone
+or as connected entities.
+
+
+
+This guide will walk through an installation by using packages
+available through Canonical's Ubuntu Cloud archive repository for
+Ubuntu 16.04 (LTS).
+
+
+
+
+Explanations of configuration options and sample configuration files
+are included.
+
+.. note::
+ The Training Labs scripts provide an automated way of deploying the
+ cluster described in this Installation Guide into VirtualBox or KVM
+ VMs. You will need a desktop computer or a laptop with at least 8
+ GB memory and 20 GB free storage running Linux, macOS, or Windows.
+ Please see the
+ `OpenStack Training Labs `_.
+
+This guide documents the OpenStack Ocata release.
+
+.. warning::
+
+ This guide is a work-in-progress and is subject to frequent updates.
+ Pre-release packages have been used for testing, and some instructions
+ may not work with final versions. Please help us make this guide better
+ by reporting any errors you encounter.
+
+Contents
+~~~~~~~~
+
+.. toctree::
+ :maxdepth: 2
+
+ common/conventions.rst
+ overview.rst
+ environment.rst
+ keystone.rst
+ glance.rst
+ nova.rst
+ neutron.rst
+ horizon.rst
+ cinder.rst
+ additional-services.rst
+ launch-instance.rst
+ common/appendix.rst
+
+.. Pseudo only directive for each distribution used by the build tool.
+ This pseudo only directive for toctree only works fine with Tox.
+ When you directly build this guide with Sphinx,
+ some navigation menu may not work properly.
+.. Keep this pseudo only directive not to break translation tool chain
+ at the openstack-doc-tools repo until it is changed.
+.. end of contents
diff --git a/doc/install-guide/source/index.rst b/doc/install-guide/source/index.rst
index 847d958c4b..6d8169fbee 100644
--- a/doc/install-guide/source/index.rst
+++ b/doc/install-guide/source/index.rst
@@ -1,140 +1,11 @@
-.. title:: OpenStack Installation Tutorial
-
-.. Don't remove or change title tag manually, which is used by the build tool.
-
-.. only:: rdo
-
- =======================================================================
- OpenStack Installation Tutorial for Red Hat Enterprise Linux and CentOS
- =======================================================================
-
-.. endonly
-
-.. only:: obs
-
- ======================================================================
- OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise
- ======================================================================
-
-.. endonly
-
-.. only:: ubuntu
-
- ==========================================
- OpenStack Installation Tutorial for Ubuntu
- ==========================================
-
-.. endonly
-
-.. only:: debian
-
- ==========================================
- OpenStack Installation Tutorial for Debian
- ==========================================
-
-.. endonly
-
-Abstract
-~~~~~~~~
-
-The OpenStack system consists of several key services that are separately
-installed. These services work together depending on your cloud
-needs and include the Compute, Identity, Networking, Image, Block Storage,
-Object Storage, Telemetry, Orchestration, and Database services. You
-can install any of these projects separately and configure them stand-alone
-or as connected entities.
-
-.. only:: rdo
-
- This guide will show you how to install OpenStack by using packages
- available on Red Hat Enterprise Linux 7 and its derivatives through
- the RDO repository.
-
-.. endonly
-
-.. only:: ubuntu
-
- This guide will walk through an installation by using packages
- available through Canonical's Ubuntu Cloud archive repository for
- Ubuntu 16.04 (LTS).
-
-.. endonly
-
-.. only:: obs
-
- This guide will show you how to install OpenStack by using packages
- on openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 - for
- both SP1 and SP2 - through the Open Build Service Cloud repository.
-
-.. endonly
-
-.. only:: debian
-
- This guide walks through an installation by using packages
- available through Debian 8 (code name: Jessie).
-
- .. note::
-
- This guide uses installation with debconf set to non-interactive
- mode. That is, there will be no debconf prompt. To configure a computer
- to use this mode, run the following command:
-
- .. code-block:: console
-
- # dpkg-reconfigure debconf
-
- .. end
-
- If you prefer to use debconf, refer to the debconf
- install-guide for Debian.
-
-.. endonly
-
-Explanations of configuration options and sample configuration files
-are included.
-
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MaOS, or Windows.
- Please see the
- `OpenStack Training Labs `_.
-
-This guide documents the OpenStack Ocata release.
-
-.. warning::
-
- This guide is a work-in-progress and is subject to updates frequently.
- Pre-release packages have been used for testing, and some instructions
- may not work with final versions. Please help us make this guide better
- by reporting any errors you encounter.
-
-Contents
-~~~~~~~~
+=================================
+ OpenStack Installation Tutorial
+=================================
.. toctree::
- :maxdepth: 2
+ :maxdepth: 3
- common/conventions.rst
- overview.rst
- environment.rst
- keystone.rst
- glance.rst
- nova.rst
- neutron.rst
- horizon.rst
- cinder.rst
- additional-services.rst
- launch-instance.rst
- common/appendix.rst
-
-.. Pseudo only directive for each distribution used by the build tool.
- This pseudo only directive for toctree only works fine with Tox.
- When you directly build this guide with Sphinx,
- some navigation menu may not work properly.
-.. Keep this pseudo only directive not to break translation tool chain
- at the openstack-doc-tools repo until it is changed.
-.. only:: obs or rdo or ubuntu
-.. only:: debian
-.. end of contents
+ index-debian
+ index-obs
+ index-rdo
+ index-ubuntu
diff --git a/doc/install-guide/source/keystone-install-debian.rst b/doc/install-guide/source/keystone-install-debian.rst
new file mode 100644
index 0000000000..cf0a79e96f
--- /dev/null
+++ b/doc/install-guide/source/keystone-install-debian.rst
@@ -0,0 +1,197 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the OpenStack
+Identity service, code-named keystone, on the controller node. For
+scalability purposes, this configuration deploys Fernet tokens and
+the Apache HTTP server to handle requests.
+
+Prerequisites
+-------------
+
+Before you install and configure the Identity service, you must
+create a database.
+
+
+
+
+#. Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+2. Create the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE keystone;
+
+ .. end
+
+#. Grant proper access to the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with a suitable password.
+
+#. Exit the database access client.
+
+.. _keystone-install-configure-debian:
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+.. note::
+
+ This guide uses the Apache HTTP server with ``mod_wsgi`` to serve
+ Identity service requests on ports 5000 and 35357. By default, the
+ keystone service still listens on these ports. The package handles
+ all of the Apache configuration for you (including the activation of
+ the ``mod_wsgi`` apache2 module and keystone configuration in Apache).
+
+#. Run the following command to install the packages:
+
+ .. code-block:: console
+
+ # apt install keystone
+
+ .. end
+
+
+
+
+
+2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with the password you chose for the database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[token]`` section, configure the Fernet token provider:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [token]
+ # ...
+ provider = fernet
+
+ .. end
+
+3. Populate the Identity service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "keystone-manage db_sync" keystone
+
+ .. end
+
+4. Initialize Fernet key repositories:
+
+ .. code-block:: console
+
+ # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+ # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
+
+ .. end
+
+5. Bootstrap the Identity service:
+
+ .. code-block:: console
+
+ # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
+ --bootstrap-admin-url http://controller:35357/v3/ \
+ --bootstrap-internal-url http://controller:5000/v3/ \
+ --bootstrap-public-url http://controller:5000/v3/ \
+ --bootstrap-region-id RegionOne
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with a suitable password for an administrative user.
+
+Configure the Apache HTTP server
+--------------------------------
+
+
+
+#. Edit the ``/etc/apache2/apache2.conf`` file and configure the
+ ``ServerName`` option to reference the controller node:
+
+ .. path /etc/apache2/apache2.conf
+ .. code-block:: apache
+
+ ServerName controller
+
+ .. end
+
+
+
+.. note::
+
+ The Debian package will perform the below operations for you:
+
+ .. code-block:: console
+
+ # a2enmod wsgi
+ # a2ensite wsgi-keystone.conf
+ # invoke-rc.d apache2 restart
+
+ .. end
+
+
+
+
+Finalize the installation
+-------------------------
+
+
+
+
+2. Configure the administrative account:
+
+ .. code-block:: console
+
+ $ export OS_USERNAME=admin
+ $ export OS_PASSWORD=ADMIN_PASS
+ $ export OS_PROJECT_NAME=admin
+ $ export OS_USER_DOMAIN_NAME=Default
+ $ export OS_PROJECT_DOMAIN_NAME=Default
+ $ export OS_AUTH_URL=http://controller:35357/v3
+ $ export OS_IDENTITY_API_VERSION=3
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with the password used in the
+ ``keystone-manage bootstrap`` command in `keystone-install-configure-debian`_.
diff --git a/doc/install-guide/source/keystone-install-obs.rst b/doc/install-guide/source/keystone-install-obs.rst
new file mode 100644
index 0000000000..eb31bedfde
--- /dev/null
+++ b/doc/install-guide/source/keystone-install-obs.rst
@@ -0,0 +1,261 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the OpenStack
+Identity service, code-named keystone, on the controller node. For
+scalability purposes, this configuration deploys Fernet tokens and
+the Apache HTTP server to handle requests.
+
+Prerequisites
+-------------
+
+Before you install and configure the Identity service, you must
+create a database.
+
+
+.. note::
+
+ Before you begin, ensure you have the most recent version of
+   ``python-pyasn1`` `installed <https://pypi.org/project/pyasn1/>`_.
+
+
+
+
+#. Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+2. Create the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE keystone;
+
+ .. end
+
+#. Grant proper access to the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with a suitable password.
+
+#. Exit the database access client.
+
+.. _keystone-install-configure-obs:
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+.. note::
+
+ This guide uses the Apache HTTP server with ``mod_wsgi`` to serve
+ Identity service requests on ports 5000 and 35357. By default, the
+ keystone service still listens on these ports. Therefore, this guide
+ manually disables the keystone service.
+
+
+
+.. note::
+
+ Starting with the Newton release, SUSE OpenStack packages are shipping
+ with the upstream default configuration files. For example
+ ``/etc/keystone/keystone.conf``, with customizations in
+ ``/etc/keystone/keystone.conf.d/010-keystone.conf``. While the
+ following instructions modify the default configuration file, adding a
+ new file in ``/etc/keystone/keystone.conf.d`` achieves the same
+ result.
+
+
+
+
+
+
+#. Run the following command to install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-keystone apache2-mod_wsgi
+
+ .. end
+
+
+2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with the password you chose for the database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[token]`` section, configure the Fernet token provider:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [token]
+ # ...
+ provider = fernet
+
+ .. end
+
+3. Populate the Identity service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "keystone-manage db_sync" keystone
+
+ .. end
+
+4. Initialize Fernet key repositories:
+
+ .. code-block:: console
+
+ # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+ # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
+
+ .. end
+
+5. Bootstrap the Identity service:
+
+ .. code-block:: console
+
+ # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
+ --bootstrap-admin-url http://controller:35357/v3/ \
+ --bootstrap-internal-url http://controller:5000/v3/ \
+ --bootstrap-public-url http://controller:5000/v3/ \
+ --bootstrap-region-id RegionOne
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with a suitable password for an administrative user.
+
+Configure the Apache HTTP server
+--------------------------------
+
+
+
+
+
+#. Edit the ``/etc/sysconfig/apache2`` file and configure the
+ ``APACHE_SERVERNAME`` option to reference the controller node:
+
+ .. path /etc/sysconfig/apache2
+ .. code-block:: shell
+
+ APACHE_SERVERNAME="controller"
+
+ .. end
+
+#. Create the ``/etc/apache2/conf.d/wsgi-keystone.conf`` file
+ with the following content:
+
+ .. path /etc/apache2/conf.d/wsgi-keystone.conf
+ .. code-block:: apache
+
+      Listen 5000
+      Listen 35357
+
+      <VirtualHost *:5000>
+          WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+          WSGIProcessGroup keystone-public
+          WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+          WSGIApplicationGroup %{GLOBAL}
+          WSGIPassAuthorization On
+          ErrorLogFormat "%{cu}t %M"
+          ErrorLog /var/log/apache2/keystone.log
+          CustomLog /var/log/apache2/keystone_access.log combined
+
+          <Directory /usr/bin>
+              Require all granted
+          </Directory>
+      </VirtualHost>
+
+      <VirtualHost *:35357>
+          WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+          WSGIProcessGroup keystone-admin
+          WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+          WSGIApplicationGroup %{GLOBAL}
+          WSGIPassAuthorization On
+          ErrorLogFormat "%{cu}t %M"
+          ErrorLog /var/log/apache2/keystone.log
+          CustomLog /var/log/apache2/keystone_access.log combined
+
+          <Directory /usr/bin>
+              Require all granted
+          </Directory>
+      </VirtualHost>
+ .. end
+
+#. Recursively change the ownership of the ``/etc/keystone`` directory:
+
+ .. code-block:: console
+
+ # chown -R keystone:keystone /etc/keystone
+
+ .. end
+
+
+
+Finalize the installation
+-------------------------
+
+
+
+
+#. Start the Apache HTTP service and configure it to start when the system
+ boots:
+
+ .. code-block:: console
+
+ # systemctl enable apache2.service
+ # systemctl start apache2.service
+
+ .. end
+
+
+2. Configure the administrative account:
+
+ .. code-block:: console
+
+ $ export OS_USERNAME=admin
+ $ export OS_PASSWORD=ADMIN_PASS
+ $ export OS_PROJECT_NAME=admin
+ $ export OS_USER_DOMAIN_NAME=Default
+ $ export OS_PROJECT_DOMAIN_NAME=Default
+ $ export OS_AUTH_URL=http://controller:35357/v3
+ $ export OS_IDENTITY_API_VERSION=3
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with the password used in the
+ ``keystone-manage bootstrap`` command in `keystone-install-configure-obs`_.
diff --git a/doc/install-guide/source/keystone-install-rdo.rst b/doc/install-guide/source/keystone-install-rdo.rst
new file mode 100644
index 0000000000..b68f116e59
--- /dev/null
+++ b/doc/install-guide/source/keystone-install-rdo.rst
@@ -0,0 +1,203 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the OpenStack
+Identity service, code-named keystone, on the controller node. For
+scalability purposes, this configuration deploys Fernet tokens and
+the Apache HTTP server to handle requests.
+
+Prerequisites
+-------------
+
+Before you install and configure the Identity service, you must
+create a database.
+
+
+
+
+#. Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+2. Create the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE keystone;
+
+ .. end
+
+#. Grant proper access to the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with a suitable password.
+
+#. Exit the database access client.
+
+.. _keystone-install-configure-rdo:
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+.. note::
+
+ This guide uses the Apache HTTP server with ``mod_wsgi`` to serve
+ Identity service requests on ports 5000 and 35357. By default, the
+ keystone service still listens on these ports. Therefore, this guide
+ manually disables the keystone service.
+
+
+
+
+
+
+#. Run the following command to install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-keystone httpd mod_wsgi
+
+ .. end
+
+
+
+2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with the password you chose for the database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[token]`` section, configure the Fernet token provider:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [token]
+ # ...
+ provider = fernet
+
+ .. end
+
+3. Populate the Identity service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "keystone-manage db_sync" keystone
+
+ .. end
+
+4. Initialize Fernet key repositories:
+
+ .. code-block:: console
+
+ # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+ # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
+
+ .. end
+
+5. Bootstrap the Identity service:
+
+ .. code-block:: console
+
+ # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
+ --bootstrap-admin-url http://controller:35357/v3/ \
+ --bootstrap-internal-url http://controller:5000/v3/ \
+ --bootstrap-public-url http://controller:5000/v3/ \
+ --bootstrap-region-id RegionOne
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with a suitable password for an administrative user.
+
+Configure the Apache HTTP server
+--------------------------------
+
+
+#. Edit the ``/etc/httpd/conf/httpd.conf`` file and configure the
+ ``ServerName`` option to reference the controller node:
+
+   .. path /etc/httpd/conf/httpd.conf
+ .. code-block:: apache
+
+ ServerName controller
+
+ .. end
+
+#. Create a link to the ``/usr/share/keystone/wsgi-keystone.conf`` file:
+
+ .. code-block:: console
+
+ # ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
+
+ .. end
+
+
+
+
+
+
+Finalize the installation
+-------------------------
+
+
+
+#. Start the Apache HTTP service and configure it to start when the system
+ boots:
+
+ .. code-block:: console
+
+ # systemctl enable httpd.service
+ # systemctl start httpd.service
+
+ .. end
+
+
+
+2. Configure the administrative account:
+
+ .. code-block:: console
+
+ $ export OS_USERNAME=admin
+ $ export OS_PASSWORD=ADMIN_PASS
+ $ export OS_PROJECT_NAME=admin
+ $ export OS_USER_DOMAIN_NAME=Default
+ $ export OS_PROJECT_DOMAIN_NAME=Default
+ $ export OS_AUTH_URL=http://controller:35357/v3
+ $ export OS_IDENTITY_API_VERSION=3
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with the password used in the
+ ``keystone-manage bootstrap`` command in `keystone-install-configure-rdo`_.
diff --git a/doc/install-guide/source/keystone-install-ubuntu.rst b/doc/install-guide/source/keystone-install-ubuntu.rst
new file mode 100644
index 0000000000..5692a00683
--- /dev/null
+++ b/doc/install-guide/source/keystone-install-ubuntu.rst
@@ -0,0 +1,193 @@
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the OpenStack
+Identity service, code-named keystone, on the controller node. For
+scalability purposes, this configuration deploys Fernet tokens and
+the Apache HTTP server to handle requests.
+
+Prerequisites
+-------------
+
+Before you install and configure the Identity service, you must
+create a database.
+
+
+
+#. Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ # mysql
+
+ .. end
+
+
+
+2. Create the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE keystone;
+
+ .. end
+
+#. Grant proper access to the ``keystone`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
+ IDENTIFIED BY 'KEYSTONE_DBPASS';
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with a suitable password.
+
+#. Exit the database access client.
+
+.. _keystone-install-configure-ubuntu:
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+.. note::
+
+ This guide uses the Apache HTTP server with ``mod_wsgi`` to serve
+ Identity service requests on ports 5000 and 35357. By default, the
+ keystone service still listens on these ports. The package handles
+ all of the Apache configuration for you (including the activation of
+ the ``mod_wsgi`` apache2 module and keystone configuration in Apache).
+
+#. Run the following command to install the packages:
+
+ .. code-block:: console
+
+ # apt install keystone
+
+ .. end
+
+
+
+
+
+2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
+
+ .. end
+
+ Replace ``KEYSTONE_DBPASS`` with the password you chose for the database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[token]`` section, configure the Fernet token provider:
+
+ .. path /etc/keystone/keystone.conf
+ .. code-block:: ini
+
+ [token]
+ # ...
+ provider = fernet
+
+ .. end
+
+3. Populate the Identity service database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "keystone-manage db_sync" keystone
+
+ .. end
+
+4. Initialize Fernet key repositories:
+
+ .. code-block:: console
+
+ # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+ # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
+
+ .. end
+
+5. Bootstrap the Identity service:
+
+ .. code-block:: console
+
+ # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
+ --bootstrap-admin-url http://controller:35357/v3/ \
+ --bootstrap-internal-url http://controller:5000/v3/ \
+ --bootstrap-public-url http://controller:5000/v3/ \
+ --bootstrap-region-id RegionOne
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with a suitable password for an administrative user.
+
+Configure the Apache HTTP server
+--------------------------------
+
+
+
+#. Edit the ``/etc/apache2/apache2.conf`` file and configure the
+ ``ServerName`` option to reference the controller node:
+
+ .. path /etc/apache2/apache2.conf
+ .. code-block:: apache
+
+ ServerName controller
+
+ .. end
+
+
+
+
+
+Finalize the installation
+-------------------------
+
+
+#. Restart the Apache service:
+
+ .. code-block:: console
+
+ # service apache2 restart
+
+ .. end
+
+
+
+
+2. Configure the administrative account:
+
+ .. code-block:: console
+
+ $ export OS_USERNAME=admin
+ $ export OS_PASSWORD=ADMIN_PASS
+ $ export OS_PROJECT_NAME=admin
+ $ export OS_USER_DOMAIN_NAME=Default
+ $ export OS_PROJECT_DOMAIN_NAME=Default
+ $ export OS_AUTH_URL=http://controller:35357/v3
+ $ export OS_IDENTITY_API_VERSION=3
+
+ .. end
+
+ Replace ``ADMIN_PASS`` with the password used in the
+ ``keystone-manage bootstrap`` command in `keystone-install-configure-ubuntu`_.
diff --git a/doc/install-guide/source/keystone-install.rst b/doc/install-guide/source/keystone-install.rst
index 98992370bd..ad47da9089 100644
--- a/doc/install-guide/source/keystone-install.rst
+++ b/doc/install-guide/source/keystone-install.rst
@@ -8,385 +8,7 @@ Identity service, code-named keystone, on the controller node. For
scalability purposes, this configuration deploys Fernet tokens and
the Apache HTTP server to handle requests.
-Prerequisites
--------------
+.. toctree::
+ :glob:
-Before you install and configure the Identity service, you must
-create a database.
-
-.. only:: obs
-
- .. note::
-
- Before you begin, ensure you have the most recent version of
- ``python-pyasn1`` `installed `_.
-
-.. endonly
-
-.. only:: ubuntu
-
- #. Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- # mysql
-
- .. end
-
- .. endonly
-
-.. only:: rdo or debian or obs
-
- #. Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- $ mysql -u root -p
-
- .. end
-
- .. endonly
-
-2. Create the ``keystone`` database:
-
- .. code-block:: console
-
- MariaDB [(none)]> CREATE DATABASE keystone;
-
- .. end
-
-#. Grant proper access to the ``keystone`` database:
-
- .. code-block:: console
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
- IDENTIFIED BY 'KEYSTONE_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
- IDENTIFIED BY 'KEYSTONE_DBPASS';
-
- .. end
-
- Replace ``KEYSTONE_DBPASS`` with a suitable password.
-
-#. Exit the database access client.
-
-.. _keystone-install-configure:
-
-Install and configure components
---------------------------------
-
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-.. only:: obs or rdo
-
- .. note::
-
- This guide uses the Apache HTTP server with ``mod_wsgi`` to serve
- Identity service requests on ports 5000 and 35357. By default, the
- keystone service still listens on these ports. Therefore, this guide
- manually disables the keystone service.
-
-.. endonly
-
-.. only:: obs
-
- .. note::
-
- Starting with the Newton release, SUSE OpenStack packages are shipping
- with the upstream default configuration files. For example
- ``/etc/keystone/keystone.conf``, with customizations in
- ``/etc/keystone/keystone.conf.d/010-keystone.conf``. While the
- following instructions modify the default configuration file, adding a
- new file in ``/etc/keystone/keystone.conf.d`` achieves the same
- result.
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- .. note::
-
- This guide uses the Apache HTTP server with ``mod_wsgi`` to serve
- Identity service requests on ports 5000 and 35357. By default, the
- keystone service still listens on these ports. The package handles
- all of the Apache configuration for you (including the activation of
- the ``mod_wsgi`` apache2 module and keystone configuration in Apache).
-
- #. Run the following command to install the packages:
-
- .. code-block:: console
-
- # apt install keystone
-
- .. end
-
-.. endonly
-
-
-.. only:: rdo
-
- #. Run the following command to install the packages:
-
- .. code-block:: console
-
- # yum install openstack-keystone httpd mod_wsgi
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- #. Run the following command to install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-keystone apache2-mod_wsgi
-
- .. end
-
-.. endonly
-
-2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following
- actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/keystone/keystone.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
-
- .. end
-
- Replace ``KEYSTONE_DBPASS`` with the password you chose for the database.
-
- .. note::
-
- Comment out or remove any other ``connection`` options in the
- ``[database]`` section.
-
- * In the ``[token]`` section, configure the Fernet token provider:
-
- .. path /etc/keystone/keystone.conf
- .. code-block:: ini
-
- [token]
- # ...
- provider = fernet
-
- .. end
-
-3. Populate the Identity service database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "keystone-manage db_sync" keystone
-
- .. end
-
-4. Initialize Fernet key repositories:
-
- .. code-block:: console
-
- # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
- # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
-
- .. end
-
-5. Bootstrap the Identity service:
-
- .. code-block:: console
-
- # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
- --bootstrap-admin-url http://controller:35357/v3/ \
- --bootstrap-internal-url http://controller:5000/v3/ \
- --bootstrap-public-url http://controller:5000/v3/ \
- --bootstrap-region-id RegionOne
-
- .. end
-
- Replace ``ADMIN_PASS`` with a suitable password for an administrative user.
-
-Configure the Apache HTTP server
---------------------------------
-
-.. only:: rdo
-
- #. Edit the ``/etc/httpd/conf/httpd.conf`` file and configure the
- ``ServerName`` option to reference the controller node:
-
- .. path /etc/httpd/conf/httpd
- .. code-block:: apache
-
- ServerName controller
-
- .. end
-
- #. Create a link to the ``/usr/share/keystone/wsgi-keystone.conf`` file:
-
- .. code-block:: console
-
- # ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Edit the ``/etc/apache2/apache2.conf`` file and configure the
- ``ServerName`` option to reference the controller node:
-
- .. path /etc/apache2/apache2.conf
- .. code-block:: apache
-
- ServerName controller
-
- .. end
-
-.. endonly
-
-.. only:: debian
-
- .. note::
-
- The Debian package will perform the below operations for you:
-
- .. code-block:: console
-
- # a2enmod wsgi
- # a2ensite wsgi-keystone.conf
- # invoke-rc.d apache2 restart
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- #. Edit the ``/etc/sysconfig/apache2`` file and configure the
- ``APACHE_SERVERNAME`` option to reference the controller node:
-
- .. path /etc/sysconfig/apache2
- .. code-block:: shell
-
- APACHE_SERVERNAME="controller"
-
- .. end
-
- #. Create the ``/etc/apache2/conf.d/wsgi-keystone.conf`` file
- with the following content:
-
- .. path /etc/apache2/conf.d/wsgi-keystone.conf
- .. code-block:: apache
-
- Listen 5000
- Listen 35357
-
-
- WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / /usr/bin/keystone-wsgi-public
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%{cu}t %M"
- ErrorLog /var/log/apache2/keystone.log
- CustomLog /var/log/apache2/keystone_access.log combined
-
-
- Require all granted
-
-
-
-
- WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%{cu}t %M"
- ErrorLog /var/log/apache2/keystone.log
- CustomLog /var/log/apache2/keystone_access.log combined
-
-
- Require all granted
-
-
-
- .. end
-
- #. Recursively change the ownership of the ``/etc/keystone`` directory:
-
- .. code-block:: console
-
- # chown -R keystone:keystone /etc/keystone
-
- .. end
-
-.. endonly
-
-
-Finalize the installation
--------------------------
-
-.. only:: ubuntu
-
- #. Restart the Apache service:
-
- .. code-block:: console
-
- # service apache2 restart
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Start the Apache HTTP service and configure it to start when the system
- boots:
-
- .. code-block:: console
-
- # systemctl enable httpd.service
- # systemctl start httpd.service
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- #. Start the Apache HTTP service and configure it to start when the system
- boots:
-
- .. code-block:: console
-
- # systemctl enable apache2.service
- # systemctl start apache2.service
-
- .. end
-
-.. endonly
-
-2. Configure the administrative account
-
- .. code-block:: console
-
- $ export OS_USERNAME=admin
- $ export OS_PASSWORD=ADMIN_PASS
- $ export OS_PROJECT_NAME=admin
- $ export OS_USER_DOMAIN_NAME=Default
- $ export OS_PROJECT_DOMAIN_NAME=Default
- $ export OS_AUTH_URL=http://controller:35357/v3
- $ export OS_IDENTITY_API_VERSION=3
-
- .. end
-
- Replace ``ADMIN_PASS`` with the password used in the
- ``keystone-manage bootstrap`` command in `keystone-install-configure`_.
+ keystone-install-*
diff --git a/doc/install-guide/source/keystone-verify-debian.rst b/doc/install-guide/source/keystone-verify-debian.rst
new file mode 100644
index 0000000000..545e18f4c1
--- /dev/null
+++ b/doc/install-guide/source/keystone-verify-debian.rst
@@ -0,0 +1,74 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the Identity service before installing other
+services.
+
+.. note::
+
+ Perform these commands on the controller node.
+
+
+
+2. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD``
+   environment variables:
+
+ .. code-block:: console
+
+ $ unset OS_AUTH_URL OS_PASSWORD
+
+ .. end
+
+3. As the ``admin`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:35357/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name admin --os-username admin token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:14:07.056119Z |
+ | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv |
+ | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 |
+ | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws |
+ | project_id | 343d245e850143a096806dfaefa9afdc |
+ | user_id | ac3377633149401296f6c0d92d79dc16 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``admin`` user.
+
+4. As the ``demo`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:5000/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name demo --os-username demo token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:15:39.014479Z |
+ | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW |
+ | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ |
+ | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U |
+ | project_id | ed0b60bf607743088218b0a533d5943f |
+ | user_id | 58126687cbcc4888bfa9ab73a2256f27 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``demo``
+ user and API port 5000 which only allows regular (non-admin)
+ access to the Identity service API.
diff --git a/doc/install-guide/source/keystone-verify-obs.rst b/doc/install-guide/source/keystone-verify-obs.rst
new file mode 100644
index 0000000000..8989a99c2b
--- /dev/null
+++ b/doc/install-guide/source/keystone-verify-obs.rst
@@ -0,0 +1,83 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the Identity service before installing other
+services.
+
+.. note::
+
+ Perform these commands on the controller node.
+
+
+#. For security reasons, disable the temporary authentication
+ token mechanism:
+
+ Edit the ``/etc/keystone/keystone-paste.ini``
+ file and remove ``admin_token_auth`` from the
+ ``[pipeline:public_api]``, ``[pipeline:admin_api]``,
+ and ``[pipeline:api_v3]`` sections.
+
+
+
+2. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD``
+   environment variables:
+
+ .. code-block:: console
+
+ $ unset OS_AUTH_URL OS_PASSWORD
+
+ .. end
+
+3. As the ``admin`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:35357/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name admin --os-username admin token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:14:07.056119Z |
+ | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv |
+ | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 |
+ | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws |
+ | project_id | 343d245e850143a096806dfaefa9afdc |
+ | user_id | ac3377633149401296f6c0d92d79dc16 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``admin`` user.
+
+4. As the ``demo`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:5000/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name demo --os-username demo token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:15:39.014479Z |
+ | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW |
+ | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ |
+ | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U |
+ | project_id | ed0b60bf607743088218b0a533d5943f |
+ | user_id | 58126687cbcc4888bfa9ab73a2256f27 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``demo``
+ user and API port 5000 which only allows regular (non-admin)
+ access to the Identity service API.
diff --git a/doc/install-guide/source/keystone-verify-rdo.rst b/doc/install-guide/source/keystone-verify-rdo.rst
new file mode 100644
index 0000000000..943e13ebbd
--- /dev/null
+++ b/doc/install-guide/source/keystone-verify-rdo.rst
@@ -0,0 +1,83 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the Identity service before installing other
+services.
+
+.. note::
+
+ Perform these commands on the controller node.
+
+
+
+#. For security reasons, disable the temporary authentication
+ token mechanism:
+
+ Edit the ``/etc/keystone/keystone-paste.ini``
+ file and remove ``admin_token_auth`` from the
+ ``[pipeline:public_api]``, ``[pipeline:admin_api]``,
+ and ``[pipeline:api_v3]`` sections.
+
+
+2. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD``
+   environment variables:
+
+ .. code-block:: console
+
+ $ unset OS_AUTH_URL OS_PASSWORD
+
+ .. end
+
+3. As the ``admin`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:35357/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name admin --os-username admin token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:14:07.056119Z |
+ | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv |
+ | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 |
+ | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws |
+ | project_id | 343d245e850143a096806dfaefa9afdc |
+ | user_id | ac3377633149401296f6c0d92d79dc16 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``admin`` user.
+
+4. As the ``demo`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:5000/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name demo --os-username demo token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:15:39.014479Z |
+ | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW |
+ | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ |
+ | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U |
+ | project_id | ed0b60bf607743088218b0a533d5943f |
+ | user_id | 58126687cbcc4888bfa9ab73a2256f27 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``demo``
+ user and API port 5000 which only allows regular (non-admin)
+ access to the Identity service API.
diff --git a/doc/install-guide/source/keystone-verify-ubuntu.rst b/doc/install-guide/source/keystone-verify-ubuntu.rst
new file mode 100644
index 0000000000..8989a99c2b
--- /dev/null
+++ b/doc/install-guide/source/keystone-verify-ubuntu.rst
@@ -0,0 +1,83 @@
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the Identity service before installing other
+services.
+
+.. note::
+
+ Perform these commands on the controller node.
+
+
+#. For security reasons, disable the temporary authentication
+ token mechanism:
+
+ Edit the ``/etc/keystone/keystone-paste.ini``
+ file and remove ``admin_token_auth`` from the
+ ``[pipeline:public_api]``, ``[pipeline:admin_api]``,
+ and ``[pipeline:api_v3]`` sections.
+
+
+
+2. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD``
+   environment variables:
+
+ .. code-block:: console
+
+ $ unset OS_AUTH_URL OS_PASSWORD
+
+ .. end
+
+3. As the ``admin`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:35357/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name admin --os-username admin token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:14:07.056119Z |
+ | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv |
+ | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 |
+ | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws |
+ | project_id | 343d245e850143a096806dfaefa9afdc |
+ | user_id | ac3377633149401296f6c0d92d79dc16 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``admin`` user.
+
+4. As the ``demo`` user, request an authentication token:
+
+ .. code-block:: console
+
+ $ openstack --os-auth-url http://controller:5000/v3 \
+ --os-project-domain-name Default --os-user-domain-name Default \
+ --os-project-name demo --os-username demo token issue
+
+ Password:
+ +------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +------------+-----------------------------------------------------------------+
+ | expires | 2016-02-12T20:15:39.014479Z |
+ | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW |
+ | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ |
+ | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U |
+ | project_id | ed0b60bf607743088218b0a533d5943f |
+ | user_id | 58126687cbcc4888bfa9ab73a2256f27 |
+ +------------+-----------------------------------------------------------------+
+
+ .. end
+
+ .. note::
+
+ This command uses the password for the ``demo``
+ user and API port 5000 which only allows regular (non-admin)
+ access to the Identity service API.
diff --git a/doc/install-guide/source/keystone-verify.rst b/doc/install-guide/source/keystone-verify.rst
index f7e066f7f0..f81466ab0a 100644
--- a/doc/install-guide/source/keystone-verify.rst
+++ b/doc/install-guide/source/keystone-verify.rst
@@ -8,89 +8,7 @@ services.
Perform these commands on the controller node.
-.. only:: obs or ubuntu
+.. toctree::
+ :glob:
- #. For security reasons, disable the temporary authentication
- token mechanism:
-
- Edit the ``/etc/keystone/keystone-paste.ini``
- file and remove ``admin_token_auth`` from the
- ``[pipeline:public_api]``, ``[pipeline:admin_api]``,
- and ``[pipeline:api_v3]`` sections.
-
-.. endonly
-
-.. only:: rdo
-
- #. For security reasons, disable the temporary authentication
- token mechanism:
-
- Edit the ``/etc/keystone/keystone-paste.ini``
- file and remove ``admin_token_auth`` from the
- ``[pipeline:public_api]``, ``[pipeline:admin_api]``,
- and ``[pipeline:api_v3]`` sections.
-
-.. endonly
-
-2. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD``
- environment variable:
-
- .. code-block:: console
-
- $ unset OS_AUTH_URL OS_PASSWORD
-
- .. end
-
-3. As the ``admin`` user, request an authentication token:
-
- .. code-block:: console
-
- $ openstack --os-auth-url http://controller:35357/v3 \
- --os-project-domain-name Default --os-user-domain-name Default \
- --os-project-name admin --os-username admin token issue
-
- Password:
- +------------+-----------------------------------------------------------------+
- | Field | Value |
- +------------+-----------------------------------------------------------------+
- | expires | 2016-02-12T20:14:07.056119Z |
- | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv |
- | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 |
- | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws |
- | project_id | 343d245e850143a096806dfaefa9afdc |
- | user_id | ac3377633149401296f6c0d92d79dc16 |
- +------------+-----------------------------------------------------------------+
-
- .. end
-
- .. note::
-
- This command uses the password for the ``admin`` user.
-
-4. As the ``demo`` user, request an authentication token:
-
- .. code-block:: console
-
- $ openstack --os-auth-url http://controller:5000/v3 \
- --os-project-domain-name Default --os-user-domain-name Default \
- --os-project-name demo --os-username demo token issue
-
- Password:
- +------------+-----------------------------------------------------------------+
- | Field | Value |
- +------------+-----------------------------------------------------------------+
- | expires | 2016-02-12T20:15:39.014479Z |
- | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW |
- | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ |
- | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U |
- | project_id | ed0b60bf607743088218b0a533d5943f |
- | user_id | 58126687cbcc4888bfa9ab73a2256f27 |
- +------------+-----------------------------------------------------------------+
-
- .. end
-
- .. note::
-
- This command uses the password for the ``demo``
- user and API port 5000 which only allows regular (non-admin)
- access to the Identity service API.
+ keystone-verify-*
diff --git a/doc/install-guide/source/neutron-compute-install-debian.rst b/doc/install-guide/source/neutron-compute-install-debian.rst
new file mode 100644
index 0000000000..f7b91b28bc
--- /dev/null
+++ b/doc/install-guide/source/neutron-compute-install-debian.rst
@@ -0,0 +1,146 @@
+Install and configure compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The compute node handles connectivity and :term:`security groups <security group>` for instances.
+
+
+Install the components
+----------------------
+
+.. code-block:: console
+
+ # apt install neutron-linuxbridge-agent
+
+.. end
+
+
+
+
+Configure the common component
+------------------------------
+
+The Networking common component configuration includes the
+authentication mechanism, message queue, and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, comment out any ``connection`` options
+ because compute nodes do not directly access the database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack``
+ account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+
+Configure networking options
+----------------------------
+
+Choose the same networking option that you chose for the controller node to
+configure services specific to it. Afterwards, return here and proceed to
+:ref:`neutron-compute-compute-debian`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-compute-install-option1.rst
+ neutron-compute-install-option2.rst
+
+.. _neutron-compute-compute-debian:
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+Finalize installation
+---------------------
+
+
+
+
+#. Restart the Compute service:
+
+ .. code-block:: console
+
+ # service nova-compute restart
+
+ .. end
+
+#. Restart the Linux bridge agent:
+
+ .. code-block:: console
+
+ # service neutron-linuxbridge-agent restart
+
+ .. end
+
diff --git a/doc/install-guide/source/neutron-compute-install-obs.rst b/doc/install-guide/source/neutron-compute-install-obs.rst
new file mode 100644
index 0000000000..2cc9b8ab96
--- /dev/null
+++ b/doc/install-guide/source/neutron-compute-install-obs.rst
@@ -0,0 +1,161 @@
+Install and configure compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The compute node handles connectivity and :term:`security groups <security group>` for instances.
+
+
+
+
+Install the components
+----------------------
+
+.. code-block:: console
+
+ # zypper install --no-recommends \
+ openstack-neutron-linuxbridge-agent bridge-utils
+
+.. end
+
+
+Configure the common component
+------------------------------
+
+The Networking common component configuration includes the
+authentication mechanism, message queue, and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, comment out any ``connection`` options
+ because compute nodes do not directly access the database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack``
+ account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+
+Configure networking options
+----------------------------
+
+Choose the same networking option that you chose for the controller node to
+configure services specific to it. Afterwards, return here and proceed to
+:ref:`neutron-compute-compute-obs`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-compute-install-option1.rst
+ neutron-compute-install-option2.rst
+
+.. _neutron-compute-compute-obs:
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+Finalize installation
+---------------------
+
+
+
+#. The Networking service initialization scripts expect the variable
+ ``NEUTRON_PLUGIN_CONF`` in the ``/etc/sysconfig/neutron`` file to
+ reference the ML2 plug-in configuration file. Ensure that the
+ ``/etc/sysconfig/neutron`` file contains the following:
+
+ .. path /etc/sysconfig/neutron
+ .. code-block:: ini
+
+ NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini"
+
+ .. end
+
+#. Restart the Compute service:
+
+ .. code-block:: console
+
+ # systemctl restart openstack-nova-compute.service
+
+ .. end
+
+#. Start the Linux Bridge agent and configure it to start when the
+ system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-neutron-linuxbridge-agent.service
+ # systemctl start openstack-neutron-linuxbridge-agent.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/neutron-compute-install-option1.rst b/doc/install-guide/source/neutron-compute-install-option1.rst
index b5ea21ae64..068f083120 100644
--- a/doc/install-guide/source/neutron-compute-install-option1.rst
+++ b/doc/install-guide/source/neutron-compute-install-option1.rst
@@ -50,5 +50,4 @@ networking infrastructure for instances and handles security groups.
.. end
-Return to
-:ref:`Networking compute node configuration `.
+Return to *Networking compute node configuration*.
diff --git a/doc/install-guide/source/neutron-compute-install-option2.rst b/doc/install-guide/source/neutron-compute-install-option2.rst
index 39947e5e84..8d711b10de 100644
--- a/doc/install-guide/source/neutron-compute-install-option2.rst
+++ b/doc/install-guide/source/neutron-compute-install-option2.rst
@@ -61,5 +61,4 @@ networking infrastructure for instances and handles security groups.
.. end
-Return to
-:ref:`Networking compute node configuration `.
+Return to *Networking compute node configuration*.
diff --git a/doc/install-guide/source/neutron-compute-install-rdo.rst b/doc/install-guide/source/neutron-compute-install-rdo.rst
new file mode 100644
index 0000000000..b11ee9b2ee
--- /dev/null
+++ b/doc/install-guide/source/neutron-compute-install-rdo.rst
@@ -0,0 +1,164 @@
+Install and configure compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The compute node handles connectivity and :term:`security groups <security group>` for instances.
+
+
+
+Install the components
+----------------------
+
+.. todo:
+
+ https://bugzilla.redhat.com/show_bug.cgi?id=1334626
+
+.. code-block:: console
+
+ # yum install openstack-neutron-linuxbridge ebtables ipset
+
+.. end
+
+
+
+Configure the common component
+------------------------------
+
+The Networking common component configuration includes the
+authentication mechanism, message queue, and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, comment out any ``connection`` options
+ because compute nodes do not directly access the database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack``
+ account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/neutron/tmp
+
+ .. end
+
+
+
+Configure networking options
+----------------------------
+
+Choose the same networking option that you chose for the controller node to
+configure services specific to it. Afterwards, return here and proceed to
+:ref:`neutron-compute-compute-rdo`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-compute-install-option1.rst
+ neutron-compute-install-option2.rst
+
+.. _neutron-compute-compute-rdo:
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+Finalize installation
+---------------------
+
+
+#. Restart the Compute service:
+
+ .. code-block:: console
+
+ # systemctl restart openstack-nova-compute.service
+
+ .. end
+
+#. Start the Linux bridge agent and configure it to start when the
+ system boots:
+
+ .. code-block:: console
+
+ # systemctl enable neutron-linuxbridge-agent.service
+ # systemctl start neutron-linuxbridge-agent.service
+
+ .. end
+
+
+
diff --git a/doc/install-guide/source/neutron-compute-install-ubuntu.rst b/doc/install-guide/source/neutron-compute-install-ubuntu.rst
new file mode 100644
index 0000000000..0dec38d2e5
--- /dev/null
+++ b/doc/install-guide/source/neutron-compute-install-ubuntu.rst
@@ -0,0 +1,146 @@
+Install and configure compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The compute node handles connectivity and :term:`security groups <security group>` for instances.
+
+
+Install the components
+----------------------
+
+.. code-block:: console
+
+ # apt install neutron-linuxbridge-agent
+
+.. end
+
+
+
+
+Configure the common component
+------------------------------
+
+The Networking common component configuration includes the
+authentication mechanism, message queue, and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, comment out any ``connection`` options
+ because compute nodes do not directly access the database.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack``
+ account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+
+Configure networking options
+----------------------------
+
+Choose the same networking option that you chose for the controller node to
+configure services specific to it. Afterwards, return here and proceed to
+:ref:`neutron-compute-compute-ubuntu`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-compute-install-option1.rst
+ neutron-compute-install-option2.rst
+
+.. _neutron-compute-compute-ubuntu:
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+Finalize installation
+---------------------
+
+
+
+
+#. Restart the Compute service:
+
+ .. code-block:: console
+
+ # service nova-compute restart
+
+ .. end
+
+#. Restart the Linux bridge agent:
+
+ .. code-block:: console
+
+ # service neutron-linuxbridge-agent restart
+
+ .. end
+
diff --git a/doc/install-guide/source/neutron-compute-install.rst b/doc/install-guide/source/neutron-compute-install.rst
index 7b75e6efff..a33a92bb43 100644
--- a/doc/install-guide/source/neutron-compute-install.rst
+++ b/doc/install-guide/source/neutron-compute-install.rst
@@ -1,247 +1,9 @@
Install and configure compute node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The compute node handles connectivity and :term:`security groups ` for instances.
-
-.. only:: ubuntu or debian
-
- Install the components
- ----------------------
-
- .. code-block:: console
-
- # apt install neutron-linuxbridge-agent
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- Install the components
- ----------------------
-
- .. todo:
-
- https://bugzilla.redhat.com/show_bug.cgi?id=1334626
-
- .. code-block:: console
-
- # yum install openstack-neutron-linuxbridge ebtables ipset
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- Install the components
- ----------------------
-
- .. code-block:: console
-
- # zypper install --no-recommends \
- openstack-neutron-linuxbridge-agent bridge-utils
-
- .. end
-
-.. endonly
-
-Configure the common component
-------------------------------
-
-The Networking common component configuration includes the
-authentication mechanism, message queue, and plug-in.
-
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
- actions:
-
- * In the ``[database]`` section, comment out any ``connection`` options
- because compute nodes do not directly access the database.
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for the ``openstack``
- account in RabbitMQ.
-
- * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
- Identity service access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = neutron
- password = NEUTRON_PASS
-
- .. end
-
- Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
- user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- .. only:: rdo
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/neutron/tmp
-
- .. end
-
- .. endonly
-
-
-Configure networking options
-----------------------------
-
-Choose the same networking option that you chose for the controller node to
-configure services specific to it. Afterwards, return here and proceed to
-:ref:`neutron-compute-compute`.
-
.. toctree::
- :maxdepth: 1
- neutron-compute-install-option1.rst
- neutron-compute-install-option2.rst
-
-.. _neutron-compute-compute:
-
-Configure the Compute service to use the Networking service
------------------------------------------------------------
-
-* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
-
- * In the ``[neutron]`` section, configure access parameters:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [neutron]
- # ...
- url = http://controller:9696
- auth_url = http://controller:35357
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- region_name = RegionOne
- project_name = service
- username = neutron
- password = NEUTRON_PASS
-
- .. end
-
- Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
- user in the Identity service.
-
-Finalize installation
----------------------
-
-.. only:: rdo
-
- #. Restart the Compute service:
-
- .. code-block:: console
-
- # systemctl restart openstack-nova-compute.service
-
- .. end
-
- #. Start the Linux bridge agent and configure it to start when the
- system boots:
-
- .. code-block:: console
-
- # systemctl enable neutron-linuxbridge-agent.service
- # systemctl start neutron-linuxbridge-agent.service
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- #. The Networking service initialization scripts expect the variable
- ``NEUTRON_PLUGIN_CONF`` in the ``/etc/sysconfig/neutron`` file to
- reference the ML2 plug-in configuration file. Ensure that the
- ``/etc/sysconfig/neutron`` file contains the following:
-
- .. path /etc/sysconfig/neutron
- .. code-block:: ini
-
- NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini"
-
- .. end
-
- #. Restart the Compute service:
-
- .. code-block:: console
-
- # systemctl restart openstack-nova-compute.service
-
- .. end
-
- #. Start the Linux Bridge agent and configure it to start when the
- system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-neutron-linuxbridge-agent.service
- # systemctl start openstack-neutron-linuxbridge-agent.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Restart the Compute service:
-
- .. code-block:: console
-
- # service nova-compute restart
-
- .. end
-
- #. Restart the Linux bridge agent:
-
- .. code-block:: console
-
- # service neutron-linuxbridge-agent restart
-
- .. end
-
-.. endonly
+ neutron-compute-install-debian
+ neutron-compute-install-obs
+ neutron-compute-install-rdo
+ neutron-compute-install-ubuntu
diff --git a/doc/install-guide/source/neutron-controller-install-debian.rst b/doc/install-guide/source/neutron-controller-install-debian.rst
new file mode 100644
index 0000000000..b44094c1f6
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-debian.rst
@@ -0,0 +1,314 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Prerequisites
+-------------
+
+Before you configure the OpenStack Networking (neutron) service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+ * Use the database access client to connect to the database
+   server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+ * Create the ``neutron`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE neutron;
+
+ .. end
+
+ * Grant proper access to the ``neutron`` database, replacing
+ ``NEUTRON_DBPASS`` with a suitable password:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+
+ .. end
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only CLI
+ commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt neutron
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fdb0f541e28141719b6a43c8944bf1fb |
+ | name | neutron |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user neutron admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``neutron`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name neutron \
+ --description "OpenStack Networking" network
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Networking |
+ | enabled | True |
+ | id | f71529314dab4a4d8eca427e701d209e |
+ | name | neutron |
+ | type | network |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Networking service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ network public http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 85d80a6d02fc4b7683f611d7fc1493a3 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network internal http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 09753b537ac74422a68d2d791cf3714f |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network admin http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 1ee14289c9374dffb5db92a5c112fc4e |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Configure networking options
+----------------------------
+
+You can deploy the Networking service using one of two architectures
+represented by options 1 and 2.
+
+Option 1 deploys the simplest possible architecture that only supports
+attaching instances to provider (external) networks. No self-service (private)
+networks, routers, or floating IP addresses. Only the ``admin`` or other
+privileged user can manage provider networks.
+
+Option 2 augments option 1 with layer-3 services that support attaching
+instances to self-service networks. The ``demo`` or other unprivileged
+user can manage self-service networks including routers that provide
+connectivity between self-service and provider networks. Additionally,
+floating IP addresses provide connectivity to instances using self-service
+networks from external networks such as the Internet.
+
+Self-service networks typically use overlay networks. Overlay network
+protocols such as VXLAN include additional headers that increase overhead
+and decrease space available for the payload or user data. Without knowledge
+of the virtual network infrastructure, instances attempt to send packets
+using the default Ethernet :term:`maximum transmission unit (MTU)` of 1500
+bytes. The Networking service automatically provides the correct MTU value
+to instances via DHCP. However, some cloud images do not use DHCP or ignore
+the DHCP MTU option and require configuration using metadata or a script.
+
+.. note::
+
+ Option 2 also supports attaching instances to provider networks.
+
+Choose one of the following networking options to configure services
+specific to it. Afterwards, return here and proceed to
+:ref:`neutron-controller-metadata-agent-debian`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-controller-install-option1.rst
+ neutron-controller-install-option2.rst
+
+.. _neutron-controller-metadata-agent-debian:
+
+Configure the metadata agent
+----------------------------
+
+The :term:`metadata agent <Metadata agent>` provides configuration information
+such as credentials to instances.
+
+* Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the metadata host and shared
+ secret:
+
+ .. path /etc/neutron/metadata_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ nova_metadata_ip = controller
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy.
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and perform the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters, enable the
+ metadata proxy, and configure the secret:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+ service_metadata_proxy = true
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ Replace ``METADATA_SECRET`` with the secret you chose for the metadata
+ proxy.
+
+Finalize installation
+---------------------
+
+
+
+
+#. Populate the database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
+
+ .. end
+
+ .. note::
+
+ Database population occurs later for Networking because the script
+ requires complete server and plug-in configuration files.
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # service nova-api restart
+
+ .. end
+
+#. Restart the Networking services.
+
+ For both networking options:
+
+ .. code-block:: console
+
+ # service neutron-server restart
+ # service neutron-linuxbridge-agent restart
+ # service neutron-dhcp-agent restart
+ # service neutron-metadata-agent restart
+
+ .. end
+
+ For networking option 2, also restart the layer-3 service:
+
+ .. code-block:: console
+
+ # service neutron-l3-agent restart
+
+ .. end
+
diff --git a/doc/install-guide/source/neutron-controller-install-obs.rst b/doc/install-guide/source/neutron-controller-install-obs.rst
new file mode 100644
index 0000000000..e5a0571685
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-obs.rst
@@ -0,0 +1,319 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Prerequisites
+-------------
+
+Before you configure the OpenStack Networking (neutron) service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+ * Use the database access client to connect to the database
+   server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+ * Create the ``neutron`` database:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE neutron;
+
+ .. end
+
+ * Grant proper access to the ``neutron`` database, replacing
+ ``NEUTRON_DBPASS`` with a suitable password:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+
+ .. end
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only CLI
+ commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt neutron
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fdb0f541e28141719b6a43c8944bf1fb |
+ | name | neutron |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user neutron admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``neutron`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name neutron \
+ --description "OpenStack Networking" network
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Networking |
+ | enabled | True |
+ | id | f71529314dab4a4d8eca427e701d209e |
+ | name | neutron |
+ | type | network |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Networking service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ network public http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 85d80a6d02fc4b7683f611d7fc1493a3 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network internal http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 09753b537ac74422a68d2d791cf3714f |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network admin http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 1ee14289c9374dffb5db92a5c112fc4e |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Configure networking options
+----------------------------
+
+You can deploy the Networking service using one of two architectures
+represented by options 1 and 2.
+
+Option 1 deploys the simplest possible architecture that only supports
+attaching instances to provider (external) networks. No self-service (private)
+networks, routers, or floating IP addresses. Only the ``admin`` or other
+privileged user can manage provider networks.
+
+Option 2 augments option 1 with layer-3 services that support attaching
+instances to self-service networks. The ``demo`` or other unprivileged
+user can manage self-service networks including routers that provide
+connectivity between self-service and provider networks. Additionally,
+floating IP addresses provide connectivity to instances using self-service
+networks from external networks such as the Internet.
+
+Self-service networks typically use overlay networks. Overlay network
+protocols such as VXLAN include additional headers that increase overhead
+and decrease space available for the payload or user data. Without knowledge
+of the virtual network infrastructure, instances attempt to send packets
+using the default Ethernet :term:`maximum transmission unit (MTU)` of 1500
+bytes. The Networking service automatically provides the correct MTU value
+to instances via DHCP. However, some cloud images do not use DHCP or ignore
+the DHCP MTU option and require configuration using metadata or a script.
+
+.. note::
+
+ Option 2 also supports attaching instances to provider networks.
+
+Choose one of the following networking options to configure services
+specific to it. Afterwards, return here and proceed to
+:ref:`neutron-controller-metadata-agent-obs`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-controller-install-option1.rst
+ neutron-controller-install-option2.rst
+
+.. _neutron-controller-metadata-agent-obs:
+
+Configure the metadata agent
+----------------------------
+
+The :term:`metadata agent <Metadata agent>` provides configuration information
+such as credentials to instances.
+
+* Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the metadata host and shared
+ secret:
+
+ .. path /etc/neutron/metadata_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ nova_metadata_ip = controller
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy.
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and perform the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters, enable the
+ metadata proxy, and configure the secret:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+ service_metadata_proxy = true
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ Replace ``METADATA_SECRET`` with the secret you chose for the metadata
+ proxy.
+
+Finalize installation
+---------------------
+
+
+
+.. note::
+
+ SLES enables apparmor by default and restricts dnsmasq. You need to
+ either completely disable apparmor or disable only the dnsmasq
+ profile:
+
+ .. code-block:: console
+
+ # ln -s /etc/apparmor.d/usr.sbin.dnsmasq /etc/apparmor.d/disable/
+ # systemctl restart apparmor
+
+ .. end
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # systemctl restart openstack-nova-api.service
+
+ .. end
+
+#. Start the Networking services and configure them to start when the system
+ boots.
+
+ For both networking options:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-neutron.service \
+ openstack-neutron-linuxbridge-agent.service \
+ openstack-neutron-dhcp-agent.service \
+ openstack-neutron-metadata-agent.service
+ # systemctl start openstack-neutron.service \
+ openstack-neutron-linuxbridge-agent.service \
+ openstack-neutron-dhcp-agent.service \
+ openstack-neutron-metadata-agent.service
+
+ .. end
+
+ For networking option 2, also enable and start the layer-3 service:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-neutron-l3-agent.service
+ # systemctl start openstack-neutron-l3-agent.service
+
+ .. end
+
+
diff --git a/doc/install-guide/source/neutron-controller-install-option1-debian.rst b/doc/install-guide/source/neutron-controller-install-option1-debian.rst
new file mode 100644
index 0000000000..23d958ba4f
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option1-debian.rst
@@ -0,0 +1,287 @@
+Networking Option 1: Provider networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+.. code-block:: console
+
+ # apt install neutron-server neutron-linuxbridge-agent \
+ neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
+
+.. end
+
+Configure the server component
+------------------------------
+
+The Networking server component configuration includes the database,
+authentication mechanism, message queue, topology change notifications,
+and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in and disable additional plug-ins:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins =
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat and VLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan
+
+ .. end
+
+ * In the ``[ml2]`` section, disable self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types =
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge mechanism:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, disable VXLAN overlay networks:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = false
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option1-obs.rst b/doc/install-guide/source/neutron-controller-install-option1-obs.rst
new file mode 100644
index 0000000000..d5489eee48
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option1-obs.rst
@@ -0,0 +1,289 @@
+Networking Option 1: Provider networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+.. code-block:: console
+
+ # zypper install --no-recommends openstack-neutron \
+ openstack-neutron-server openstack-neutron-linuxbridge-agent \
+ openstack-neutron-dhcp-agent openstack-neutron-metadata-agent \
+ bridge-utils
+
+.. end
+
+Configure the server component
+------------------------------
+
+The Networking server component configuration includes the database,
+authentication mechanism, message queue, topology change notifications,
+and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in and disable additional plug-ins:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins =
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat and VLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan
+
+ .. end
+
+ * In the ``[ml2]`` section, disable self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types =
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge mechanism:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, disable VXLAN overlay networks:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = false
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option1-rdo.rst b/doc/install-guide/source/neutron-controller-install-option1-rdo.rst
new file mode 100644
index 0000000000..dc67e942b6
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option1-rdo.rst
@@ -0,0 +1,299 @@
+Networking Option 1: Provider networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+.. code-block:: console
+
+ # yum install openstack-neutron openstack-neutron-ml2 \
+ openstack-neutron-linuxbridge ebtables
+
+.. end
+
+Configure the server component
+------------------------------
+
+The Networking server component configuration includes the database,
+authentication mechanism, message queue, topology change notifications,
+and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in and disable additional plug-ins:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins =
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/neutron/tmp
+
+ .. end
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat and VLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan
+
+ .. end
+
+ * In the ``[ml2]`` section, disable self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types =
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge mechanism:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, disable VXLAN overlay networks:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = false
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option1-ubuntu.rst b/doc/install-guide/source/neutron-controller-install-option1-ubuntu.rst
new file mode 100644
index 0000000000..dba2650232
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option1-ubuntu.rst
@@ -0,0 +1,288 @@
+Networking Option 1: Provider networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+.. code-block:: console
+
+ # apt install neutron-server neutron-plugin-ml2 \
+ neutron-linuxbridge-agent neutron-dhcp-agent \
+ neutron-metadata-agent
+
+.. end
+
+Configure the server component
+------------------------------
+
+The Networking server component configuration includes the database,
+authentication mechanism, message queue, topology change notifications,
+and plug-in.
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in and disable additional plug-ins:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins =
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat and VLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan
+
+ .. end
+
+ * In the ``[ml2]`` section, disable self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types =
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge mechanism:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, disable VXLAN overlay networks:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = false
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option1.rst b/doc/install-guide/source/neutron-controller-install-option1.rst
index 424232024c..47983e7667 100644
--- a/doc/install-guide/source/neutron-controller-install-option1.rst
+++ b/doc/install-guide/source/neutron-controller-install-option1.rst
@@ -3,331 +3,7 @@ Networking Option 1: Provider networks
Install and configure the Networking components on the *controller* node.
-Install the components
-----------------------
+.. toctree::
+ :glob:
-.. only:: ubuntu
-
- .. code-block:: console
-
- # apt install neutron-server neutron-plugin-ml2 \
- neutron-linuxbridge-agent neutron-dhcp-agent \
- neutron-metadata-agent
-
- .. end
-
-.. only:: debian
-
- .. code-block:: console
-
- # apt install neutron-server neutron-linuxbridge-agent \
- neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
-
- .. end
-
-.. only:: rdo
-
- .. code-block:: console
-
- # yum install openstack-neutron openstack-neutron-ml2 \
- openstack-neutron-linuxbridge ebtables
-
- .. end
-
-.. only:: obs
-
- .. code-block:: console
-
- # zypper install --no-recommends openstack-neutron \
- openstack-neutron-server openstack-neutron-linuxbridge-agent \
- openstack-neutron-dhcp-agent openstack-neutron-metadata-agent \
- bridge-utils
-
- .. end
-
-Configure the server component
-------------------------------
-
-The Networking server component configuration includes the database,
-authentication mechanism, message queue, topology change notifications,
-and plug-in.
-
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
- actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
-
- .. end
-
- Replace ``NEUTRON_DBPASS`` with the password you chose for the
- database.
-
- .. note::
-
- Comment out or remove any other ``connection`` options in the
- ``[database]`` section.
-
- * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
- plug-in and disable additional plug-ins:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- core_plugin = ml2
- service_plugins =
-
- .. end
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for the
- ``openstack`` account in RabbitMQ.
-
- * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
- Identity service access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = neutron
- password = NEUTRON_PASS
-
- .. end
-
- Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
- user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
- notify Compute of network topology changes:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- notify_nova_on_port_status_changes = true
- notify_nova_on_port_data_changes = true
-
- [nova]
- # ...
- auth_url = http://controller:35357
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- region_name = RegionOne
- project_name = service
- username = nova
- password = NOVA_PASS
-
- .. end
-
- Replace ``NOVA_PASS`` with the password you chose for the ``nova``
- user in the Identity service.
-
- .. only:: rdo
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/neutron/tmp
-
- .. end
-
-Configure the Modular Layer 2 (ML2) plug-in
--------------------------------------------
-
-The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
-and switching) virtual networking infrastructure for instances.
-
-* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
- following actions:
-
- * In the ``[ml2]`` section, enable flat and VLAN networks:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- type_drivers = flat,vlan
-
- .. end
-
- * In the ``[ml2]`` section, disable self-service networks:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- tenant_network_types =
-
- .. end
-
- * In the ``[ml2]`` section, enable the Linux bridge mechanism:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- mechanism_drivers = linuxbridge
-
- .. end
-
- .. warning::
-
- After you configure the ML2 plug-in, removing values in the
- ``type_drivers`` option can lead to database inconsistency.
-
- * In the ``[ml2]`` section, enable the port security extension driver:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- extension_drivers = port_security
-
- .. end
-
- * In the ``[ml2_type_flat]`` section, configure the provider virtual
- network as a flat network:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2_type_flat]
- # ...
- flat_networks = provider
-
- .. end
-
- * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
- efficiency of security group rules:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [securitygroup]
- # ...
- enable_ipset = true
-
- .. end
-
-Configure the Linux bridge agent
---------------------------------
-
-The Linux bridge agent builds layer-2 (bridging and switching) virtual
-networking infrastructure for instances and handles security groups.
-
-* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
- complete the following actions:
-
- * In the ``[linux_bridge]`` section, map the provider virtual network to the
- provider physical network interface:
-
- .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
- .. code-block:: ini
-
- [linux_bridge]
- physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
-
- .. end
-
- Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
- provider physical network interface. See :ref:`environment-networking`
- for more information.
-
- * In the ``[vxlan]`` section, disable VXLAN overlay networks:
-
- .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
- .. code-block:: ini
-
- [vxlan]
- enable_vxlan = false
-
- .. end
-
- * In the ``[securitygroup]`` section, enable security groups and
- configure the Linux bridge :term:`iptables` firewall driver:
-
- .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
- .. code-block:: ini
-
- [securitygroup]
- # ...
- enable_security_group = true
- firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
-
- .. end
-
-Configure the DHCP agent
-------------------------
-
-The :term:`DHCP agent` provides DHCP services for virtual networks.
-
-* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
- actions:
-
- * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
- Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
- networks can access metadata over the network:
-
- .. path /etc/neutron/dhcp_agent.ini
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- interface_driver = linuxbridge
- dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
- enable_isolated_metadata = true
-
- .. end
-
-Return to
-:ref:`Networking controller node configuration
-`.
+ neutron-controller-install-option1-*
diff --git a/doc/install-guide/source/neutron-controller-install-option2-debian.rst b/doc/install-guide/source/neutron-controller-install-option2-debian.rst
new file mode 100644
index 0000000000..7286cf8e36
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option2-debian.rst
@@ -0,0 +1,335 @@
+Networking Option 2: Self-service networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+
+
+
+#. .. code-block:: console
+
+ # apt install neutron-server neutron-linuxbridge-agent \
+ neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
+
+ .. end
+
+
+Configure the server component
+------------------------------
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in, router service, and overlapping IP addresses:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins = router
+ allow_overlapping_ips = true
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan,vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable VXLAN self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types = vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population
+ mechanisms:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge,l2population
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ .. note::
+
+ The Linux bridge agent only supports VXLAN overlay networks.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier
+ range for self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_vxlan]
+ # ...
+ vni_ranges = 1:1000
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the
+ IP address of the physical network interface that handles overlay
+ networks, and enable layer-2 population:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = true
+ local_ip = OVERLAY_INTERFACE_IP_ADDRESS
+ l2_population = true
+
+ .. end
+
+ Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
+ underlying physical network interface that handles overlay networks. The
+ example architecture uses the management interface to tunnel traffic to
+ the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with
+ the management IP address of the controller node. See
+ :ref:`environment-networking` for more information.
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the layer-3 agent
+---------------------------
+
+The :term:`Layer-3 (L3) agent` provides routing and NAT services for
+self-service virtual networks.
+
+* Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following
+ actions:
+
+  * In the ``[DEFAULT]`` section, configure the Linux bridge interface
+    driver:
+
+ .. path /etc/neutron/l3_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option2-obs.rst b/doc/install-guide/source/neutron-controller-install-option2-obs.rst
new file mode 100644
index 0000000000..4993b7034d
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option2-obs.rst
@@ -0,0 +1,337 @@
+Networking Option 2: Self-service networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+
+
+.. code-block:: console
+
+ # zypper install --no-recommends openstack-neutron \
+ openstack-neutron-server openstack-neutron-linuxbridge-agent \
+ openstack-neutron-l3-agent openstack-neutron-dhcp-agent \
+ openstack-neutron-metadata-agent bridge-utils
+
+.. end
+
+
+
+Configure the server component
+------------------------------
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in, router service, and overlapping IP addresses:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins = router
+ allow_overlapping_ips = true
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan,vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable VXLAN self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types = vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population
+ mechanisms:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge,l2population
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ .. note::
+
+ The Linux bridge agent only supports VXLAN overlay networks.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier
+ range for self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_vxlan]
+ # ...
+ vni_ranges = 1:1000
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the
+ IP address of the physical network interface that handles overlay
+ networks, and enable layer-2 population:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = true
+ local_ip = OVERLAY_INTERFACE_IP_ADDRESS
+ l2_population = true
+
+ .. end
+
+ Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
+ underlying physical network interface that handles overlay networks. The
+ example architecture uses the management interface to tunnel traffic to
+ the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with
+ the management IP address of the controller node. See
+ :ref:`environment-networking` for more information.
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the layer-3 agent
+---------------------------
+
+The :term:`Layer-3 (L3) agent` provides routing and NAT services for
+self-service virtual networks.
+
+* Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface
+ driver:
+
+ .. path /etc/neutron/l3_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option2-rdo.rst b/doc/install-guide/source/neutron-controller-install-option2-rdo.rst
new file mode 100644
index 0000000000..c966754fb2
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option2-rdo.rst
@@ -0,0 +1,347 @@
+Networking Option 2: Self-service networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+
+.. code-block:: console
+
+ # yum install openstack-neutron openstack-neutron-ml2 \
+ openstack-neutron-linuxbridge ebtables
+
+.. end
+
+
+
+
+Configure the server component
+------------------------------
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in, router service, and overlapping IP addresses:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins = router
+ allow_overlapping_ips = true
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+
+ * In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/neutron/tmp
+
+ .. end
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan,vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable VXLAN self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types = vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population
+ mechanisms:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge,l2population
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ .. note::
+
+ The Linux bridge agent only supports VXLAN overlay networks.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier
+ range for self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_vxlan]
+ # ...
+ vni_ranges = 1:1000
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the
+ IP address of the physical network interface that handles overlay
+ networks, and enable layer-2 population:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = true
+ local_ip = OVERLAY_INTERFACE_IP_ADDRESS
+ l2_population = true
+
+ .. end
+
+ Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
+ underlying physical network interface that handles overlay networks. The
+ example architecture uses the management interface to tunnel traffic to
+ the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with
+ the management IP address of the controller node. See
+ :ref:`environment-networking` for more information.
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the layer-3 agent
+---------------------------
+
+The :term:`Layer-3 (L3) agent` provides routing and NAT services for
+self-service virtual networks.
+
+* Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface
+ driver:
+
+ .. path /etc/neutron/l3_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option2-ubuntu.rst b/doc/install-guide/source/neutron-controller-install-option2-ubuntu.rst
new file mode 100644
index 0000000000..2560e728e4
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-option2-ubuntu.rst
@@ -0,0 +1,336 @@
+Networking Option 2: Self-service networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Install and configure the Networking components on the *controller* node.
+
+Install the components
+----------------------
+
+
+.. code-block:: console
+
+ # apt install neutron-server neutron-plugin-ml2 \
+ neutron-linuxbridge-agent neutron-l3-agent neutron-dhcp-agent \
+ neutron-metadata-agent
+
+.. end
+
+
+
+
+
+Configure the server component
+------------------------------
+
+* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [database]
+ # ...
+ connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
+
+ .. end
+
+ Replace ``NEUTRON_DBPASS`` with the password you chose for the
+ database.
+
+ .. note::
+
+ Comment out or remove any other ``connection`` options in the
+ ``[database]`` section.
+
+ * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
+ plug-in, router service, and overlapping IP addresses:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ core_plugin = ml2
+ service_plugins = router
+ allow_overlapping_ips = true
+
+ .. end
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in RabbitMQ.
+
+ * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
+ Identity service access:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
+ notify Compute of network topology changes:
+
+ .. path /etc/neutron/neutron.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ notify_nova_on_port_status_changes = true
+ notify_nova_on_port_data_changes = true
+
+ [nova]
+ # ...
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the ``nova``
+ user in the Identity service.
+
+Configure the Modular Layer 2 (ML2) plug-in
+-------------------------------------------
+
+The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
+and switching) virtual networking infrastructure for instances.
+
+* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
+ following actions:
+
+ * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ type_drivers = flat,vlan,vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable VXLAN self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ tenant_network_types = vxlan
+
+ .. end
+
+ * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population
+ mechanisms:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ mechanism_drivers = linuxbridge,l2population
+
+ .. end
+
+ .. warning::
+
+ After you configure the ML2 plug-in, removing values in the
+ ``type_drivers`` option can lead to database inconsistency.
+
+ .. note::
+
+ The Linux bridge agent only supports VXLAN overlay networks.
+
+ * In the ``[ml2]`` section, enable the port security extension driver:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2]
+ # ...
+ extension_drivers = port_security
+
+ .. end
+
+ * In the ``[ml2_type_flat]`` section, configure the provider virtual
+ network as a flat network:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_flat]
+ # ...
+ flat_networks = provider
+
+ .. end
+
+ * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier
+ range for self-service networks:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [ml2_type_vxlan]
+ # ...
+ vni_ranges = 1:1000
+
+ .. end
+
+ * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
+ efficiency of security group rules:
+
+ .. path /etc/neutron/plugins/ml2/ml2_conf.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_ipset = true
+
+ .. end
+
+Configure the Linux bridge agent
+--------------------------------
+
+The Linux bridge agent builds layer-2 (bridging and switching) virtual
+networking infrastructure for instances and handles security groups.
+
+* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
+ complete the following actions:
+
+ * In the ``[linux_bridge]`` section, map the provider virtual network to the
+ provider physical network interface:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [linux_bridge]
+ physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
+
+ .. end
+
+ Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
+ provider physical network interface. See :ref:`environment-networking`
+ for more information.
+
+ * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the
+ IP address of the physical network interface that handles overlay
+ networks, and enable layer-2 population:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [vxlan]
+ enable_vxlan = true
+ local_ip = OVERLAY_INTERFACE_IP_ADDRESS
+ l2_population = true
+
+ .. end
+
+ Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
+ underlying physical network interface that handles overlay networks. The
+ example architecture uses the management interface to tunnel traffic to
+ the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with
+ the management IP address of the controller node. See
+ :ref:`environment-networking` for more information.
+
+ * In the ``[securitygroup]`` section, enable security groups and
+ configure the Linux bridge :term:`iptables` firewall driver:
+
+ .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
+ .. code-block:: ini
+
+ [securitygroup]
+ # ...
+ enable_security_group = true
+ firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+
+ .. end
+
+Configure the layer-3 agent
+---------------------------
+
+The :term:`Layer-3 (L3) agent` provides routing and NAT services for
+self-service virtual networks.
+
+* Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface
+ driver:
+
+ .. path /etc/neutron/l3_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+
+ .. end
+
+Configure the DHCP agent
+------------------------
+
+The :term:`DHCP agent` provides DHCP services for virtual networks.
+
+* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
+ Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
+ networks can access metadata over the network:
+
+ .. path /etc/neutron/dhcp_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ interface_driver = linuxbridge
+ dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+ enable_isolated_metadata = true
+
+ .. end
+
+Return to *Networking controller node configuration*.
diff --git a/doc/install-guide/source/neutron-controller-install-option2.rst b/doc/install-guide/source/neutron-controller-install-option2.rst
index 16656eccdb..c588305cc3 100644
--- a/doc/install-guide/source/neutron-controller-install-option2.rst
+++ b/doc/install-guide/source/neutron-controller-install-option2.rst
@@ -3,383 +3,7 @@ Networking Option 2: Self-service networks
Install and configure the Networking components on the *controller* node.
-Install the components
-----------------------
+.. toctree::
+ :glob:
-.. only:: ubuntu
-
- .. code-block:: console
-
- # apt install neutron-server neutron-plugin-ml2 \
- neutron-linuxbridge-agent neutron-l3-agent neutron-dhcp-agent \
- neutron-metadata-agent
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- .. code-block:: console
-
- # yum install openstack-neutron openstack-neutron-ml2 \
- openstack-neutron-linuxbridge ebtables
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- .. code-block:: console
-
- # zypper install --no-recommends openstack-neutron \
- openstack-neutron-server openstack-neutron-linuxbridge-agent \
- openstack-neutron-l3-agent openstack-neutron-dhcp-agent \
- openstack-neutron-metadata-agent bridge-utils
-
- .. end
-
-.. endonly
-
-.. only:: debian
-
- #. .. code-block:: console
-
- # apt install neutron-server neutron-linuxbridge-agent \
- neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
-
- .. end
-
-.. endonly
-
-Configure the server component
-------------------------------
-
-* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
- actions:
-
- * In the ``[database]`` section, configure database access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [database]
- # ...
- connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
-
- .. end
-
- Replace ``NEUTRON_DBPASS`` with the password you chose for the
- database.
-
- .. note::
-
- Comment out or remove any other ``connection`` options in the
- ``[database]`` section.
-
- * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2)
- plug-in, router service, and overlapping IP addresses:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- core_plugin = ml2
- service_plugins = router
- allow_overlapping_ips = true
-
- .. end
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for the
- ``openstack`` account in RabbitMQ.
-
- * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
- Identity service access:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = neutron
- password = NEUTRON_PASS
-
- .. end
-
- Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
- user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to
- notify Compute of network topology changes:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- notify_nova_on_port_status_changes = true
- notify_nova_on_port_data_changes = true
-
- [nova]
- # ...
- auth_url = http://controller:35357
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- region_name = RegionOne
- project_name = service
- username = nova
- password = NOVA_PASS
-
- .. end
-
- Replace ``NOVA_PASS`` with the password you chose for the ``nova``
- user in the Identity service.
-
- .. only:: rdo
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/neutron/neutron.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/neutron/tmp
-
- .. end
-
-Configure the Modular Layer 2 (ML2) plug-in
--------------------------------------------
-
-The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging
-and switching) virtual networking infrastructure for instances.
-
-* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the
- following actions:
-
- * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- type_drivers = flat,vlan,vxlan
-
- .. end
-
- * In the ``[ml2]`` section, enable VXLAN self-service networks:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- tenant_network_types = vxlan
-
- .. end
-
- * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population
- mechanisms:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- mechanism_drivers = linuxbridge,l2population
-
- .. end
-
- .. warning::
-
- After you configure the ML2 plug-in, removing values in the
- ``type_drivers`` option can lead to database inconsistency.
-
- .. note::
-
- The Linux bridge agent only supports VXLAN overlay networks.
-
- * In the ``[ml2]`` section, enable the port security extension driver:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2]
- # ...
- extension_drivers = port_security
-
- .. end
-
- * In the ``[ml2_type_flat]`` section, configure the provider virtual
- network as a flat network:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2_type_flat]
- # ...
- flat_networks = provider
-
- .. end
-
- * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier
- range for self-service networks:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [ml2_type_vxlan]
- # ...
- vni_ranges = 1:1000
-
- .. end
-
- * In the ``[securitygroup]`` section, enable :term:`ipset` to increase
- efficiency of security group rules:
-
- .. path /etc/neutron/plugins/ml2/ml2_conf.ini
- .. code-block:: ini
-
- [securitygroup]
- # ...
- enable_ipset = true
-
- .. end
-
-Configure the Linux bridge agent
---------------------------------
-
-The Linux bridge agent builds layer-2 (bridging and switching) virtual
-networking infrastructure for instances and handles security groups.
-
-* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
- complete the following actions:
-
- * In the ``[linux_bridge]`` section, map the provider virtual network to the
- provider physical network interface:
-
- .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
- .. code-block:: ini
-
- [linux_bridge]
- physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
-
- .. end
-
- Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
- provider physical network interface. See :ref:`environment-networking`
- for more information.
-
- * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the
- IP address of the physical network interface that handles overlay
- networks, and enable layer-2 population:
-
- .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
- .. code-block:: ini
-
- [vxlan]
- enable_vxlan = true
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
- l2_population = true
-
- .. end
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- underlying physical network interface that handles overlay networks. The
- example architecture uses the management interface to tunnel traffic to
- the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with
- the management IP address of the controller node. See
- :ref:`environment-networking` for more information.
-
- * In the ``[securitygroup]`` section, enable security groups and
- configure the Linux bridge :term:`iptables` firewall driver:
-
- .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
- .. code-block:: ini
-
- [securitygroup]
- # ...
- enable_security_group = true
- firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
-
- .. end
-
-Configure the layer-3 agent
----------------------------
-
-The :term:`Layer-3 (L3) agent` provides routing and NAT services for
-self-service virtual networks.
-
-* Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following
- actions:
-
- * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver
- and external network bridge:
-
- .. path /etc/neutron/l3_agent.ini
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- interface_driver = linuxbridge
-
- .. end
-
-Configure the DHCP agent
-------------------------
-
-The :term:`DHCP agent` provides DHCP services for virtual networks.
-
-* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following
- actions:
-
- * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver,
- Dnsmasq DHCP driver, and enable isolated metadata so instances on provider
- networks can access metadata over the network:
-
- .. path /etc/neutron/dhcp_agent.ini
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- interface_driver = linuxbridge
- dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
- enable_isolated_metadata = true
-
- .. end
-
-Return to
-:ref:`Networking controller node configuration
-`.
+ neutron-controller-install-option2-*
diff --git a/doc/install-guide/source/neutron-controller-install-rdo.rst b/doc/install-guide/source/neutron-controller-install-rdo.rst
new file mode 100644
index 0000000000..5363a7d71c
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-rdo.rst
@@ -0,0 +1,329 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Prerequisites
+-------------
+
+Before you configure the OpenStack Networking (neutron) service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        $ mysql -u root -p
+
+     .. end
+
+
+ * Create the ``neutron`` database:
+
+ .. code-block:: console
+
+        MariaDB [(none)]> CREATE DATABASE neutron;
+
+ .. end
+
+ * Grant proper access to the ``neutron`` database, replacing
+ ``NEUTRON_DBPASS`` with a suitable password:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+
+ .. end
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only CLI
+ commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt neutron
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fdb0f541e28141719b6a43c8944bf1fb |
+ | name | neutron |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user neutron admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``neutron`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name neutron \
+ --description "OpenStack Networking" network
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Networking |
+ | enabled | True |
+ | id | f71529314dab4a4d8eca427e701d209e |
+ | name | neutron |
+ | type | network |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Networking service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ network public http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 85d80a6d02fc4b7683f611d7fc1493a3 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network internal http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 09753b537ac74422a68d2d791cf3714f |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network admin http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 1ee14289c9374dffb5db92a5c112fc4e |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Configure networking options
+----------------------------
+
+You can deploy the Networking service using one of two architectures
+represented by options 1 and 2.
+
+Option 1 deploys the simplest possible architecture that only supports
+attaching instances to provider (external) networks. No self-service (private)
+networks, routers, or floating IP addresses. Only the ``admin`` or other
+privileged user can manage provider networks.
+
+Option 2 augments option 1 with layer-3 services that support attaching
+instances to self-service networks. The ``demo`` or other unprivileged
+user can manage self-service networks including routers that provide
+connectivity between self-service and provider networks. Additionally,
+floating IP addresses provide connectivity to instances using self-service
+networks from external networks such as the Internet.
+
+Self-service networks typically use overlay networks. Overlay network
+protocols such as VXLAN include additional headers that increase overhead
+and decrease space available for the payload or user data. Without knowledge
+of the virtual network infrastructure, instances attempt to send packets
+using the default Ethernet :term:`maximum transmission unit (MTU)` of 1500
+bytes. The Networking service automatically provides the correct MTU value
+to instances via DHCP. However, some cloud images do not use DHCP or ignore
+the DHCP MTU option and require configuration using metadata or a script.
+
+.. note::
+
+ Option 2 also supports attaching instances to provider networks.
+
+Choose one of the following networking options to configure services
+specific to it. Afterwards, return here and proceed to
+:ref:`neutron-controller-metadata-agent-rdo`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-controller-install-option1.rst
+ neutron-controller-install-option2.rst
+
+.. _neutron-controller-metadata-agent-rdo:
+
+Configure the metadata agent
+----------------------------
+
+The :term:`metadata agent <Metadata agent>` provides configuration information
+such as credentials to instances.
+
+* Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the metadata host and shared
+ secret:
+
+ .. path /etc/neutron/metadata_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ nova_metadata_ip = controller
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy.
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and perform the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters, enable the
+ metadata proxy, and configure the secret:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+ service_metadata_proxy = true
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ Replace ``METADATA_SECRET`` with the secret you chose for the metadata
+ proxy.
+
+Finalize installation
+---------------------
+
+
+#. The Networking service initialization scripts expect a symbolic link
+ ``/etc/neutron/plugin.ini`` pointing to the ML2 plug-in configuration
+ file, ``/etc/neutron/plugins/ml2/ml2_conf.ini``. If this symbolic
+ link does not exist, create it using the following command:
+
+ .. code-block:: console
+
+ # ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
+
+ .. end
+
+#. Populate the database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
+
+ .. end
+
+ .. note::
+
+ Database population occurs later for Networking because the script
+ requires complete server and plug-in configuration files.
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # systemctl restart openstack-nova-api.service
+
+ .. end
+
+#. Start the Networking services and configure them to start when the system
+ boots.
+
+ For both networking options:
+
+ .. code-block:: console
+
+ # systemctl enable neutron-server.service \
+ neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
+ neutron-metadata-agent.service
+ # systemctl start neutron-server.service \
+ neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
+ neutron-metadata-agent.service
+
+ .. end
+
+ For networking option 2, also enable and start the layer-3 service:
+
+ .. code-block:: console
+
+ # systemctl enable neutron-l3-agent.service
+ # systemctl start neutron-l3-agent.service
+
+ .. end
+
+
+
diff --git a/doc/install-guide/source/neutron-controller-install-ubuntu.rst b/doc/install-guide/source/neutron-controller-install-ubuntu.rst
new file mode 100644
index 0000000000..a939a69bac
--- /dev/null
+++ b/doc/install-guide/source/neutron-controller-install-ubuntu.rst
@@ -0,0 +1,314 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Prerequisites
+-------------
+
+Before you configure the OpenStack Networking (neutron) service, you
+must create a database, service credentials, and API endpoints.
+
+#. To create the database, complete these steps:
+
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        # mysql
+
+     .. end
+
+
+
+ * Create the ``neutron`` database:
+
+ .. code-block:: console
+
+        MariaDB [(none)]> CREATE DATABASE neutron;
+
+ .. end
+
+ * Grant proper access to the ``neutron`` database, replacing
+ ``NEUTRON_DBPASS`` with a suitable password:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
+ IDENTIFIED BY 'NEUTRON_DBPASS';
+
+ .. end
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to admin-only CLI
+ commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt neutron
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fdb0f541e28141719b6a43c8944bf1fb |
+ | name | neutron |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``neutron`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user neutron admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``neutron`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name neutron \
+ --description "OpenStack Networking" network
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Networking |
+ | enabled | True |
+ | id | f71529314dab4a4d8eca427e701d209e |
+ | name | neutron |
+ | type | network |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Networking service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ network public http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 85d80a6d02fc4b7683f611d7fc1493a3 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network internal http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 09753b537ac74422a68d2d791cf3714f |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ network admin http://controller:9696
+
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 1ee14289c9374dffb5db92a5c112fc4e |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | f71529314dab4a4d8eca427e701d209e |
+ | service_name | neutron |
+ | service_type | network |
+ | url | http://controller:9696 |
+ +--------------+----------------------------------+
+
+ .. end
+
+Configure networking options
+----------------------------
+
+You can deploy the Networking service using one of two architectures
+represented by options 1 and 2.
+
+Option 1 deploys the simplest possible architecture that only supports
+attaching instances to provider (external) networks. No self-service (private)
+networks, routers, or floating IP addresses. Only the ``admin`` or other
+privileged user can manage provider networks.
+
+Option 2 augments option 1 with layer-3 services that support attaching
+instances to self-service networks. The ``demo`` or other unprivileged
+user can manage self-service networks including routers that provide
+connectivity between self-service and provider networks. Additionally,
+floating IP addresses provide connectivity to instances using self-service
+networks from external networks such as the Internet.
+
+Self-service networks typically use overlay networks. Overlay network
+protocols such as VXLAN include additional headers that increase overhead
+and decrease space available for the payload or user data. Without knowledge
+of the virtual network infrastructure, instances attempt to send packets
+using the default Ethernet :term:`maximum transmission unit (MTU)` of 1500
+bytes. The Networking service automatically provides the correct MTU value
+to instances via DHCP. However, some cloud images do not use DHCP or ignore
+the DHCP MTU option and require configuration using metadata or a script.
+
+.. note::
+
+ Option 2 also supports attaching instances to provider networks.
+
+Choose one of the following networking options to configure services
+specific to it. Afterwards, return here and proceed to
+:ref:`neutron-controller-metadata-agent-ubuntu`.
+
+.. toctree::
+ :maxdepth: 1
+
+ neutron-controller-install-option1.rst
+ neutron-controller-install-option2.rst
+
+.. _neutron-controller-metadata-agent-ubuntu:
+
+Configure the metadata agent
+----------------------------
+
+The :term:`metadata agent <Metadata agent>` provides configuration information
+such as credentials to instances.
+
+* Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following
+ actions:
+
+ * In the ``[DEFAULT]`` section, configure the metadata host and shared
+ secret:
+
+ .. path /etc/neutron/metadata_agent.ini
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ nova_metadata_ip = controller
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy.
+
+Configure the Compute service to use the Networking service
+-----------------------------------------------------------
+
+* Edit the ``/etc/nova/nova.conf`` file and perform the following actions:
+
+ * In the ``[neutron]`` section, configure access parameters, enable the
+ metadata proxy, and configure the secret:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [neutron]
+ # ...
+ url = http://controller:9696
+ auth_url = http://controller:35357
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ region_name = RegionOne
+ project_name = service
+ username = neutron
+ password = NEUTRON_PASS
+ service_metadata_proxy = true
+ metadata_proxy_shared_secret = METADATA_SECRET
+
+ .. end
+
+ Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
+ user in the Identity service.
+
+ Replace ``METADATA_SECRET`` with the secret you chose for the metadata
+ proxy.
+
+Finalize installation
+---------------------
+
+
+
+
+#. Populate the database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
+
+ .. end
+
+ .. note::
+
+ Database population occurs later for Networking because the script
+ requires complete server and plug-in configuration files.
+
+#. Restart the Compute API service:
+
+ .. code-block:: console
+
+ # service nova-api restart
+
+ .. end
+
+#. Restart the Networking services.
+
+ For both networking options:
+
+ .. code-block:: console
+
+ # service neutron-server restart
+ # service neutron-linuxbridge-agent restart
+ # service neutron-dhcp-agent restart
+ # service neutron-metadata-agent restart
+
+ .. end
+
+ For networking option 2, also restart the layer-3 service:
+
+ .. code-block:: console
+
+ # service neutron-l3-agent restart
+
+ .. end
+
diff --git a/doc/install-guide/source/neutron-controller-install.rst b/doc/install-guide/source/neutron-controller-install.rst
index aef968cdb6..38d077cfe2 100644
--- a/doc/install-guide/source/neutron-controller-install.rst
+++ b/doc/install-guide/source/neutron-controller-install.rst
@@ -1,442 +1,9 @@
Install and configure controller node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Prerequisites
--------------
-
-Before you configure the OpenStack Networking (neutron) service, you
-must create a database, service credentials, and API endpoints.
-
-#. To create the database, complete these steps:
-
- .. only:: ubuntu
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- # mysql
-
- .. end
-
- .. endonly
-
- .. only:: rdo or debian or obs
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- $ mysql -u root -p
-
- .. end
-
- .. endonly
-
- * Create the ``neutron`` database:
-
- .. code-block:: console
-
- MariaDB [(none)] CREATE DATABASE neutron;
-
- .. end
-
- * Grant proper access to the ``neutron`` database, replacing
- ``NEUTRON_DBPASS`` with a suitable password:
-
- .. code-block:: console
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
- IDENTIFIED BY 'NEUTRON_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
- IDENTIFIED BY 'NEUTRON_DBPASS';
-
- .. end
-
- * Exit the database access client.
-
-#. Source the ``admin`` credentials to gain access to admin-only CLI
- commands:
-
- .. code-block:: console
-
- $ . admin-openrc
-
- .. end
-
-#. To create the service credentials, complete these steps:
-
- * Create the ``neutron`` user:
-
- .. code-block:: console
-
- $ openstack user create --domain default --password-prompt neutron
-
- User Password:
- Repeat User Password:
- +---------------------+----------------------------------+
- | Field | Value |
- +---------------------+----------------------------------+
- | domain_id | default |
- | enabled | True |
- | id | fdb0f541e28141719b6a43c8944bf1fb |
- | name | neutron |
- | options | {} |
- | password_expires_at | None |
- +---------------------+----------------------------------+
-
- .. end
-
- * Add the ``admin`` role to the ``neutron`` user:
-
- .. code-block:: console
-
- $ openstack role add --project service --user neutron admin
-
- .. end
-
- .. note::
-
- This command provides no output.
-
- * Create the ``neutron`` service entity:
-
- .. code-block:: console
-
- $ openstack service create --name neutron \
- --description "OpenStack Networking" network
-
- +-------------+----------------------------------+
- | Field | Value |
- +-------------+----------------------------------+
- | description | OpenStack Networking |
- | enabled | True |
- | id | f71529314dab4a4d8eca427e701d209e |
- | name | neutron |
- | type | network |
- +-------------+----------------------------------+
-
- .. end
-
-#. Create the Networking service API endpoints:
-
- .. code-block:: console
-
- $ openstack endpoint create --region RegionOne \
- network public http://controller:9696
-
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 85d80a6d02fc4b7683f611d7fc1493a3 |
- | interface | public |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | f71529314dab4a4d8eca427e701d209e |
- | service_name | neutron |
- | service_type | network |
- | url | http://controller:9696 |
- +--------------+----------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- network internal http://controller:9696
-
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 09753b537ac74422a68d2d791cf3714f |
- | interface | internal |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | f71529314dab4a4d8eca427e701d209e |
- | service_name | neutron |
- | service_type | network |
- | url | http://controller:9696 |
- +--------------+----------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- network admin http://controller:9696
-
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 1ee14289c9374dffb5db92a5c112fc4e |
- | interface | admin |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | f71529314dab4a4d8eca427e701d209e |
- | service_name | neutron |
- | service_type | network |
- | url | http://controller:9696 |
- +--------------+----------------------------------+
-
- .. end
-
-Configure networking options
-----------------------------
-
-You can deploy the Networking service using one of two architectures
-represented by options 1 and 2.
-
-Option 1 deploys the simplest possible architecture that only supports
-attaching instances to provider (external) networks. No self-service (private)
-networks, routers, or floating IP addresses. Only the ``admin`` or other
-privileged user can manage provider networks.
-
-Option 2 augments option 1 with layer-3 services that support attaching
-instances to self-service networks. The ``demo`` or other unprivileged
-user can manage self-service networks including routers that provide
-connectivity between self-service and provider networks. Additionally,
-floating IP addresses provide connectivity to instances using self-service
-networks from external networks such as the Internet.
-
-Self-service networks typically use overlay networks. Overlay network
-protocols such as VXLAN include additional headers that increase overhead
-and decrease space available for the payload or user data. Without knowledge
-of the virtual network infrastructure, instances attempt to send packets
-using the default Ethernet :term:`maximum transmission unit (MTU)` of 1500
-bytes. The Networking service automatically provides the correct MTU value
-to instances via DHCP. However, some cloud images do not use DHCP or ignore
-the DHCP MTU option and require configuration using metadata or a script.
-
-.. note::
-
- Option 2 also supports attaching instances to provider networks.
-
-Choose one of the following networking options to configure services
-specific to it. Afterwards, return here and proceed to
-:ref:`neutron-controller-metadata-agent`.
-
.. toctree::
- :maxdepth: 1
- neutron-controller-install-option1.rst
- neutron-controller-install-option2.rst
-
-.. _neutron-controller-metadata-agent:
-
-Configure the metadata agent
-----------------------------
-
-The :term:`metadata agent ` provides configuration information
-such as credentials to instances.
-
-* Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following
- actions:
-
- * In the ``[DEFAULT]`` section, configure the metadata host and shared
- secret:
-
- .. path /etc/neutron/metadata_agent.ini
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- nova_metadata_ip = controller
- metadata_proxy_shared_secret = METADATA_SECRET
-
- .. end
-
- Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy.
-
-Configure the Compute service to use the Networking service
------------------------------------------------------------
-
-* Edit the ``/etc/nova/nova.conf`` file and perform the following actions:
-
- * In the ``[neutron]`` section, configure access parameters, enable the
- metadata proxy, and configure the secret:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [neutron]
- # ...
- url = http://controller:9696
- auth_url = http://controller:35357
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- region_name = RegionOne
- project_name = service
- username = neutron
- password = NEUTRON_PASS
- service_metadata_proxy = true
- metadata_proxy_shared_secret = METADATA_SECRET
-
- .. end
-
- Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron``
- user in the Identity service.
-
- Replace ``METADATA_SECRET`` with the secret you chose for the metadata
- proxy.
-
-Finalize installation
----------------------
-
-.. only:: rdo
-
- #. The Networking service initialization scripts expect a symbolic link
- ``/etc/neutron/plugin.ini`` pointing to the ML2 plug-in configuration
- file, ``/etc/neutron/plugins/ml2/ml2_conf.ini``. If this symbolic
- link does not exist, create it using the following command:
-
- .. code-block:: console
-
- # ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
-
- .. end
-
- #. Populate the database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
- --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
-
- .. end
-
- .. note::
-
- Database population occurs later for Networking because the script
- requires complete server and plug-in configuration files.
-
- #. Restart the Compute API service:
-
- .. code-block:: console
-
- # systemctl restart openstack-nova-api.service
-
- .. end
-
- #. Start the Networking services and configure them to start when the system
- boots.
-
- For both networking options:
-
- .. code-block:: console
-
- # systemctl enable neutron-server.service \
- neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
- neutron-metadata-agent.service
- # systemctl start neutron-server.service \
- neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
- neutron-metadata-agent.service
-
- .. end
-
- For networking option 2, also enable and start the layer-3 service:
-
- .. code-block:: console
-
- # systemctl enable neutron-l3-agent.service
- # systemctl start neutron-l3-agent.service
-
- .. end
-
-.. endonly
-
-.. only:: obs
-
- .. note::
-
- SLES enables apparmor by default and restricts dnsmasq. You need to
- either completely disable apparmor or disable only the dnsmasq
- profile:
-
- .. code-block:: console
-
- # ln -s /etc/apparmor.d/usr.sbin.dnsmasq /etc/apparmor.d/disable/
- # systemctl restart apparmor
-
- .. end
-
- #. Restart the Compute API service:
-
- .. code-block:: console
-
- # systemctl restart openstack-nova-api.service
-
- .. end
-
- #. Start the Networking services and configure them to start when the system
- boots.
-
- For both networking options:
-
- .. code-block:: console
-
- # systemctl enable openstack-neutron.service \
- openstack-neutron-linuxbridge-agent.service \
- openstack-neutron-dhcp-agent.service \
- openstack-neutron-metadata-agent.service
- # systemctl start openstack-neutron.service \
- openstack-neutron-linuxbridge-agent.service \
- openstack-neutron-dhcp-agent.service \
- openstack-neutron-metadata-agent.service
-
- .. end
-
- For networking option 2, also enable and start the layer-3 service:
-
- .. code-block:: console
-
- # systemctl enable openstack-neutron-l3-agent.service
- # systemctl start openstack-neutron-l3-agent.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Populate the database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
- --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
-
- .. end
-
- .. note::
-
- Database population occurs later for Networking because the script
- requires complete server and plug-in configuration files.
-
- #. Restart the Compute API service:
-
- .. code-block:: console
-
- # service nova-api restart
-
- .. end
-
- #. Restart the Networking services.
-
- For both networking options:
-
- .. code-block:: console
-
- # service neutron-server restart
- # service neutron-linuxbridge-agent restart
- # service neutron-dhcp-agent restart
- # service neutron-metadata-agent restart
-
- .. end
-
- For networking option 2, also restart the layer-3 service:
-
- .. code-block:: console
-
- # service neutron-l3-agent restart
-
- .. end
-
-.. endonly
+ neutron-controller-install-debian
+ neutron-controller-install-obs
+ neutron-controller-install-rdo
+ neutron-controller-install-ubuntu
diff --git a/doc/install-guide/source/nova-compute-install-debian.rst b/doc/install-guide/source/nova-compute-install-debian.rst
new file mode 100644
index 0000000000..fcd6a10dca
--- /dev/null
+++ b/doc/install-guide/source/nova-compute-install-debian.rst
@@ -0,0 +1,298 @@
+Install and configure a compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Compute
+service on a compute node. The service supports several
+:term:`hypervisors <hypervisor>` to deploy :term:`instances <instance>`
+or :term:`VMs <virtual machine (VM)>`. For simplicity, this configuration
+uses the :term:`QEMU <Quick EMUlator (QEMU)>` hypervisor with the
+:term:`KVM <kernel-based VM (KVM)>` extension
+on compute nodes that support hardware acceleration for virtual machines.
+On legacy hardware, this configuration uses the generic QEMU hypervisor.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional compute nodes.
+
+.. note::
+
+ This section assumes that you are following the instructions in
+ this guide step-by-step to configure the first compute node. If you
+ want to configure additional compute nodes, prepare them in a similar
+ fashion to the first compute node in the :ref:`example architectures
+ ` section. Each additional compute node
+ requires a unique IP address.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install nova-compute
+
+ .. end
+
+
+
+Respond to prompts for debconf.
+
+.. :doc:`database management `,
+ :doc:`Identity service credentials `,
+ and :doc:`message broker credentials `. Make
+ sure that you do not activate database management handling by debconf,
+ as a compute node should not access the central database.
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+* In the ``[DEFAULT]`` section, check that the ``my_ip`` option
+ is correctly set (this value is handled by the config and postinst
+ scripts of the ``nova-common`` package using debconf):
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your compute node,
+ typically 10.0.0.31 for the first node in the
+ :ref:`example architecture `.
+
+
+
+ * In the ``[vnc]`` section, enable and configure remote console access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ # ...
+ enabled = True
+ vncserver_listen = 0.0.0.0
+ vncserver_proxyclient_address = $my_ip
+ novncproxy_base_url = http://controller:6080/vnc_auto.html
+
+ .. end
+
+ The server component listens on all IP addresses and the proxy
+ component only listens on the management interface IP address of
+ the compute node. The base URL indicates the location where you
+ can use a web browser to access remote consoles of instances
+ on this compute node.
+
+ .. note::
+
+ If the web browser to access remote consoles resides on
+ a host that cannot resolve the ``controller`` hostname,
+ you must replace ``controller`` with the management
+ interface IP address of the controller node.
+
+ * In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+
+
+
+ * In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options
+ in the ``[placement]`` section.
+
+
+3. Ensure the kernel module ``nbd`` is loaded.
+
+ .. code-block:: console
+
+ # modprobe nbd
+
+ .. end
+
+4. Ensure the module loads on every boot by adding ``nbd``
+ to the ``/etc/modules-load.d/nbd.conf`` file.
+
+
+Finalize installation
+---------------------
+
+#. Determine whether your compute node supports hardware acceleration
+ for virtual machines:
+
+ .. code-block:: console
+
+ $ egrep -c '(vmx|svm)' /proc/cpuinfo
+
+ .. end
+
+ If this command returns a value of ``one or greater``, your compute
+ node supports hardware acceleration which typically requires no
+ additional configuration.
+
+ If this command returns a value of ``zero``, your compute node does
+ not support hardware acceleration and you must configure ``libvirt``
+ to use QEMU instead of KVM.
+
+
+
+
+* Replace the ``nova-compute-kvm`` package with ``nova-compute-qemu``
+ which automatically changes the ``/etc/nova/nova-compute.conf``
+ file and installs the necessary dependencies:
+
+ .. code-block:: console
+
+ # apt install nova-compute-qemu
+
+ .. end
+
+
+
+
+2. Restart the Compute service:
+
+ .. code-block:: console
+
+ # service nova-compute restart
+
+ .. end
+
+
+.. note::
+
+ If the ``nova-compute`` service fails to start, check
+ ``/var/log/nova/nova-compute.log``. The error message
+ ``AMQP server on controller:5672 is unreachable`` likely indicates that
+ the firewall on the controller node is preventing access to port 5672.
+ Configure the firewall to open port 5672 on the controller node and
+ restart ``nova-compute`` service on the compute node.
+
+Add the compute node to the cell database
+-----------------------------------------
+
+.. important::
+
+ Run the following commands on the **controller** node.
+
+#. Source the admin credentials to enable admin-only CLI commands, then
+ confirm there are compute hosts in the database:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ $ openstack compute service list --service nova-compute
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | ID | Host | Binary | Zone | State | Status | Updated At |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+
+#. Discover compute hosts:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
+
+ Found 2 cell mappings.
+ Skipping cell0 since it does not contain hosts.
+ Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+ Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+
+ .. note::
+
+ When you add new compute nodes, you must run ``nova-manage cell_v2
+ discover_hosts`` on the controller node to register those new compute
+ nodes. Alternatively, you can set an appropriate interval in
+ ``/etc/nova/nova.conf``:
+
+ .. code-block:: ini
+
+ [scheduler]
+ discover_hosts_in_cells_interval = 300
diff --git a/doc/install-guide/source/nova-compute-install-obs.rst b/doc/install-guide/source/nova-compute-install-obs.rst
new file mode 100644
index 0000000000..31dd0488ee
--- /dev/null
+++ b/doc/install-guide/source/nova-compute-install-obs.rst
@@ -0,0 +1,347 @@
+Install and configure a compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Compute
+service on a compute node. The service supports several
+:term:`hypervisors <hypervisor>` to deploy :term:`instances <instance>`
+or :term:`VMs <virtual machine (VM)>`. For simplicity, this configuration
+uses the :term:`QEMU <Quick EMUlator (QEMU)>` hypervisor with the
+:term:`KVM <kernel-based VM (KVM)>` extension
+on compute nodes that support hardware acceleration for virtual machines.
+On legacy hardware, this configuration uses the generic QEMU hypervisor.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional compute nodes.
+
+.. note::
+
+ This section assumes that you are following the instructions in
+ this guide step-by-step to configure the first compute node. If you
+ want to configure additional compute nodes, prepare them in a similar
+ fashion to the first compute node in the :ref:`example architectures
+ ` section. Each additional compute node
+ requires a unique IP address.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-nova-compute genisoimage qemu-kvm libvirt
+
+ .. end
+
+
+
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+* In the ``[DEFAULT]`` section, enable only the compute and
+ metadata APIs:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_apis = osapi_compute,metadata
+
+ .. end
+
+
+
+* In the ``[DEFAULT]`` section, set the ``compute_driver``:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ compute_driver = libvirt.LibvirtDriver
+
+ .. end
+
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+
+* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your compute node,
+ typically 10.0.0.31 for the first node in the
+ :ref:`example architecture `.
+
+* In the ``[DEFAULT]`` section, enable support for the Networking service:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ use_neutron = True
+ firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+ .. end
+
+ .. note::
+
+ By default, Compute uses an internal firewall service. Since
+ Networking includes a firewall service, you must disable the Compute
+ firewall service by using the
+ ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
+
+
+ * In the ``[vnc]`` section, enable and configure remote console access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ # ...
+ enabled = True
+ vncserver_listen = 0.0.0.0
+ vncserver_proxyclient_address = $my_ip
+ novncproxy_base_url = http://controller:6080/vnc_auto.html
+
+ .. end
+
+ The server component listens on all IP addresses and the proxy
+ component only listens on the management interface IP address of
+ the compute node. The base URL indicates the location where you
+ can use a web browser to access remote consoles of instances
+ on this compute node.
+
+ .. note::
+
+ If the web browser to access remote consoles resides on
+ a host that cannot resolve the ``controller`` hostname,
+ you must replace ``controller`` with the management
+ interface IP address of the controller node.
+
+ * In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/run/nova
+
+ .. end
+
+
+
+
+
+ * In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options
+ in the ``[placement]`` section.
+
+
+3. Ensure the kernel module ``nbd`` is loaded.
+
+ .. code-block:: console
+
+ # modprobe nbd
+
+ .. end
+
+4. Ensure the module loads on every boot by adding ``nbd``
+ to the ``/etc/modules-load.d/nbd.conf`` file.
+
+
+Finalize installation
+---------------------
+
+#. Determine whether your compute node supports hardware acceleration
+ for virtual machines:
+
+ .. code-block:: console
+
+ $ egrep -c '(vmx|svm)' /proc/cpuinfo
+
+ .. end
+
+ If this command returns a value of ``one or greater``, your compute
+ node supports hardware acceleration which typically requires no
+ additional configuration.
+
+ If this command returns a value of ``zero``, your compute node does
+ not support hardware acceleration and you must configure ``libvirt``
+ to use QEMU instead of KVM.
+
+
+* Edit the ``[libvirt]`` section in the
+ ``/etc/nova/nova.conf`` file as follows:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [libvirt]
+ # ...
+ virt_type = qemu
+
+ .. end
+
+
+
+
+
+2. Start the Compute service including its dependencies and configure
+ them to start automatically when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable libvirtd.service openstack-nova-compute.service
+ # systemctl start libvirtd.service openstack-nova-compute.service
+
+ .. end
+
+
+
+.. note::
+
+ If the ``nova-compute`` service fails to start, check
+ ``/var/log/nova/nova-compute.log``. The error message
+ ``AMQP server on controller:5672 is unreachable`` likely indicates that
+ the firewall on the controller node is preventing access to port 5672.
+ Configure the firewall to open port 5672 on the controller node and
+ restart ``nova-compute`` service on the compute node.
+
+Add the compute node to the cell database
+-----------------------------------------
+
+.. important::
+
+ Run the following commands on the **controller** node.
+
+#. Source the admin credentials to enable admin-only CLI commands, then
+ confirm there are compute hosts in the database:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ $ openstack compute service list --service nova-compute
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | ID | Host | Binary | Zone | State | Status | Updated At |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+
+#. Discover compute hosts:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
+
+ Found 2 cell mappings.
+ Skipping cell0 since it does not contain hosts.
+ Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+ Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+
+ .. note::
+
+ When you add new compute nodes, you must run ``nova-manage cell_v2
+ discover_hosts`` on the controller node to register those new compute
+ nodes. Alternatively, you can set an appropriate interval in
+ ``/etc/nova/nova.conf``:
+
+ .. code-block:: ini
+
+ [scheduler]
+ discover_hosts_in_cells_interval = 300
diff --git a/doc/install-guide/source/nova-compute-install-rdo.rst b/doc/install-guide/source/nova-compute-install-rdo.rst
new file mode 100644
index 0000000000..ba09ef3c7e
--- /dev/null
+++ b/doc/install-guide/source/nova-compute-install-rdo.rst
@@ -0,0 +1,323 @@
+Install and configure a compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Compute
+service on a compute node. The service supports several
+:term:`hypervisors <hypervisor>` to deploy :term:`instances <instance>`
+or :term:`VMs <virtual machine (VM)>`. For simplicity, this configuration
+uses the :term:`QEMU <Quick EMUlator (QEMU)>` hypervisor with the
+:term:`KVM <kernel-based VM (KVM)>` extension
+on compute nodes that support hardware acceleration for virtual machines.
+On legacy hardware, this configuration uses the generic QEMU hypervisor.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional compute nodes.
+
+.. note::
+
+ This section assumes that you are following the instructions in
+ this guide step-by-step to configure the first compute node. If you
+ want to configure additional compute nodes, prepare them in a similar
+ fashion to the first compute node in the :ref:`example architectures
+ ` section. Each additional compute node
+ requires a unique IP address.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-nova-compute
+
+ .. end
+
+
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+* In the ``[DEFAULT]`` section, enable only the compute and
+ metadata APIs:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_apis = osapi_compute,metadata
+
+ .. end
+
+
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+
+* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your compute node,
+ typically 10.0.0.31 for the first node in the
+ :ref:`example architecture `.
+
+* In the ``[DEFAULT]`` section, enable support for the Networking service:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ use_neutron = True
+ firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+ .. end
+
+ .. note::
+
+ By default, Compute uses an internal firewall service. Since
+ Networking includes a firewall service, you must disable the Compute
+ firewall service by using the
+ ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
+
+
+ * In the ``[vnc]`` section, enable and configure remote console access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ # ...
+ enabled = True
+ vncserver_listen = 0.0.0.0
+ vncserver_proxyclient_address = $my_ip
+ novncproxy_base_url = http://controller:6080/vnc_auto.html
+
+ .. end
+
+ The server component listens on all IP addresses and the proxy
+ component only listens on the management interface IP address of
+ the compute node. The base URL indicates the location where you
+ can use a web browser to access remote consoles of instances
+ on this compute node.
+
+ .. note::
+
+ If the web browser to access remote consoles resides on
+ a host that cannot resolve the ``controller`` hostname,
+ you must replace ``controller`` with the management
+ interface IP address of the controller node.
+
+ * In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/nova/tmp
+
+ .. end
+
+
+
+
+ * In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options
+ in the ``[placement]`` section.
+
+
+Finalize installation
+---------------------
+
+#. Determine whether your compute node supports hardware acceleration
+ for virtual machines:
+
+ .. code-block:: console
+
+ $ egrep -c '(vmx|svm)' /proc/cpuinfo
+
+ .. end
+
+ If this command returns a value of ``one or greater``, your compute
+ node supports hardware acceleration which typically requires no
+ additional configuration.
+
+ If this command returns a value of ``zero``, your compute node does
+ not support hardware acceleration and you must configure ``libvirt``
+ to use QEMU instead of KVM.
+
+
+* Edit the ``[libvirt]`` section in the
+ ``/etc/nova/nova.conf`` file as follows:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [libvirt]
+ # ...
+ virt_type = qemu
+
+ .. end
+
+
+
+
+
+2. Start the Compute service including its dependencies and configure
+ them to start automatically when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable libvirtd.service openstack-nova-compute.service
+ # systemctl start libvirtd.service openstack-nova-compute.service
+
+ .. end
+
+
+
+.. note::
+
+ If the ``nova-compute`` service fails to start, check
+ ``/var/log/nova/nova-compute.log``. The error message
+ ``AMQP server on controller:5672 is unreachable`` likely indicates that
+ the firewall on the controller node is preventing access to port 5672.
+ Configure the firewall to open port 5672 on the controller node and
+ restart ``nova-compute`` service on the compute node.
+
+Add the compute node to the cell database
+-----------------------------------------
+
+.. important::
+
+ Run the following commands on the **controller** node.
+
+#. Source the admin credentials to enable admin-only CLI commands, then
+ confirm there are compute hosts in the database:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ $ openstack compute service list --service nova-compute
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | ID | Host | Binary | Zone | State | Status | Updated At |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+
+#. Discover compute hosts:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
+
+ Found 2 cell mappings.
+ Skipping cell0 since it does not contain hosts.
+ Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+ Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+
+ .. note::
+
+ When you add new compute nodes, you must run ``nova-manage cell_v2
+ discover_hosts`` on the controller node to register those new compute
+ nodes. Alternatively, you can set an appropriate interval in
+ ``/etc/nova/nova.conf``:
+
+ .. code-block:: ini
+
+ [scheduler]
+ discover_hosts_in_cells_interval = 300
diff --git a/doc/install-guide/source/nova-compute-install-ubuntu.rst b/doc/install-guide/source/nova-compute-install-ubuntu.rst
new file mode 100644
index 0000000000..bf7c97a0bc
--- /dev/null
+++ b/doc/install-guide/source/nova-compute-install-ubuntu.rst
@@ -0,0 +1,316 @@
+Install and configure a compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Compute
+service on a compute node. The service supports several
+:term:`hypervisors <hypervisor>` to deploy :term:`instances <instance>`
+or :term:`VMs <virtual machine (VM)>`. For simplicity, this configuration
+uses the :term:`QEMU <Quick EMUlator (QEMU)>` hypervisor with the
+:term:`KVM <kernel-based VM (KVM)>` extension
+on compute nodes that support hardware acceleration for virtual machines.
+On legacy hardware, this configuration uses the generic QEMU hypervisor.
+You can follow these instructions with minor modifications to horizontally
+scale your environment with additional compute nodes.
+
+.. note::
+
+ This section assumes that you are following the instructions in
+ this guide step-by-step to configure the first compute node. If you
+ want to configure additional compute nodes, prepare them in a similar
+ fashion to the first compute node in the :ref:`example architectures
+ ` section. Each additional compute node
+ requires a unique IP address.
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install nova-compute
+
+ .. end
+
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for
+ the ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+
+
+* In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ .. end
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network interface on your compute node,
+ typically 10.0.0.31 for the first node in the
+ :ref:`example architecture `.
+
+* In the ``[DEFAULT]`` section, enable support for the Networking service:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ use_neutron = True
+ firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+ .. end
+
+ .. note::
+
+ By default, Compute uses an internal firewall service. Since
+ Networking includes a firewall service, you must disable the Compute
+ firewall service by using the
+ ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
+
+
+ * In the ``[vnc]`` section, enable and configure remote console access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ # ...
+ enabled = True
+ vncserver_listen = 0.0.0.0
+ vncserver_proxyclient_address = $my_ip
+ novncproxy_base_url = http://controller:6080/vnc_auto.html
+
+ .. end
+
+ The server component listens on all IP addresses and the proxy
+ component only listens on the management interface IP address of
+ the compute node. The base URL indicates the location where you
+ can use a web browser to access remote consoles of instances
+ on this compute node.
+
+ .. note::
+
+ If the web browser to access remote consoles resides on
+ a host that cannot resolve the ``controller`` hostname,
+ you must replace ``controller`` with the management
+ interface IP address of the controller node.
+
+ * In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/nova/tmp
+
+ .. end
+
+
+
+.. todo::
+
+ https://bugs.launchpad.net/ubuntu/+source/nova/+bug/1506667
+
+* Due to a packaging bug, remove the ``log_dir`` option from the
+ ``[DEFAULT]`` section.
+
+
+
+ * In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options
+ in the ``[placement]`` section.
+
+
+Finalize installation
+---------------------
+
+#. Determine whether your compute node supports hardware acceleration
+ for virtual machines:
+
+ .. code-block:: console
+
+ $ egrep -c '(vmx|svm)' /proc/cpuinfo
+
+ .. end
+
+ If this command returns a value of ``one or greater``, your compute
+ node supports hardware acceleration which typically requires no
+ additional configuration.
+
+ If this command returns a value of ``zero``, your compute node does
+ not support hardware acceleration and you must configure ``libvirt``
+ to use QEMU instead of KVM.
+
+
+
+* Edit the ``[libvirt]`` section in the
+ ``/etc/nova/nova-compute.conf`` file as follows:
+
+ .. path /etc/nova/nova-compute.conf
+ .. code-block:: ini
+
+ [libvirt]
+ # ...
+ virt_type = qemu
+
+ .. end
+
+
+
+
+
+2. Restart the Compute service:
+
+ .. code-block:: console
+
+ # service nova-compute restart
+
+ .. end
+
+
+.. note::
+
+ If the ``nova-compute`` service fails to start, check
+ ``/var/log/nova/nova-compute.log``. The error message
+ ``AMQP server on controller:5672 is unreachable`` likely indicates that
+ the firewall on the controller node is preventing access to port 5672.
+ Configure the firewall to open port 5672 on the controller node and
+ restart ``nova-compute`` service on the compute node.
+
+Add the compute node to the cell database
+-----------------------------------------
+
+.. important::
+
+ Run the following commands on the **controller** node.
+
+#. Source the admin credentials to enable admin-only CLI commands, then
+ confirm there are compute hosts in the database:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ $ openstack compute service list --service nova-compute
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | ID | Host | Binary | Zone | State | Status | Updated At |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 |
+ +----+-------+--------------+------+-------+---------+----------------------------+
+
+#. Discover compute hosts:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
+
+ Found 2 cell mappings.
+ Skipping cell0 since it does not contain hosts.
+ Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc
+ Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+ Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
+
+ .. note::
+
+ When you add new compute nodes, you must run ``nova-manage cell_v2
+ discover_hosts`` on the controller node to register those new compute
+ nodes. Alternatively, you can set an appropriate interval in
+ ``/etc/nova/nova.conf``:
+
+ .. code-block:: ini
+
+ [scheduler]
+ discover_hosts_in_cells_interval = 300
diff --git a/doc/install-guide/source/nova-compute-install.rst b/doc/install-guide/source/nova-compute-install.rst
index 2fee4b9276..078af2191f 100644
--- a/doc/install-guide/source/nova-compute-install.rst
+++ b/doc/install-guide/source/nova-compute-install.rst
@@ -21,460 +21,7 @@ scale your environment with additional compute nodes.
` section. Each additional compute node
requires a unique IP address.
-Install and configure components
---------------------------------
+.. toctree::
+ :glob:
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-.. only:: obs
-
- #. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-nova-compute genisoimage qemu-kvm libvirt
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-nova-compute
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install nova-compute
-
- .. end
-
-.. endonly
-
- .. only:: debian
-
- Respond to prompts for debconf.
-
- .. :doc:`database management `,
- :doc:`Identity service credentials `,
- and :doc:`message broker credentials `. Make
- sure that you do not activate database management handling by debconf,
- as a compute node should not access the central database.
-
- .. endonly
-
-2. Edit the ``/etc/nova/nova.conf`` file and
- complete the following actions:
-
- .. only:: rdo or obs
-
- * In the ``[DEFAULT]`` section, enable only the compute and
- metadata APIs:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- enabled_apis = osapi_compute,metadata
-
- .. end
-
- .. endonly
-
- .. only:: obs
-
- * In the ``[DEFAULT]`` section, set the ``compute_driver``:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- compute_driver = libvirt.LibvirtDriver
-
- .. end
-
- .. endonly
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for
- the ``openstack`` account in ``RabbitMQ``.
-
- * In the ``[api]`` and ``[keystone_authtoken]`` sections,
- configure Identity service access:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [api]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = nova
- password = NOVA_PASS
-
- .. end
-
- Replace ``NOVA_PASS`` with the password you chose for the
- ``nova`` user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- .. only:: debian
-
- * In the ``[DEFAULT]`` section, check that the ``my_ip`` option
- is correctly set (this value is handled by the config and postinst
- scripts of the ``nova-common`` package using debconf):
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
-
- .. end
-
- Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
- of the management network interface on your compute node,
- typically 10.0.0.31 for the first node in the
- :ref:`example architecture `.
-
- .. endonly
-
- .. only:: obs or rdo or ubuntu
-
- * In the ``[DEFAULT]`` section, configure the ``my_ip`` option:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
-
- .. end
-
- Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
- of the management network interface on your compute node,
- typically 10.0.0.31 for the first node in the
- :ref:`example architecture `.
-
- * In the ``[DEFAULT]`` section, enable support for the Networking service:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- use_neutron = True
- firewall_driver = nova.virt.firewall.NoopFirewallDriver
-
- .. end
-
- .. note::
-
- By default, Compute uses an internal firewall service. Since
- Networking includes a firewall service, you must disable the Compute
- firewall service by using the
- ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
-
- .. endonly
-
- * In the ``[vnc]`` section, enable and configure remote console access:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [vnc]
- # ...
- enabled = True
- vncserver_listen = 0.0.0.0
- vncserver_proxyclient_address = $my_ip
- novncproxy_base_url = http://controller:6080/vnc_auto.html
-
- .. end
-
- The server component listens on all IP addresses and the proxy
- component only listens on the management interface IP address of
- the compute node. The base URL indicates the location where you
- can use a web browser to access remote consoles of instances
- on this compute node.
-
- .. note::
-
- If the web browser to access remote consoles resides on
- a host that cannot resolve the ``controller`` hostname,
- you must replace ``controller`` with the management
- interface IP address of the controller node.
-
- * In the ``[glance]`` section, configure the location of the
- Image service API:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [glance]
- # ...
- api_servers = http://controller:9292
-
- .. end
-
- .. only:: obs
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/run/nova
-
- .. end
-
- .. endonly
-
- .. only:: rdo or ubuntu
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/nova/tmp
-
- .. end
-
- .. endonly
-
- .. only:: ubuntu
-
- .. todo:
-
- https://bugs.launchpad.net/ubuntu/+source/nova/+bug/1506667
-
- * Due to a packaging bug, remove the ``log_dir`` option from the
- ``[DEFAULT]`` section.
-
- .. endonly
-
-
- * In the ``[placement]`` section, configure the Placement API:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [placement]
- # ...
- os_region_name = RegionOne
- project_domain_name = Default
- project_name = service
- auth_type = password
- user_domain_name = Default
- auth_url = http://controller:35357/v3
- username = placement
- password = PLACEMENT_PASS
-
- Replace ``PLACEMENT_PASS`` with the password you choose for the
- ``placement`` user in the Identity service. Comment out any other options
- in the ``[placement]`` section.
-
-.. only:: obs or debian
-
- 3. Ensure the kernel module ``nbd`` is loaded.
-
- .. code-block:: console
-
- # modprobe nbd
-
- .. end
-
- 4. Ensure the module loads on every boot by adding ``nbd``
- to the ``/etc/modules-load.d/nbd.conf`` file.
-
-.. endonly
-
-Finalize installation
----------------------
-
-#. Determine whether your compute node supports hardware acceleration
- for virtual machines:
-
- .. code-block:: console
-
- $ egrep -c '(vmx|svm)' /proc/cpuinfo
-
- .. end
-
- If this command returns a value of ``one or greater``, your compute
- node supports hardware acceleration which typically requires no
- additional configuration.
-
- If this command returns a value of ``zero``, your compute node does
- not support hardware acceleration and you must configure ``libvirt``
- to use QEMU instead of KVM.
-
- .. only:: obs or rdo
-
- * Edit the ``[libvirt]`` section in the
- ``/etc/nova/nova.conf`` file as follows:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [libvirt]
- # ...
- virt_type = qemu
-
- .. end
-
- .. endonly
-
- .. only:: ubuntu
-
- * Edit the ``[libvirt]`` section in the
- ``/etc/nova/nova-compute.conf`` file as follows:
-
- .. path /etc/nova/nova-compute.conf
- .. code-block:: ini
-
- [libvirt]
- # ...
- virt_type = qemu
-
- .. end
-
- .. endonly
-
- .. only:: debian
-
- * Replace the ``nova-compute-kvm`` package with ``nova-compute-qemu``
- which automatically changes the ``/etc/nova/nova-compute.conf``
- file and installs the necessary dependencies:
-
- .. code-block:: console
-
- # apt install nova-compute-qemu
-
- .. end
-
- .. endonly
-
-.. only:: obs or rdo
-
- 2. Start the Compute service including its dependencies and configure
- them to start automatically when the system boots:
-
- .. code-block:: console
-
- # systemctl enable libvirtd.service openstack-nova-compute.service
- # systemctl start libvirtd.service openstack-nova-compute.service
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- 2. Restart the Compute service:
-
- .. code-block:: console
-
- # service nova-compute restart
-
- .. end
-
-.. endonly
-
-.. note::
-
- If the ``nova-compute`` service fails to start, check
- ``/var/log/nova/nova-compute.log``. The error message
- ``AMQP server on controller:5672 is unreachable`` likely indicates that
- the firewall on the controller node is preventing access to port 5672.
- Configure the firewall to open port 5672 on the controller node and
- restart ``nova-compute`` service on the compute node.
-
-Add the compute node to the cell database
------------------------------------------
-
-.. important::
-
- Run the following commands on the **controller** node.
-
-#. Source the admin credentials to enable admin-only CLI commands, then
- confirm there are compute hosts in the database:
-
- .. code-block:: console
-
- $ . admin-openrc
-
- $ openstack compute service list --service nova-compute
- +----+-------+--------------+------+-------+---------+----------------------------+
- | ID | Host | Binary | Zone | State | Status | Updated At |
- +----+-------+--------------+------+-------+---------+----------------------------+
- | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 |
- +----+-------+--------------+------+-------+---------+----------------------------+
-
-#. Discover compute hosts:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
-
- Found 2 cell mappings.
- Skipping cell0 since it does not contain hosts.
- Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc
- Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc
- Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
- Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3
-
- .. note::
-
- When you add new compute nodes, you must run ``nova-manage cell_v2
- discover_hosts`` on the controller node to register those new compute
- nodes. Alternatively, you can set an appropriate interval in
- ``/etc/nova/nova.conf``:
-
- .. code-block:: ini
-
- [scheduler]
- discover_hosts_in_cells_interval = 300
+ nova-compute-install-*
diff --git a/doc/install-guide/source/nova-controller-install-debian.rst b/doc/install-guide/source/nova-controller-install-debian.rst
new file mode 100644
index 0000000000..163a14487e
--- /dev/null
+++ b/doc/install-guide/source/nova-controller-install-debian.rst
@@ -0,0 +1,594 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the
+Compute service, code-named nova, on the controller node.
+
+Prerequisites
+-------------
+
+Before you install and configure the Compute service, you must
+create databases, service credentials, and API endpoints.
+
+#. To create the databases, complete these steps:
+
+
+
+* Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+ * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE nova_api;
+ MariaDB [(none)]> CREATE DATABASE nova;
+ MariaDB [(none)]> CREATE DATABASE nova_cell0;
+
+ .. end
+
+ * Grant proper access to the databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. Create the Compute service credentials:
+
+ * Create the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt nova
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 8a7dbf5279404537b1c7b86c033620fe |
+ | name | nova |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user nova admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``nova`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name nova \
+ --description "OpenStack Compute" compute
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Compute |
+ | enabled | True |
+ | id | 060d59eac51b4594815603d75a00aba2 |
+ | name | nova |
+ | type | compute |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Compute API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ compute public http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 3c1caa473bfe4390a11e7177894bcc7b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute internal http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | e3c918de680746a586eac1f2d9bc10ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute admin http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 38f7af91666a47cfb97b4dc790b94424 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ .. end
+
+#. Create a Placement service user using your chosen ``PLACEMENT_PASS``:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt placement
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fa742015a6494a949f67629884fc7ec8 |
+ | name | placement |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+#. Add the Placement user to the service project with the admin role:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user placement admin
+
+ .. note::
+
+ This command provides no output.
+
+#. Create the Placement API entry in the service catalog:
+
+ .. code-block:: console
+
+ $ openstack service create --name placement --description "Placement API" placement
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Placement API |
+ | enabled | True |
+ | id | 2d1a27022e6e4185b86adac4444c495f |
+ | name | placement |
+ | type | placement |
+ +-------------+----------------------------------+
+
+#. Create the Placement API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne placement public http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 2b1b2637908b4137a9c2e0470487cbc0 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement internal http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 02bcda9a150a4bd7993ff4879df971ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement admin http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 3d71177b9e0f406f98cbff198d74b182 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install nova-api nova-conductor nova-consoleauth \
+ nova-consoleproxy nova-scheduler
+
+ .. end
+
+ .. note::
+
+ ``nova-api-metadata`` is included in the ``nova-api`` package,
+ and can be selected through debconf.
+
+ .. note::
+
+ A single ``nova-consoleproxy`` package provides the
+ ``nova-novncproxy``, ``nova-spicehtml5proxy``, and
+ ``nova-xvpvncproxy`` services. To select which proxy to run, edit
+ the ``/etc/default/nova-consoleproxy`` file, either manually or
+ through the debconf interface, and then stop and start the console
+ daemons.
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+ * In the ``[api_database]`` and ``[database]`` sections, configure
+ database access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api_database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
+
+ [database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with the password you chose for
+ the Compute databases.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+* In the ``[vnc]`` section, configure the VNC proxy to use the management
+ interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ enabled = true
+ # ...
+ vncserver_listen = $my_ip
+ vncserver_proxyclient_address = $my_ip
+
+ .. end
+
+
+* In the ``[spice]`` section, disable spice:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [spice]
+ enabled = false
+
+ .. end
+
+
+* In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+
+
+
+* In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options in
+ the ``[placement]`` section.
+
+
+
+3. Populate the nova-api database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage api_db sync" nova
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+4. Register the ``cell0`` database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
+
+ .. end
+
+5. Create the ``cell1`` cell:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
+ 109e1d4b-536a-40d0-83c6-5f121b82b650
+
+ .. end
+
+6. Populate the nova database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage db sync" nova
+
+7. Verify nova cell0 and cell1 are registered correctly:
+
+ .. code-block:: console
+
+ # nova-manage cell_v2 list_cells
+ +-------+--------------------------------------+
+ | Name | UUID |
+ +-------+--------------------------------------+
+ | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 |
+ | cell0 | 00000000-0000-0000-0000-000000000000 |
+ +-------+--------------------------------------+
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+
+* Shut down ``nova-spicehtml5proxy``:
+
+ .. code-block:: console
+
+ # service nova-spicehtml5proxy stop
+
+ .. end
+
+* Select novnc startup in ``/etc/default/nova-consoleproxy``:
+
+ .. path /etc/default/nova-consoleproxy
+ .. code-block:: ini
+
+ NOVA_CONSOLE_PROXY_TYPE=novnc
+
+ .. end
+
+* Add a systemd service file for nova-novncproxy in
+ ``/lib/systemd/system/nova-novncproxy.service``:
+
+ .. path /lib/systemd/system/nova-novncproxy.service:
+ .. code-block:: ini
+
+ [Unit]
+ Description=OpenStack Compute NoVNC proxy
+ After=postgresql.service mysql.service keystone.service rabbitmq-server.service ntp.service
+
+ Documentation=man:nova-novncproxy(1)
+
+ [Service]
+ User=nova
+ Group=nova
+ Type=simple
+ WorkingDirectory=/var/lib/nova
+ PermissionsStartOnly=true
+ ExecStartPre=/bin/mkdir -p /var/lock/nova /var/log/nova /var/lib/nova
+ ExecStartPre=/bin/chown nova:nova /var/lock/nova /var/lib/nova
+ ExecStartPre=/bin/chown nova:adm /var/log/nova
+ ExecStart=/etc/init.d/nova-novncproxy systemd-start
+ Restart=on-failure
+ LimitNOFILE=65535
+ TimeoutStopSec=65
+
+ [Install]
+ WantedBy=multi-user.target
+
+ .. end
+
+* Start the noVNC proxy:
+
+ .. code-block:: console
+
+ # systemctl daemon-reload
+ # systemctl enable nova-novncproxy
+ # service nova-novncproxy start
+
+ .. end
+
+* Restart the other Compute services:
+
+ .. code-block:: console
+
+ # service nova-api restart
+ # service nova-consoleauth restart
+ # service nova-scheduler restart
+ # service nova-conductor restart
+
+ .. end
+
+
+
+* Restart the Compute services:
+
+ .. code-block:: console
+
+ # service nova-api restart
+ # service nova-consoleauth restart
+ # service nova-scheduler restart
+ # service nova-conductor restart
+ # service nova-novncproxy restart
+
+ .. end
+
diff --git a/doc/install-guide/source/nova-controller-install-obs.rst b/doc/install-guide/source/nova-controller-install-obs.rst
new file mode 100644
index 0000000000..9940188907
--- /dev/null
+++ b/doc/install-guide/source/nova-controller-install-obs.rst
@@ -0,0 +1,565 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the
+Compute service, code-named nova, on the controller node.
+
+Prerequisites
+-------------
+
+Before you install and configure the Compute service, you must
+create databases, service credentials, and API endpoints.
+
+#. To create the databases, complete these steps:
+
+
+
+* Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+ * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE nova_api;
+ MariaDB [(none)]> CREATE DATABASE nova;
+ MariaDB [(none)]> CREATE DATABASE nova_cell0;
+
+ .. end
+
+ * Grant proper access to the databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. Create the Compute service credentials:
+
+ * Create the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt nova
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 8a7dbf5279404537b1c7b86c033620fe |
+ | name | nova |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user nova admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``nova`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name nova \
+ --description "OpenStack Compute" compute
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Compute |
+ | enabled | True |
+ | id | 060d59eac51b4594815603d75a00aba2 |
+ | name | nova |
+ | type | compute |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Compute API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ compute public http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 3c1caa473bfe4390a11e7177894bcc7b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute internal http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | e3c918de680746a586eac1f2d9bc10ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute admin http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 38f7af91666a47cfb97b4dc790b94424 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ .. end
+
+#. Create a Placement service user using your chosen ``PLACEMENT_PASS``:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt placement
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fa742015a6494a949f67629884fc7ec8 |
+ | name | placement |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+#. Add the Placement user to the service project with the admin role:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user placement admin
+
+ .. note::
+
+ This command provides no output.
+
+#. Create the Placement API entry in the service catalog:
+
+ .. code-block:: console
+
+ $ openstack service create --name placement --description "Placement API" placement
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Placement API |
+ | enabled | True |
+ | id | 2d1a27022e6e4185b86adac4444c495f |
+ | name | placement |
+ | type | placement |
+ +-------------+----------------------------------+
+
+#. Create the Placement API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne placement public http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 2b1b2637908b4137a9c2e0470487cbc0 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement internal http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 02bcda9a150a4bd7993ff4879df971ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement admin http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 3d71177b9e0f406f98cbff198d74b182 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+.. note::
+
+ As of the Newton release, SUSE OpenStack packages are shipped
+ with the upstream default configuration files. For example,
+ ``/etc/nova/nova.conf`` has customizations in
+ ``/etc/nova/nova.conf.d/010-nova.conf``. While the following
+ instructions modify the default configuration file, adding a new file
+ in ``/etc/nova/nova.conf.d`` achieves the same result.
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # zypper install openstack-nova-api openstack-nova-scheduler \
+ openstack-nova-conductor openstack-nova-consoleauth \
+ openstack-nova-novncproxy openstack-nova-placement-api \
+ iptables
+
+ .. end
+
+
+
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+* In the ``[DEFAULT]`` section, enable only the compute and metadata
+ APIs:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_apis = osapi_compute,metadata
+
+ .. end
+
+
+ * In the ``[api_database]`` and ``[database]`` sections, configure
+ database access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api_database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
+
+ [database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with the password you chose for
+ the Compute databases.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+* In the ``[DEFAULT]`` section, enable support for the Networking service:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ use_neutron = True
+ firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+ .. end
+
+ .. note::
+
+ By default, Compute uses an internal firewall driver. Since the
+ Networking service includes a firewall driver, you must disable the
+ Compute firewall driver by using the
+ ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
+
+
+* In the ``[vnc]`` section, configure the VNC proxy to use the management
+ interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ enabled = true
+ # ...
+ vncserver_listen = $my_ip
+ vncserver_proxyclient_address = $my_ip
+
+ .. end
+
+
+* In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+  .. path /etc/nova/nova.conf
+  .. code-block:: ini
+
+     [oslo_concurrency]
+     # ...
+     lock_path = /var/run/nova
+
+  .. end
+
+
+
+
+
+* In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options in
+ the ``[placement]`` section.
+
+
+
+3. Populate the nova-api database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage api_db sync" nova
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+4. Register the ``cell0`` database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
+
+ .. end
+
+5. Create the ``cell1`` cell:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
+ 109e1d4b-536a-40d0-83c6-5f121b82b650
+
+ .. end
+
+6. Populate the nova database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage db sync" nova
+
+7. Verify nova cell0 and cell1 are registered correctly:
+
+ .. code-block:: console
+
+ # nova-manage cell_v2 list_cells
+ +-------+--------------------------------------+
+ | Name | UUID |
+ +-------+--------------------------------------+
+ | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 |
+ | cell0 | 00000000-0000-0000-0000-000000000000 |
+ +-------+--------------------------------------+
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+* Enable the placement API Apache vhost:
+
+ .. code-block:: console
+
+ # mv /etc/apache2/vhosts.d/nova-placement-api.conf.sample /etc/apache2/vhosts.d/nova-placement-api.conf
+ # systemctl reload apache2.service
+
+* Start the Compute services and configure them to start
+ when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-nova-api.service \
+ openstack-nova-consoleauth.service openstack-nova-scheduler.service \
+ openstack-nova-conductor.service openstack-nova-novncproxy.service
+ # systemctl start openstack-nova-api.service \
+ openstack-nova-consoleauth.service openstack-nova-scheduler.service \
+ openstack-nova-conductor.service openstack-nova-novncproxy.service
+
+ .. end
+
+
+
+
diff --git a/doc/install-guide/source/nova-controller-install-rdo.rst b/doc/install-guide/source/nova-controller-install-rdo.rst
new file mode 100644
index 0000000000..cd0bd5f49c
--- /dev/null
+++ b/doc/install-guide/source/nova-controller-install-rdo.rst
@@ -0,0 +1,572 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the
+Compute service, code-named nova, on the controller node.
+
+Prerequisites
+-------------
+
+Before you install and configure the Compute service, you must
+create databases, service credentials, and API endpoints.
+
+#. To create the databases, complete these steps:
+
+
+
+* Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ .. end
+
+
+ * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE nova_api;
+ MariaDB [(none)]> CREATE DATABASE nova;
+ MariaDB [(none)]> CREATE DATABASE nova_cell0;
+
+ .. end
+
+ * Grant proper access to the databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. Create the Compute service credentials:
+
+ * Create the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt nova
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 8a7dbf5279404537b1c7b86c033620fe |
+ | name | nova |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user nova admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``nova`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name nova \
+ --description "OpenStack Compute" compute
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Compute |
+ | enabled | True |
+ | id | 060d59eac51b4594815603d75a00aba2 |
+ | name | nova |
+ | type | compute |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Compute API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ compute public http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 3c1caa473bfe4390a11e7177894bcc7b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute internal http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | e3c918de680746a586eac1f2d9bc10ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute admin http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 38f7af91666a47cfb97b4dc790b94424 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ .. end
+
+#. Create a Placement service user using your chosen ``PLACEMENT_PASS``:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt placement
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fa742015a6494a949f67629884fc7ec8 |
+ | name | placement |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+#. Add the Placement user to the service project with the admin role:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user placement admin
+
+ .. note::
+
+ This command provides no output.
+
+#. Create the Placement API entry in the service catalog:
+
+ .. code-block:: console
+
+ $ openstack service create --name placement --description "Placement API" placement
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Placement API |
+ | enabled | True |
+ | id | 2d1a27022e6e4185b86adac4444c495f |
+ | name | placement |
+ | type | placement |
+ +-------------+----------------------------------+
+
+#. Create the Placement API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne placement public http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 2b1b2637908b4137a9c2e0470487cbc0 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement internal http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 02bcda9a150a4bd7993ff4879df971ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement admin http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 3d71177b9e0f406f98cbff198d74b182 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # yum install openstack-nova-api openstack-nova-conductor \
+ openstack-nova-console openstack-nova-novncproxy \
+ openstack-nova-scheduler openstack-nova-placement-api
+
+ .. end
+
+
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+* In the ``[DEFAULT]`` section, enable only the compute and metadata
+ APIs:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ enabled_apis = osapi_compute,metadata
+
+ .. end
+
+
+ * In the ``[api_database]`` and ``[database]`` sections, configure
+ database access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api_database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
+
+ [database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with the password you chose for
+ the Compute databases.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+* In the ``[DEFAULT]`` section, enable support for the Networking service:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ use_neutron = True
+ firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+ .. end
+
+ .. note::
+
+ By default, Compute uses an internal firewall driver. Since the
+ Networking service includes a firewall driver, you must disable the
+ Compute firewall driver by using the
+ ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
+
+
+* In the ``[vnc]`` section, configure the VNC proxy to use the management
+ interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ enabled = true
+ # ...
+ vncserver_listen = $my_ip
+ vncserver_proxyclient_address = $my_ip
+
+ .. end
+
+
+* In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/nova/tmp
+
+ .. end
+
+
+
+
+* In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options in
+ the ``[placement]`` section.
+
+
+* Due to a `packaging bug
+  <https://bugzilla.redhat.com/show_bug.cgi?id=1430540>`_, you must enable
+  access to the Placement API by adding the following configuration to
+  ``/etc/httpd/conf.d/00-nova-placement-api.conf``:
+
+ .. path /etc/httpd/conf.d/00-nova-placement-api.conf
+ .. code-block:: ini
+
+      <Directory /usr/bin>
+         <IfVersion >= 2.4>
+            Require all granted
+         </IfVersion>
+         <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+         </IfVersion>
+      </Directory>
+
+
+* Restart the httpd service:
+
+ .. code-block:: console
+
+ # systemctl restart httpd
+
+
+
+3. Populate the nova-api database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage api_db sync" nova
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+4. Register the ``cell0`` database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
+
+ .. end
+
+5. Create the ``cell1`` cell:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
+ 109e1d4b-536a-40d0-83c6-5f121b82b650
+
+ .. end
+
+6. Populate the nova database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage db sync" nova
+
+7. Verify nova cell0 and cell1 are registered correctly:
+
+ .. code-block:: console
+
+ # nova-manage cell_v2 list_cells
+ +-------+--------------------------------------+
+ | Name | UUID |
+ +-------+--------------------------------------+
+ | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 |
+ | cell0 | 00000000-0000-0000-0000-000000000000 |
+ +-------+--------------------------------------+
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+* Start the Compute services and configure them to start
+ when the system boots:
+
+ .. code-block:: console
+
+ # systemctl enable openstack-nova-api.service \
+ openstack-nova-consoleauth.service openstack-nova-scheduler.service \
+ openstack-nova-conductor.service openstack-nova-novncproxy.service
+ # systemctl start openstack-nova-api.service \
+ openstack-nova-consoleauth.service openstack-nova-scheduler.service \
+ openstack-nova-conductor.service openstack-nova-novncproxy.service
+
+ .. end
+
+
+
diff --git a/doc/install-guide/source/nova-controller-install-ubuntu.rst b/doc/install-guide/source/nova-controller-install-ubuntu.rst
new file mode 100644
index 0000000000..5bb71a7e54
--- /dev/null
+++ b/doc/install-guide/source/nova-controller-install-ubuntu.rst
@@ -0,0 +1,539 @@
+Install and configure controller node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the
+Compute service, code-named nova, on the controller node.
+
+Prerequisites
+-------------
+
+Before you install and configure the Compute service, you must
+create databases, service credentials, and API endpoints.
+
+#. To create the databases, complete these steps:
+
+
+* Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ # mysql
+
+ .. end
+
+
+
+ * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> CREATE DATABASE nova_api;
+ MariaDB [(none)]> CREATE DATABASE nova;
+ MariaDB [(none)]> CREATE DATABASE nova_cell0;
+
+ .. end
+
+ * Grant proper access to the databases:
+
+ .. code-block:: console
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+ MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
+ IDENTIFIED BY 'NOVA_DBPASS';
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+ .. end
+
+#. Create the Compute service credentials:
+
+ * Create the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt nova
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | 8a7dbf5279404537b1c7b86c033620fe |
+ | name | nova |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+ .. end
+
+ * Add the ``admin`` role to the ``nova`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user nova admin
+
+ .. end
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``nova`` service entity:
+
+ .. code-block:: console
+
+ $ openstack service create --name nova \
+ --description "OpenStack Compute" compute
+
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | OpenStack Compute |
+ | enabled | True |
+ | id | 060d59eac51b4594815603d75a00aba2 |
+ | name | nova |
+ | type | compute |
+ +-------------+----------------------------------+
+
+ .. end
+
+#. Create the Compute API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ compute public http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 3c1caa473bfe4390a11e7177894bcc7b |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute internal http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | e3c918de680746a586eac1f2d9bc10ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ compute admin http://controller:8774/v2.1
+
+ +--------------+-------------------------------------------+
+ | Field | Value |
+ +--------------+-------------------------------------------+
+ | enabled | True |
+ | id | 38f7af91666a47cfb97b4dc790b94424 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 060d59eac51b4594815603d75a00aba2 |
+ | service_name | nova |
+ | service_type | compute |
+ | url | http://controller:8774/v2.1 |
+ +--------------+-------------------------------------------+
+
+ .. end
+
+#. Create a Placement service user using your chosen ``PLACEMENT_PASS``:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt placement
+
+ User Password:
+ Repeat User Password:
+ +---------------------+----------------------------------+
+ | Field | Value |
+ +---------------------+----------------------------------+
+ | domain_id | default |
+ | enabled | True |
+ | id | fa742015a6494a949f67629884fc7ec8 |
+ | name | placement |
+ | options | {} |
+ | password_expires_at | None |
+ +---------------------+----------------------------------+
+
+#. Add the Placement user to the service project with the admin role:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user placement admin
+
+ .. note::
+
+ This command provides no output.
+
+#. Create the Placement API entry in the service catalog:
+
+ .. code-block:: console
+
+ $ openstack service create --name placement --description "Placement API" placement
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Placement API |
+ | enabled | True |
+ | id | 2d1a27022e6e4185b86adac4444c495f |
+ | name | placement |
+ | type | placement |
+ +-------------+----------------------------------+
+
+#. Create the Placement API service endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne placement public http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 2b1b2637908b4137a9c2e0470487cbc0 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement internal http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 02bcda9a150a4bd7993ff4879df971ab |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne placement admin http://controller:8778
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 3d71177b9e0f406f98cbff198d74b182 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 2d1a27022e6e4185b86adac4444c495f |
+ | service_name | placement |
+ | service_type | placement |
+ | url | http://controller:8778 |
+ +--------------+----------------------------------+
+
+Install and configure components
+--------------------------------
+
+.. include:: shared/note_configuration_vary_by_distribution.rst
+
+
+
+
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt install nova-api nova-conductor nova-consoleauth \
+ nova-novncproxy nova-scheduler nova-placement-api
+
+ .. end
+
+
+
+2. Edit the ``/etc/nova/nova.conf`` file and
+ complete the following actions:
+
+
+ * In the ``[api_database]`` and ``[database]`` sections, configure
+ database access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api_database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
+
+ [database]
+ # ...
+ connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
+
+ .. end
+
+ Replace ``NOVA_DBPASS`` with the password you chose for
+ the Compute databases.
+
+ * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
+ message queue access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ .. end
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[api]`` and ``[keystone_authtoken]`` sections,
+ configure Identity service access:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [api]
+ # ...
+ auth_strategy = keystone
+
+ [keystone_authtoken]
+ # ...
+ auth_uri = http://controller:5000
+ auth_url = http://controller:35357
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = nova
+ password = NOVA_PASS
+
+ .. end
+
+ Replace ``NOVA_PASS`` with the password you chose for the
+ ``nova`` user in the Identity service.
+
+ .. note::
+
+ Comment out or remove any other options in the
+ ``[keystone_authtoken]`` section.
+
+ * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
+ use the management interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ my_ip = 10.0.0.11
+
+ .. end
+
+
+* In the ``[DEFAULT]`` section, enable support for the Networking service:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [DEFAULT]
+ # ...
+ use_neutron = True
+ firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+ .. end
+
+ .. note::
+
+ By default, Compute uses an internal firewall driver. Since the
+ Networking service includes a firewall driver, you must disable the
+ Compute firewall driver by using the
+ ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
+
+
+* In the ``[vnc]`` section, configure the VNC proxy to use the management
+ interface IP address of the controller node:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [vnc]
+ enabled = true
+ # ...
+ vncserver_listen = $my_ip
+ vncserver_proxyclient_address = $my_ip
+
+ .. end
+
+
+* In the ``[glance]`` section, configure the location of the
+ Image service API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [glance]
+ # ...
+ api_servers = http://controller:9292
+
+ .. end
+
+
+
+
+* In the ``[oslo_concurrency]`` section, configure the lock path:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [oslo_concurrency]
+ # ...
+ lock_path = /var/lib/nova/tmp
+
+ .. end
+
+
+
+.. todo:
+
+ https://bugs.launchpad.net/ubuntu/+source/nova/+bug/1506667
+
+* Due to a packaging bug, remove the ``log_dir`` option from the
+ ``[DEFAULT]`` section.
+
+
+* In the ``[placement]`` section, configure the Placement API:
+
+ .. path /etc/nova/nova.conf
+ .. code-block:: ini
+
+ [placement]
+ # ...
+ os_region_name = RegionOne
+ project_domain_name = Default
+ project_name = service
+ auth_type = password
+ user_domain_name = Default
+ auth_url = http://controller:35357/v3
+ username = placement
+ password = PLACEMENT_PASS
+
+ Replace ``PLACEMENT_PASS`` with the password you chose for the
+ ``placement`` user in the Identity service. Comment out any other options in
+ the ``[placement]`` section.
+
+
+
+3. Populate the nova-api database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage api_db sync" nova
+
+ .. end
+
+ .. note::
+
+ Ignore any deprecation messages in this output.
+
+4. Register the ``cell0`` database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
+
+ .. end
+
+5. Create the ``cell1`` cell:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
+ 109e1d4b-536a-40d0-83c6-5f121b82b650
+
+ .. end
+
+6. Populate the nova database:
+
+ .. code-block:: console
+
+ # su -s /bin/sh -c "nova-manage db sync" nova
+
+7. Verify nova cell0 and cell1 are registered correctly:
+
+ .. code-block:: console
+
+ # nova-manage cell_v2 list_cells
+ +-------+--------------------------------------+
+ | Name | UUID |
+ +-------+--------------------------------------+
+ | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 |
+ | cell0 | 00000000-0000-0000-0000-000000000000 |
+ +-------+--------------------------------------+
+
+ .. end
+
+
+Finalize installation
+---------------------
+
+
+
+
+
+* Restart the Compute services:
+
+ .. code-block:: console
+
+ # service nova-api restart
+ # service nova-consoleauth restart
+ # service nova-scheduler restart
+ # service nova-conductor restart
+ # service nova-novncproxy restart
+
+ .. end
+
diff --git a/doc/install-guide/source/nova-controller-install.rst b/doc/install-guide/source/nova-controller-install.rst
index 34fae72683..2321b0b256 100644
--- a/doc/install-guide/source/nova-controller-install.rst
+++ b/doc/install-guide/source/nova-controller-install.rst
@@ -4,823 +4,7 @@ Install and configure controller node
This section describes how to install and configure the
Compute service, code-named nova, on the controller node.
-Prerequisites
--------------
+.. toctree::
+ :glob:
-Before you install and configure the Compute service, you must
-create databases, service credentials, and API endpoints.
-
-#. To create the databases, complete these steps:
-
- .. only:: ubuntu
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- # mysql
-
- .. end
-
- .. endonly
-
- .. only:: rdo or debian or obs
-
- * Use the database access client to connect to the database
- server as the ``root`` user:
-
- .. code-block:: console
-
- $ mysql -u root -p
-
- .. end
-
- .. endonly
-
- * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases:
-
- .. code-block:: console
-
- MariaDB [(none)]> CREATE DATABASE nova_api;
- MariaDB [(none)]> CREATE DATABASE nova;
- MariaDB [(none)]> CREATE DATABASE nova_cell0;
-
- .. end
-
- * Grant proper access to the databases:
-
- .. code-block:: console
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
- IDENTIFIED BY 'NOVA_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
- IDENTIFIED BY 'NOVA_DBPASS';
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
- IDENTIFIED BY 'NOVA_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
- IDENTIFIED BY 'NOVA_DBPASS';
-
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
- IDENTIFIED BY 'NOVA_DBPASS';
- MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
- IDENTIFIED BY 'NOVA_DBPASS';
-
- .. end
-
- Replace ``NOVA_DBPASS`` with a suitable password.
-
- * Exit the database access client.
-
-#. Source the ``admin`` credentials to gain access to
- admin-only CLI commands:
-
- .. code-block:: console
-
- $ . admin-openrc
-
- .. end
-
-#. Create the Compute service credentials:
-
- * Create the ``nova`` user:
-
- .. code-block:: console
-
- $ openstack user create --domain default --password-prompt nova
-
- User Password:
- Repeat User Password:
- +---------------------+----------------------------------+
- | Field | Value |
- +---------------------+----------------------------------+
- | domain_id | default |
- | enabled | True |
- | id | 8a7dbf5279404537b1c7b86c033620fe |
- | name | nova |
- | options | {} |
- | password_expires_at | None |
- +---------------------+----------------------------------+
-
- .. end
-
- * Add the ``admin`` role to the ``nova`` user:
-
- .. code-block:: console
-
- $ openstack role add --project service --user nova admin
-
- .. end
-
- .. note::
-
- This command provides no output.
-
- * Create the ``nova`` service entity:
-
- .. code-block:: console
-
- $ openstack service create --name nova \
- --description "OpenStack Compute" compute
-
- +-------------+----------------------------------+
- | Field | Value |
- +-------------+----------------------------------+
- | description | OpenStack Compute |
- | enabled | True |
- | id | 060d59eac51b4594815603d75a00aba2 |
- | name | nova |
- | type | compute |
- +-------------+----------------------------------+
-
- .. end
-
-#. Create the Compute API service endpoints:
-
- .. code-block:: console
-
- $ openstack endpoint create --region RegionOne \
- compute public http://controller:8774/v2.1
-
- +--------------+-------------------------------------------+
- | Field | Value |
- +--------------+-------------------------------------------+
- | enabled | True |
- | id | 3c1caa473bfe4390a11e7177894bcc7b |
- | interface | public |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 060d59eac51b4594815603d75a00aba2 |
- | service_name | nova |
- | service_type | compute |
- | url | http://controller:8774/v2.1 |
- +--------------+-------------------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- compute internal http://controller:8774/v2.1
-
- +--------------+-------------------------------------------+
- | Field | Value |
- +--------------+-------------------------------------------+
- | enabled | True |
- | id | e3c918de680746a586eac1f2d9bc10ab |
- | interface | internal |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 060d59eac51b4594815603d75a00aba2 |
- | service_name | nova |
- | service_type | compute |
- | url | http://controller:8774/v2.1 |
- +--------------+-------------------------------------------+
-
- $ openstack endpoint create --region RegionOne \
- compute admin http://controller:8774/v2.1
-
- +--------------+-------------------------------------------+
- | Field | Value |
- +--------------+-------------------------------------------+
- | enabled | True |
- | id | 38f7af91666a47cfb97b4dc790b94424 |
- | interface | admin |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 060d59eac51b4594815603d75a00aba2 |
- | service_name | nova |
- | service_type | compute |
- | url | http://controller:8774/v2.1 |
- +--------------+-------------------------------------------+
-
- .. end
-
-#. Create a Placement service user using your chosen ``PLACEMENT_PASS``:
-
- .. code-block:: console
-
- $ openstack user create --domain default --password-prompt placement
-
- User Password:
- Repeat User Password:
- +---------------------+----------------------------------+
- | Field | Value |
- +---------------------+----------------------------------+
- | domain_id | default |
- | enabled | True |
- | id | fa742015a6494a949f67629884fc7ec8 |
- | name | placement |
- | options | {} |
- | password_expires_at | None |
- +---------------------+----------------------------------+
-
-#. Add the Placement user to the service project with the admin role:
-
- .. code-block:: console
-
- $ openstack role add --project service --user placement admin
-
- .. note::
-
- This command provides no output.
-
-#. Create the Placement API entry in the service catalog:
-
- .. code-block:: console
-
- $ openstack service create --name placement --description "Placement API" placement
- +-------------+----------------------------------+
- | Field | Value |
- +-------------+----------------------------------+
- | description | Placement API |
- | enabled | True |
- | id | 2d1a27022e6e4185b86adac4444c495f |
- | name | placement |
- | type | placement |
- +-------------+----------------------------------+
-
-#. Create the Placement API service endpoints:
-
- .. code-block:: console
-
- $ openstack endpoint create --region RegionOne placement public http://controller:8778
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 2b1b2637908b4137a9c2e0470487cbc0 |
- | interface | public |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 2d1a27022e6e4185b86adac4444c495f |
- | service_name | placement |
- | service_type | placement |
- | url | http://controller:8778 |
- +--------------+----------------------------------+
-
- $ openstack endpoint create --region RegionOne placement internal http://controller:8778
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 02bcda9a150a4bd7993ff4879df971ab |
- | interface | internal |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 2d1a27022e6e4185b86adac4444c495f |
- | service_name | placement |
- | service_type | placement |
- | url | http://controller:8778 |
- +--------------+----------------------------------+
-
- $ openstack endpoint create --region RegionOne placement admin http://controller:8778
- +--------------+----------------------------------+
- | Field | Value |
- +--------------+----------------------------------+
- | enabled | True |
- | id | 3d71177b9e0f406f98cbff198d74b182 |
- | interface | admin |
- | region | RegionOne |
- | region_id | RegionOne |
- | service_id | 2d1a27022e6e4185b86adac4444c495f |
- | service_name | placement |
- | service_type | placement |
- | url | http://controller:8778 |
- +--------------+----------------------------------+
-
-Install and configure components
---------------------------------
-
-.. include:: shared/note_configuration_vary_by_distribution.rst
-
-.. only:: obs
-
- .. note::
-
- As of the Newton release, SUSE OpenStack packages are shipped
- with the upstream default configuration files. For example,
- ``/etc/nova/nova.conf`` has customizations in
- ``/etc/nova/nova.conf.d/010-nova.conf``. While the following
- instructions modify the default configuration file, adding a new file
- in ``/etc/nova/nova.conf.d`` achieves the same result.
-
-.. endonly
-
-.. only:: obs
-
- #. Install the packages:
-
- .. code-block:: console
-
- # zypper install openstack-nova-api openstack-nova-scheduler \
- openstack-nova-conductor openstack-nova-consoleauth \
- openstack-nova-novncproxy openstack-nova-placement-api \
- iptables
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- #. Install the packages:
-
- .. code-block:: console
-
- # yum install openstack-nova-api openstack-nova-conductor \
- openstack-nova-console openstack-nova-novncproxy \
- openstack-nova-scheduler openstack-nova-placement-api
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install nova-api nova-conductor nova-consoleauth \
- nova-novncproxy nova-scheduler nova-placement-api
-
- .. end
-
-.. endonly
-
-.. only:: debian
-
- #. Install the packages:
-
- .. code-block:: console
-
- # apt install nova-api nova-conductor nova-consoleauth \
- nova-consoleproxy nova-scheduler
-
- .. end
-
- .. note::
-
- ``nova-api-metadata`` is included in the ``nova-api`` package,
- and can be selected through debconf.
-
- .. note::
-
- A unique ``nova-consoleproxy`` package provides the
- ``nova-novncproxy``, ``nova-spicehtml5proxy``, and
- ``nova-xvpvncproxy`` packages. To select packages, edit the
- ``/etc/default/nova-consoleproxy`` file or use the debconf interface.
- You can also manually edit the ``/etc/default/nova-consoleproxy``
- file, and stop and start the console daemons.
-
-.. endonly
-
-2. Edit the ``/etc/nova/nova.conf`` file and
- complete the following actions:
-
- .. only:: rdo or obs
-
- * In the ``[DEFAULT]`` section, enable only the compute and metadata
- APIs:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- enabled_apis = osapi_compute,metadata
-
- .. end
-
- .. endonly
-
- * In the ``[api_database]`` and ``[database]`` sections, configure
- database access:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [api_database]
- # ...
- connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
-
- [database]
- # ...
- connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
-
- .. end
-
- Replace ``NOVA_DBPASS`` with the password you chose for
- the Compute databases.
-
- * In the ``[DEFAULT]`` section, configure ``RabbitMQ``
- message queue access:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- transport_url = rabbit://openstack:RABBIT_PASS@controller
-
- .. end
-
- Replace ``RABBIT_PASS`` with the password you chose for the
- ``openstack`` account in ``RabbitMQ``.
-
- * In the ``[api]`` and ``[keystone_authtoken]`` sections,
- configure Identity service access:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [api]
- # ...
- auth_strategy = keystone
-
- [keystone_authtoken]
- # ...
- auth_uri = http://controller:5000
- auth_url = http://controller:35357
- memcached_servers = controller:11211
- auth_type = password
- project_domain_name = default
- user_domain_name = default
- project_name = service
- username = nova
- password = NOVA_PASS
-
- .. end
-
- Replace ``NOVA_PASS`` with the password you chose for the
- ``nova`` user in the Identity service.
-
- .. note::
-
- Comment out or remove any other options in the
- ``[keystone_authtoken]`` section.
-
- * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to
- use the management interface IP address of the controller node:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- my_ip = 10.0.0.11
-
- .. end
-
-.. only:: obs or rdo or ubuntu
-
- * In the ``[DEFAULT]`` section, enable support for the Networking service:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- use_neutron = True
- firewall_driver = nova.virt.firewall.NoopFirewallDriver
-
- .. end
-
- .. note::
-
- By default, Compute uses an internal firewall driver. Since the
- Networking service includes a firewall driver, you must disable the
- Compute firewall driver by using the
- ``nova.virt.firewall.NoopFirewallDriver`` firewall driver.
-
-.. endonly
-
-* In the ``[vnc]`` section, configure the VNC proxy to use the management
- interface IP address of the controller node:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [vnc]
- enabled = true
- # ...
- vncserver_listen = $my_ip
- vncserver_proxyclient_address = $my_ip
-
- .. end
-
-.. only:: debian
-
- * In the ``[spice]`` section, disable spice:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [spice]
- enabled = false
-
- .. end
-
-.. endonly
-
-* In the ``[glance]`` section, configure the location of the
- Image service API:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [glance]
- # ...
- api_servers = http://controller:9292
-
- .. end
-
-.. only:: obs
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/run/nova
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/nova/tmp
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu
-
- * In the ``[oslo_concurrency]`` section, configure the lock path:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [oslo_concurrency]
- # ...
- lock_path = /var/lib/nova/tmp
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu
-
- .. todo:
-
- https://bugs.launchpad.net/ubuntu/+source/nova/+bug/1506667
-
- * Due to a packaging bug, remove the ``log_dir`` option from the
- ``[DEFAULT]`` section.
-
-.. endonly
-
-* In the ``[placement]`` section, configure the Placement API:
-
- .. path /etc/nova/nova.conf
- .. code-block:: ini
-
- [placement]
- # ...
- os_region_name = RegionOne
- project_domain_name = Default
- project_name = service
- auth_type = password
- user_domain_name = Default
- auth_url = http://controller:35357/v3
- username = placement
- password = PLACEMENT_PASS
-
- Replace ``PLACEMENT_PASS`` with the password you choose for the
- ``placement`` user in the Identity service. Comment out any other options in
- the ``[placement]`` section.
-
-.. only:: rdo
-
- * Due to a `packaging bug
- `_, you must enable
- access to the Placement API by adding the following configuration to
- ``/etc/httpd/conf.d/00-nova-placement-api.conf``:
-
- .. path /etc/httpd/conf.d/00-nova-placement-api.conf
- .. code-block:: ini
-
-
- = 2.4>
- Require all granted
-
-
- Order allow,deny
- Allow from all
-
-
-
- * Restart the httpd service:
-
- .. code-block:: console
-
- # systemctl restart httpd
-
-.. endonly
-
-.. only:: rdo or ubuntu or debian or obs
-
- 3. Populate the nova-api database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "nova-manage api_db sync" nova
-
- .. end
-
- .. note::
-
- Ignore any deprecation messages in this output.
-
- 4. Register the ``cell0`` database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
-
- .. end
-
- 5. Create the ``cell1`` cell:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
- 109e1d4b-536a-40d0-83c6-5f121b82b650
-
- .. end
-
- 6. Populate the nova database:
-
- .. code-block:: console
-
- # su -s /bin/sh -c "nova-manage db sync" nova
-
- 7. Verify nova cell0 and cell1 are registered correctly:
-
- .. code-block:: console
-
- # nova-manage cell_v2 list_cells
- +-------+--------------------------------------+
- | Name | UUID |
- +-------+--------------------------------------+
- | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 |
- | cell0 | 00000000-0000-0000-0000-000000000000 |
- +-------+--------------------------------------+
-
- .. end
-
-.. endonly
-
-Finalize installation
----------------------
-
-.. only:: obs
-
- * Enable the placement API Apache vhost:
-
- .. code-block:: console
-
- # mv /etc/apache2/vhosts.d/nova-placement-api.conf.sample /etc/apache2/vhosts.d/nova-placement-api.conf
- # systemctl reload apache2.service
-
- * Start the Compute services and configure them to start
- when the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-nova-api.service \
- openstack-nova-consoleauth.service openstack-nova-scheduler.service \
- openstack-nova-conductor.service openstack-nova-novncproxy.service
- # systemctl start openstack-nova-api.service \
- openstack-nova-consoleauth.service openstack-nova-scheduler.service \
- openstack-nova-conductor.service openstack-nova-novncproxy.service
-
- .. end
-
-.. endonly
-
-.. only:: rdo
-
- * Start the Compute services and configure them to start
- when the system boots:
-
- .. code-block:: console
-
- # systemctl enable openstack-nova-api.service \
- openstack-nova-consoleauth.service openstack-nova-scheduler.service \
- openstack-nova-conductor.service openstack-nova-novncproxy.service
- # systemctl start openstack-nova-api.service \
- openstack-nova-consoleauth.service openstack-nova-scheduler.service \
- openstack-nova-conductor.service openstack-nova-novncproxy.service
-
- .. end
-
-.. endonly
-
-.. only:: debian
-
- * Shutdown ``nova-spicehtml5proxy``:
-
- .. code-block:: console
-
- # service nova-spicehtml5proxy stop
-
- .. end
-
- * Select novnc startup in ``/etc/default/nova-consoleproxy``:
-
- .. path /etc/default/nova-consoleproxy
- .. code-block:: ini
-
- NOVA_CONSOLE_PROXY_TYPE=novnc
-
- .. end
-
- * Add a systemd service file for nova-novncproxy in
- ``/lib/systemd/system/nova-novncproxy.service``:
-
- .. path /lib/systemd/system/nova-novncproxy.service:
- .. code-block:: ini
-
- [Unit]
- Description=OpenStack Compute NoVNC proxy
- After=postgresql.service mysql.service keystone.service rabbitmq-server.service ntp.service
-
- Documentation=man:nova-novncproxy(1)
-
- [Service]
- User=nova
- Group=nova
- Type=simple
- WorkingDirectory=/var/lib/nova
- PermissionsStartOnly=true
- ExecStartPre=/bin/mkdir -p /var/lock/nova /var/log/nova /var/lib/nova
- ExecStartPre=/bin/chown nova:nova /var/lock/nova /var/lib/nova
- ExecStartPre=/bin/chown nova:adm /var/log/nova
- ExecStart=/etc/init.d/nova-novncproxy systemd-start
- Restart=on-failure
- LimitNOFILE=65535
- TimeoutStopSec=65
-
- [Install]
- WantedBy=multi-user.target
-
- .. end
-
- * Start the noVNC proxy:
-
- .. code-block:: console
-
- # systemctl daemon-reload
- # systemctl enable nova-novncproxy
- # service start nova-novncproxy
-
- .. end
-
- * Restart the other Compute services:
-
- .. code-block:: console
-
- # service nova-api restart
- # service nova-consoleauth restart
- # service nova-scheduler restart
- # service nova-conductor restart
-
- .. end
-
-.. endonly
-
-.. only:: ubuntu or debian
-
- * Restart the Compute services:
-
- .. code-block:: console
-
- # service nova-api restart
- # service nova-consoleauth restart
- # service nova-scheduler restart
- # service nova-conductor restart
- # service nova-novncproxy restart
-
- .. end
-
-.. endonly
+ nova-controller-install-*
diff --git a/tools/build-install-guides-rst.sh b/tools/build-install-guides-rst.sh
index 9419a48bd7..9cec28d5cb 100755
--- a/tools/build-install-guides-rst.sh
+++ b/tools/build-install-guides-rst.sh
@@ -5,21 +5,11 @@ mkdir -p publish-docs
# Do not build debian debconf for now, there're no Ocata packages at all.
INDEX=doc/install-guide/source/index.rst
-TAGS="obs rdo ubuntu"
LINKCHECK=""
PDF_OPTION=""
while [[ $# > 0 ]] ; do
option="$1"
case $option in
- obs)
- TAGS=obs
- ;;
- rdo)
- TAGS=rdo
- ;;
- ubuntu)
- TAGS=ubuntu
- ;;
--linkcheck)
LINKCHECK="--linkcheck"
;;
@@ -30,16 +20,6 @@ while [[ $# > 0 ]] ; do
shift
done
-# For translation work, we should have only one index file,
-# because our tools generate translation resources from
-# only one index file.
-# Therefore, this tool uses one combined index file
-# while processing title for each distribution.
-
-# Save and restore the index file
-cp -f ${INDEX} ${INDEX}.save
-trap "mv -f ${INDEX}.save ${INDEX}" EXIT
-
# Set this to a sensible value if not set by OpenStack CI.
if [ -z "$ZUUL_REFNAME" ] ; then
ZUUL_REFNAME="master"
@@ -49,31 +29,6 @@ fi
# Note for stable branches, this needs to be the top of each manual.
MARKER_TEXT="Project: $ZUUL_PROJECT Ref: $ZUUL_REFNAME Build: $ZUUL_UUID Revision: $ZUUL_NEWREV"
-for tag in $TAGS; do
- TARGET="draft/install-guide-${tag}"
- if [[ "$tag" == "debconf" ]]; then
- # Build the guide with debconf
- # To use debian only contents, use "debian" tag.
- tools/build-rst.sh doc/install-guide-debconf \
- --tag debian --target "$TARGET" $LINKCHECK $PDF_OPTION
- else
- ##
- # Because Sphinx uses the first heading as title regardless of
- # only directive, replace title directive with the proper title
- # for each distribution to set the title explicitly.
-
- title=$(grep -A 5 "^.. only:: ${tag}" ${INDEX} | \
- head -n 6 | sed -n 4p | sed -e 's/^ *//g')
- sed -i -e "s/\.\. title::.*/.. title:: ${title}/" ${INDEX}
-
- # Build the guide
- tools/build-rst.sh doc/install-guide \
- --tag ${tag} --target "$TARGET" $LINKCHECK $PDF_OPTION
- fi
- # Add this for stable branches
- # TODO(jaegerandi): Enable for stable branches after branch is
- # created:
- # if [ "$ZUUL_REFNAME" != "master" ] ; then
- # echo $MARKER_TEXT > publish-docs/$TARGET/.root-marker
- # fi
-done
+# Build the guide
+tools/build-rst.sh doc/install-guide \
+ --target "draft/install-guide" $LINKCHECK $PDF_OPTION
diff --git a/tools/split_platforms.py b/tools/split_platforms.py
new file mode 100644
index 0000000000..cc84b58326
--- /dev/null
+++ b/tools/split_platforms.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+
+
+def do_one(base, out_dir, filename, tag):
+ outname = filename[:-4] + '-' + tag + '.rst'
+ print(outname, tag)
+
+ inside_only = None
+ prefix_len = None
+ output = []
+ num_only_blocks = 0
+
+ with open(filename, 'r', encoding='utf-8') as inf:
+ for line in inf:
+ if '.. only::' in line:
+ print(line.rstrip())
+ inside_only = line
+ num_only_blocks += 1
+ prefix_len = None
+ continue
+ elif '.. endonly' in line:
+ inside_only = None
+ prefix_len = None
+ continue
+ elif inside_only:
+ if line.lstrip() == line and line.strip():
+ # The line has content and is flush left, so the
+ # existing inside block was not closed with the
+ # comment.
+ inside_only = None
+ prefix_len = None
+ print('copying %r' % line)
+ output.append(line)
+ elif tag in inside_only:
+ if not line.strip():
+ # blank line, include it but do not use it to find
+ # the prefix len
+ output.append('\n')
+ continue
+ if prefix_len is None:
+ # Determine how much this block is indented.
+ prefix_len = len(line) - len(line.lstrip())
+ print('prefix length:', prefix_len)
+ output.append(line[prefix_len:])
+ print('ONLY:', repr(line[prefix_len:]))
+ else:
+ print('IGNORE:', repr(line))
+ else:
+ print('copying %r' % line)
+ output.append(line)
+ if inside_only:
+ raise RuntimeError('unclosed only block in %s' % filename)
+ if num_only_blocks:
+ with open(outname, 'w', encoding='utf-8') as outf:
+ outf.writelines(output)
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('base')  # root directory scanned for .rst files
+parser.add_argument('outdir')  # NOTE(review): forwarded to do_one but unused there -- confirm intent
+parser.add_argument('tag', nargs='+')  # one or more distro tags (e.g. rdo obs ubuntu debian)
+args = parser.parse_args()
+
+base = args.base.rstrip('/') + '/'  # normalize to exactly one trailing slash
+
+for dir_name, sub_dirs, files in os.walk(base):  # sub_dirs unused; walk recurses regardless
+    for f in files:
+        if not f.endswith('.rst'):
+            continue  # only reStructuredText sources are split
+        for tag in args.tag:
+            do_one(base, args.outdir, os.path.join(dir_name, f), tag)  # one output file per (source, tag) pair
diff --git a/tools/split_platforms.sh b/tools/split_platforms.sh
new file mode 100755
index 0000000000..f225b1ee0c
--- /dev/null
+++ b/tools/split_platforms.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+toolsdir=$(dirname "$0")  # quote $0 so script paths containing spaces survive
+install_guide=doc/install-guide/source
+
+python3 $toolsdir/split_platforms.py $install_guide doc/install-guide \
+    rdo obs ubuntu debian
+
+# $ git grep 'only::' | cut -f2- -d: | sed 's/.*:: //g' | sort -ur
+# ubuntu or debian
+# ubuntu
+# rdo or ubuntu or debian or obs
+# rdo or ubuntu or debian
+# rdo or ubuntu
+# rdo or obs or ubuntu
+# rdo or obs
+# rdo or debian or obs
+# rdo
+# obs or ubuntu
+# obs or rdo or ubuntu
+# obs or rdo
+# obs or debian
+# obs
+# debian or ubuntu
+# debian