From 5676e4fda4dbe62527dc455f3a07fc58d6f42784 Mon Sep 17 00:00:00 2001
From: Kristal Dale
Date: Wed, 21 Aug 2019 11:27:07 -0700
Subject: [PATCH] Initial commit for guide reorg - upcoming release

- Migrated reorged content from wiki
- Naming, title, and capitalization consistency, minor rewording in sections
- Set up includes to reuse common content across pages
- Introduction: remove 'openstack users' and 'kubernetes users'
- Consolidate term definition into Key concepts page
- Archive R1, move R2 to current release, set up for R3
- Remove stub pages for duplicate content, or content deferred to R3
- Rework intro and contribute pages for better readability
- Split Key concepts into two pages: Terms and Deployment Options
- Pass for grammar, punctuation, licensing, etc.
- Pull streamlined intro content into R2 install guides (from prev version)
- Add R2 release note page
- Update links to projects/project names to remove the "stx-"
- Add instructions for creating a bootable USB

Story: 2006315
Task: 36046

Change-Id: I38656fd382d1d9cf2969812c548fb7b2dc9dd31e
Signed-off-by: Kristal Dale
---
 doc/source/api-ref/index.rst | 22 +-
 .../contributor/api_contribute_guide.rst | 48 +-
 .../build_guides/current/index.rst | 67 +-
 .../{latest => r1_release}/index.rst | 67 +-
 doc/source/contributor/index.rst | 56 +-
 .../release_note_contribute_guide.rst | 48 +-
 .../deploy_install_guides/bootable_usb.rst | 143 ++
 .../current/access_starlingx_kubernetes.rst | 167 ++
 .../current/access_starlingx_openstack.rst | 75 +
 .../current/bare_metal_aio_duplex.rst | 511 ++++++
 .../current/bare_metal_aio_simplex.rst | 486 ++++++
 .../current/bare_metal_controller_storage.rst | 547 ++++++
 .../current/bare_metal_dedicated_storage.rst | 442 +++++
 .../bare_metal_ironic.rst} | 79 +-
 .../starlingx-deployment-options-ironic.png | Bin
 .../current/install_openstack.rst | 69 +
 .../current/uninstall_delete_openstack.rst | 33 +
 .../current/virtual_aio_duplex.rst | 607 +++++++
 .../current/virtual_aio_simplex.rst | 514 ++++++
 .../current/virtual_controller_storage.rst | 646 ++++++++
 .../current/virtual_dedicated_storage.rst | 694 ++++++++
 doc/source/deploy_install_guides/index.rst | 125 +-
 .../controller_storage.rst | 6 +-
 .../dedicated_storage.rst | 6 +-
 .../deployment_terminology.rst | 0
 .../{current => r1_release}/duplex.rst | 6 +-
 ...-deployment-options-controller-storage.png | Bin
 ...x-deployment-options-dedicated-storage.png | Bin
 ...ngx-deployment-options-duplex-extended.png | Bin
 .../starlingx-deployment-options-duplex.png | Bin
 .../starlingx-deployment-options-simplex.png | Bin
 .../{current => r1_release}/index.rst | 19 +-
 .../installation_libvirt_qemu.rst | 16 +-
 .../{current => r1_release}/simplex.rst | 6 +-
 .../upcoming/aio_duplex.rst | 1443 ----------------
 .../upcoming/aio_simplex.rst | 1052 ------------
 .../upcoming/controller_storage.rst | 1381 ---------------
 .../upcoming/dedicated_storage.rst | 1476 -----------------
 .../upcoming/deployment_terminology.rst | 119 --
 .../upcoming/dist_cloud.rst | 10 -
 .../upcoming/horizon_access.rst | 34 -
 .../upcoming/installation_libvirt_qemu.rst | 210 ---
 .../upcoming/multi_region.rst | 10 -
 .../upcoming/options.rst | 79 -
 .../upcoming/planning.rst | 10 -
 .../uninstalling_deleting_openstack.rst | 38 -
 doc/source/index.rst | 60 +-
 doc/source/introduction/box.rst | 10 -
 doc/source/introduction/concepts.rst | 84 -
 doc/source/introduction/consuming.rst | 64 +-
 doc/source/introduction/deploy_options.rst | 36 +
 doc/source/introduction/index.rst | 36 +-
 doc/source/introduction/intro.rst |
29 - doc/source/introduction/kubernetes.rst | 10 - doc/source/introduction/openstack.rst | 10 - doc/source/introduction/roadmap.rst | 10 - .../introduction/software_evaluation.rst | 10 - doc/source/introduction/terms.rst | 122 ++ doc/source/operation_guides/index.rst | 103 -- .../latest/ceph_storage_config/index.rst | 10 - .../latest/cli_reference/index.rst | 10 - .../latest/data_network_config/index.rst | 10 - .../latest/fault_management/index.rst | 10 - .../latest/kubernetes_cluster_guide/index.rst | 10 - .../latest/patching_guide/index.rst | 10 - .../latest/sdn_networking/index.rst | 10 - .../latest/swift_config_management/index.rst | 10 - .../latest/upgrade_guide/index.rst | 10 - doc/source/releasenotes/index.rst | 142 +- doc/source/releasenotes/r1_release.rst | 108 ++ doc/source/releasenotes/r2_release.rst | 5 + 71 files changed, 5611 insertions(+), 6685 deletions(-) rename doc/source/contributor/build_guides/{latest => r1_release}/index.rst (90%) create mode 100644 doc/source/deploy_install_guides/bootable_usb.rst create mode 100644 doc/source/deploy_install_guides/current/access_starlingx_kubernetes.rst create mode 100644 doc/source/deploy_install_guides/current/access_starlingx_openstack.rst create mode 100644 doc/source/deploy_install_guides/current/bare_metal_aio_duplex.rst create mode 100644 doc/source/deploy_install_guides/current/bare_metal_aio_simplex.rst create mode 100644 doc/source/deploy_install_guides/current/bare_metal_controller_storage.rst create mode 100644 doc/source/deploy_install_guides/current/bare_metal_dedicated_storage.rst rename doc/source/deploy_install_guides/{upcoming/ironic.rst => current/bare_metal_ironic.rst} (83%) rename doc/source/deploy_install_guides/{upcoming => current}/figures/starlingx-deployment-options-ironic.png (100%) create mode 100644 doc/source/deploy_install_guides/current/install_openstack.rst create mode 100644 doc/source/deploy_install_guides/current/uninstall_delete_openstack.rst create mode 100644 doc/source/deploy_install_guides/current/virtual_aio_duplex.rst create mode 100644 doc/source/deploy_install_guides/current/virtual_aio_simplex.rst create mode 100644 doc/source/deploy_install_guides/current/virtual_controller_storage.rst create mode 100644 doc/source/deploy_install_guides/current/virtual_dedicated_storage.rst rename doc/source/deploy_install_guides/{current => r1_release}/controller_storage.rst (97%) rename doc/source/deploy_install_guides/{current => r1_release}/dedicated_storage.rst (97%) rename doc/source/deploy_install_guides/{current => r1_release}/deployment_terminology.rst (100%) rename doc/source/deploy_install_guides/{current => r1_release}/duplex.rst (97%) rename doc/source/deploy_install_guides/{upcoming => r1_release}/figures/starlingx-deployment-options-controller-storage.png (100%) rename doc/source/deploy_install_guides/{upcoming => r1_release}/figures/starlingx-deployment-options-dedicated-storage.png (100%) rename doc/source/deploy_install_guides/{upcoming => r1_release}/figures/starlingx-deployment-options-duplex-extended.png (100%) rename doc/source/deploy_install_guides/{upcoming => r1_release}/figures/starlingx-deployment-options-duplex.png (100%) rename doc/source/deploy_install_guides/{upcoming => r1_release}/figures/starlingx-deployment-options-simplex.png (100%) rename doc/source/deploy_install_guides/{current => r1_release}/index.rst (88%) rename doc/source/deploy_install_guides/{current => r1_release}/installation_libvirt_qemu.rst (86%) rename doc/source/deploy_install_guides/{current => 
r1_release}/simplex.rst (97%) delete mode 100644 doc/source/deploy_install_guides/upcoming/aio_duplex.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/aio_simplex.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/controller_storage.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/dedicated_storage.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/deployment_terminology.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/dist_cloud.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/horizon_access.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/installation_libvirt_qemu.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/multi_region.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/options.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/planning.rst delete mode 100644 doc/source/deploy_install_guides/upcoming/uninstalling_deleting_openstack.rst delete mode 100644 doc/source/introduction/box.rst delete mode 100644 doc/source/introduction/concepts.rst create mode 100644 doc/source/introduction/deploy_options.rst delete mode 100644 doc/source/introduction/intro.rst delete mode 100644 doc/source/introduction/kubernetes.rst delete mode 100644 doc/source/introduction/openstack.rst delete mode 100644 doc/source/introduction/roadmap.rst delete mode 100644 doc/source/introduction/software_evaluation.rst create mode 100644 doc/source/introduction/terms.rst delete mode 100644 doc/source/operation_guides/index.rst delete mode 100644 doc/source/operation_guides/latest/ceph_storage_config/index.rst delete mode 100644 doc/source/operation_guides/latest/cli_reference/index.rst delete mode 100644 doc/source/operation_guides/latest/data_network_config/index.rst delete mode 100644 doc/source/operation_guides/latest/fault_management/index.rst delete mode 100644 doc/source/operation_guides/latest/kubernetes_cluster_guide/index.rst delete mode 100644 doc/source/operation_guides/latest/patching_guide/index.rst delete mode 100644 doc/source/operation_guides/latest/sdn_networking/index.rst delete mode 100644 doc/source/operation_guides/latest/swift_config_management/index.rst delete mode 100644 doc/source/operation_guides/latest/upgrade_guide/index.rst create mode 100644 doc/source/releasenotes/r1_release.rst create mode 100644 doc/source/releasenotes/r2_release.rst diff --git a/doc/source/api-ref/index.rst b/doc/source/api-ref/index.rst index b246815e3..8af2aa368 100644 --- a/doc/source/api-ref/index.rst +++ b/doc/source/api-ref/index.rst @@ -4,17 +4,17 @@ REST API Reference StarlingX project REST API references: -* `Bare Metal `__ -* `Configuration `__ -* `Distributed Cloud `__ -* `Fault Management `__ -* `High Availability `__ -* `NFV `__ -* `Software Updates `__ +* `Bare Metal `__ +* `Configuration `__ +* `Distributed Cloud `__ +* `Fault Management `__ +* `High Availability `__ +* `NFV `__ +* `Software Updates `__ StarlingX also has API references for the modified OpenStack APIs: -* `Block Storage v2 `__ -* `Compute v2 `__ -* `Image v2 `__ -* `Network v2 `__ +* `Block Storage v2 `__ +* `Compute v2 `__ +* `Image v2 `__ +* `Network v2 `__ diff --git a/doc/source/contributor/api_contribute_guide.rst b/doc/source/contributor/api_contribute_guide.rst index e71b20892..11cb043eb 100644 --- a/doc/source/contributor/api_contribute_guide.rst +++ b/doc/source/contributor/api_contribute_guide.rst @@ -15,25 +15,25 @@ 
https://docs.openstack.org/doc-contrib-guide/api-guides.html. StarlingX API Reference documentation exists in the following projects: -- **stx-config:** StarlingX System Configuration Management -- **stx-docs:** StarlingX Documentation +- `starlingx/config`_: StarlingX System Configuration Management +- `starlingx/docs`_: StarlingX Documentation - - **stx-python-cinderclient** // i.e. only StarlingX-specific + - *stx-python-cinderclient* // only StarlingX-specific extensions to Cinder API are documented here - - **stx-nova** // i.e. only StarlingX-specific extensions to Nova + - *stx-nova* // only StarlingX-specific extensions to Nova API are documented here - - **stx-glance** // i.e. only StarlingX-specific extensions to + - *stx-glance* // only StarlingX-specific extensions to Glance API are documented here - - **stx-neutron** // i.e. only StarlingX-specific extensions to + - *stx-neutron* // only StarlingX-specific extensions to Neutron API are documented here -- **stx-distcloud:** StarlingX Distributed Cloud -- **stx-fault:** StarlingX Fault Management -- **stx-ha:** StarlingX High Availability/Process Monitoring/Service +- `starlingx/distcloud`_: StarlingX Distributed Cloud +- `starlingx/fault`_: StarlingX Fault Management +- `starlingx/ha`_: StarlingX High Availability/Process Monitoring/Service Management -- **stx-metal:** StarlingX Bare Metal and Node Management, Hardware +- `starlingx/metal`_: StarlingX Bare Metal and Node Management, Hardware Maintenance -- **stx-nfv:** StarlingX NFVI Orchestration +- `starlingx/nfv`_: StarlingX NFVI Orchestration -------------------- Directory Structures @@ -41,7 +41,7 @@ Directory Structures The directory structure of the API Reference documentation under each StarlingX project repository is fixed. Here is an example showing -**stx-config** StarlingX System Configuration Management +`starlingx/config`_ StarlingX System Configuration Management :: @@ -69,7 +69,7 @@ service in each StarlingX project are as follows: - **tox.ini** modifications to add the configuration to build the API reference locally -See stx-config [Doc] OpenStack API Reference Guide as an example of this +See `starlingx/config`_ [Doc] OpenStack API Reference Guide as an example of this first commit: https://review.openstack.org/#/c/603258/ ---------------------------- @@ -79,7 +79,7 @@ Creating the RST Source File Once the API Documentation service has been enabled, you create the RST source files that document the API operations under the same API Reference documentation project directory. The following shows the RST -source file for the **stx-config** StarlingX System Configuration +source file for the `starlingx/config`_ StarlingX System Configuration Management: Configuration API v1 :: @@ -98,7 +98,7 @@ description of the document, and the table-of-contents structure with depth restrictions. The **index.rst** file resides in the same folder as the RST source file. -Here is an example using the **stx-config** StarlingX System +Here is an example using the `starlingx/config`_ StarlingX System Configuration Management: Configuration API v1: :: @@ -109,7 +109,7 @@ Configuration Management: Configuration API v1: |___index.rst The syntax of the **index.rst** file is fixed. Following shows the -**index.rst** file used in the **stx-config**: +**index.rst** file used in the `starlingx/config`_: :: @@ -330,3 +330,19 @@ the **index.html** file. **NOTE:** The PDF build uses a different tox environment and is currently not supported for StarlingX. + + + + +.. 
_starlingx/config: https://opendev.org/starlingx/config +.. _starlingx/docs: https://opendev.org/starlingx/docs +.. _starlingx/distcloud: https://opendev.org/starlingx/distcloud +.. _starlingx/fault: https://opendev.org/starlingx/fault +.. _starlingx/ha: https://opendev.org/starlingx/ha +.. _starlingx/metal: https://opendev.org/starlingx/metal +.. _starlingx/nfv: https://opendev.org/starlingx/nfv + +.. _starlingx/tools: https://opendev.org/starlingx/tools +.. _starlingx/update: https://opendev.org/starlingx/update +.. _starlingx/upstream: https://opendev.org/starlingx/upstream + diff --git a/doc/source/contributor/build_guides/current/index.rst b/doc/source/contributor/build_guides/current/index.rst index 8ea3a7c5b..2994e9103 100644 --- a/doc/source/contributor/build_guides/current/index.rst +++ b/doc/source/contributor/build_guides/current/index.rst @@ -1,16 +1,14 @@ -======================= -Build guide stx.2018.10 -======================= +========================== +Build guide StarlingX R2.0 +========================== -This section contains the steps for building a StarlingX ISO from -the "current" StarlingX software (i.e. the most recently released version). +This section describes the steps for building a StarlingX ISO from the R2.0 +StarlingX release. ------------ Requirements ------------ -The recommended minimum requirements include: - ********************* Hardware requirements ********************* @@ -151,23 +149,23 @@ Create a workspace directory $ mkdir -p $HOME/starlingx/ -************************* -Install stx-tools project -************************* +********************* +Install tools project +********************* -#. Under your $HOME directory, clone the project: +#. Under your $HOME directory, clone the project: .. code:: sh $ cd $HOME - $ git clone https://git.starlingx.io/stx-tools + $ git clone https://opendev.org/starlingx/tools.git -#. Navigate to the *<$HOME/stx-tools>* project +#. Navigate to the *<$HOME/tools>* project directory: .. code:: sh - $ cd $HOME/stx-tools/ + $ cd $HOME/tools/ ----------------------------- Prepare the base Docker image @@ -221,7 +219,7 @@ to build the base Docker image. ENV ftp_proxy " http://your.actual_ftp_proxy.com:your_port " RUN echo " proxy=http://your-proxy.com:port " >> /etc/yum.conf -#. The ``tb.sh`` script automates the Base Docker image build: +#. The ``tb.sh`` script automates the base Docker image build: .. code:: sh @@ -231,8 +229,8 @@ to build the base Docker image. Build the CentOS mirror repository ---------------------------------- -The creation of the StarlingX ISO relies on a repository of RPM Binaries, -RPM Sources, and Tar Compressed files. This section describes how to build +The creation of the StarlingX ISO relies on a repository of RPM binaries, +RPM sources, and tar compressed files. This section describes how to build this CentOS mirror repository. ******************************* @@ -241,12 +239,12 @@ Run repository Docker container | Run the following commands under a terminal identified as "**One**": -#. Navigate to the *$HOME/stx-tools/centos-mirror-tool* project +#. Navigate to the *$HOME/tools/centos-mirror-tool* project directory: .. code:: sh - $ cd $HOME/stx-tools/centos-mirror-tools/ + $ cd $HOME/tools/centos-mirror-tools/ #. Launch the Docker container using the previously created base Docker image *:*. As /localdisk is defined as the workdir of the @@ -292,7 +290,7 @@ Verify packages :: # cat logs/*_missing_*.log - # cat logs/*_failmove_*.log + # cat logs/*_failmoved_*.log #. 
In case missing or failed packages do exist, which is usually caused by network instability (or timeout), you need to download the packages @@ -309,7 +307,7 @@ from downloading the packages: :: - /home//stx-tools/centos-mirror-tools/output + /home//tools/centos-mirror-tools/output └── stx-r1 └── CentOS └── pike @@ -339,13 +337,13 @@ as "**Two**", run the following commands: $ mkdir -p $HOME/starlingx/mirror/CentOS/ -#. Copy the built CentOS Mirror Repository built under - *$HOME/stx-tools/centos-mirror-tool* to the *$HOME/starlingx/mirror/* +#. Copy the built CentOS mirror repository built under + *$HOME/tools/centos-mirror-tool* to the *$HOME/starlingx/mirror/* workspace directory: .. code:: sh - $ cp -r $HOME/stx-tools/centos-mirror-tools/output/stx-r1/ $HOME/starlingx/mirror/CentOS/ + $ cp -r $HOME/tools/centos-mirror-tools/output/stx-r1/ $HOME/starlingx/mirror/CentOS/ ------------------------- @@ -362,11 +360,11 @@ Run building Docker container $ mkdir -p $HOME/starlingx/workspace -#. Navigate to the *$HOME/stx-tools* project directory: +#. Navigate to the *$HOME/tools* project directory: .. code:: sh - $ cd $HOME/stx-tools + $ cd $HOME/tools #. Verify environment variables: @@ -391,20 +389,20 @@ Download source code repositories ********************************* #. From the terminal identified as "**Two**", which is now inside the - Building Docker container, start the internal environment: + building Docker container, start the internal environment: .. code:: sh $ eval $(ssh-agent) $ ssh-add -#. Use the repo tool to create a local clone of the stx-manifest - Git repository based on the "r/2018.10" branch: +#. Use the repo tool to create a local clone of the manifest + Git repository based on the "master" branch: .. code:: sh $ cd $MY_REPO_ROOT_DIR - $ repo init -u https://git.starlingx.io/stx-manifest -m default.xml -b r/2018.10 + $ repo init -u https://opendev.org/starlingx/manifest -m default.xml #. Synchronize the repository: @@ -440,7 +438,8 @@ Download source code repositories Build packages ************** -#. Go back to the terminal identified as "**Two**", which is the Building Docker container. +#. Go back to the terminal identified as "**Two**", which is the building + Docker container. #. **Temporal!** Build-Pkgs Errors. Be prepared to have some missing / corrupted rpm and tarball packages generated during @@ -586,7 +585,7 @@ Limitations Method (in brief) ***************** -#. Reference Builds +#. Reference builds - A server in the regional office performs regular (e.g. daily) automated builds using existing methods. These builds are called @@ -746,8 +745,8 @@ the last full build. Your build script might look like this ... 
# update software repo init -u ${BUILD_REPO_URL} -b ${BUILD_BRANCH} repo sync --force-sync - $MY_REPO_ROOT_DIR/stx-tools/toCOPY/generate-cgcs-centos-repo.sh - $MY_REPO_ROOT_DIR/stx-tools/toCOPY/populate_downloads.sh + $MY_REPO_ROOT_DIR/tools/toCOPY/generate-cgcs-centos-repo.sh + $MY_REPO_ROOT_DIR/tools/toCOPY/populate_downloads.sh # User can optionally define BUILD_METHOD equal to one of 'FULL', 'AVOIDANCE', or 'AUTO' # Sanitize BUILD_METHOD diff --git a/doc/source/contributor/build_guides/latest/index.rst b/doc/source/contributor/build_guides/r1_release/index.rst similarity index 90% rename from doc/source/contributor/build_guides/latest/index.rst rename to doc/source/contributor/build_guides/r1_release/index.rst index 7577d1711..301231dd8 100644 --- a/doc/source/contributor/build_guides/latest/index.rst +++ b/doc/source/contributor/build_guides/r1_release/index.rst @@ -1,14 +1,16 @@ -======================= -Build guide stx.2019.05 -======================= +========================== +Build guide StarlingX R1.0 +========================== -This section contains the steps for building a StarlingX ISO from -the "latest" StarlingX software (i.e. the "under development" branch). +This section describes the steps for building a StarlingX ISO from the R1.0 +StarlingX release. ------------ Requirements ------------ +The recommended minimum requirements include: + ********************* Hardware requirements ********************* @@ -149,23 +151,23 @@ Create a workspace directory $ mkdir -p $HOME/starlingx/ -********************* -Install tools project -********************* +************************* +Install stx-tools project +************************* -#. Under your $HOME directory, clone the project: +#. Under your $HOME directory, clone the project: .. code:: sh $ cd $HOME - $ git clone https://opendev.org/starlingx/tools.git + $ git clone https://git.starlingx.io/stx-tools -#. Navigate to the *<$HOME/tools>* project +#. Navigate to the *<$HOME/stx-tools>* project directory: .. code:: sh - $ cd $HOME/tools/ + $ cd $HOME/stx-tools/ ----------------------------- Prepare the base Docker image @@ -219,7 +221,7 @@ to build the base Docker image. ENV ftp_proxy " http://your.actual_ftp_proxy.com:your_port " RUN echo " proxy=http://your-proxy.com:port " >> /etc/yum.conf -#. The ``tb.sh`` script automates the base Docker image build: +#. The ``tb.sh`` script automates the Base Docker image build: .. code:: sh @@ -229,8 +231,8 @@ to build the base Docker image. Build the CentOS mirror repository ---------------------------------- -The creation of the StarlingX ISO relies on a repository of RPM binaries, -RPM sources, and tar compressed files. This section describes how to build +The creation of the StarlingX ISO relies on a repository of RPM Binaries, +RPM Sources, and Tar Compressed files. This section describes how to build this CentOS mirror repository. ******************************* @@ -239,12 +241,12 @@ Run repository Docker container | Run the following commands under a terminal identified as "**One**": -#. Navigate to the *$HOME/tools/centos-mirror-tool* project +#. Navigate to the *$HOME/stx-tools/centos-mirror-tool* project directory: .. code:: sh - $ cd $HOME/tools/centos-mirror-tools/ + $ cd $HOME/stx-tools/centos-mirror-tools/ #. Launch the Docker container using the previously created base Docker image *:*. As /localdisk is defined as the workdir of the @@ -290,7 +292,7 @@ Verify packages :: # cat logs/*_missing_*.log - # cat logs/*_failmoved_*.log + # cat logs/*_failmove_*.log #. 
In case missing or failed packages do exist, which is usually caused by network instability (or timeout), you need to download the packages @@ -307,7 +309,7 @@ from downloading the packages: :: - /home//tools/centos-mirror-tools/output + /home//stx-tools/centos-mirror-tools/output └── stx-r1 └── CentOS └── pike @@ -337,13 +339,13 @@ as "**Two**", run the following commands: $ mkdir -p $HOME/starlingx/mirror/CentOS/ -#. Copy the built CentOS mirror repository built under - *$HOME/tools/centos-mirror-tool* to the *$HOME/starlingx/mirror/* +#. Copy the built CentOS Mirror Repository built under + *$HOME/stx-tools/centos-mirror-tool* to the *$HOME/starlingx/mirror/* workspace directory: .. code:: sh - $ cp -r $HOME/tools/centos-mirror-tools/output/stx-r1/ $HOME/starlingx/mirror/CentOS/ + $ cp -r $HOME/stx-tools/centos-mirror-tools/output/stx-r1/ $HOME/starlingx/mirror/CentOS/ ------------------------- @@ -360,11 +362,11 @@ Run building Docker container $ mkdir -p $HOME/starlingx/workspace -#. Navigate to the *$HOME/tools* project directory: +#. Navigate to the *$HOME/stx-tools* project directory: .. code:: sh - $ cd $HOME/tools + $ cd $HOME/stx-tools #. Verify environment variables: @@ -389,20 +391,20 @@ Download source code repositories ********************************* #. From the terminal identified as "**Two**", which is now inside the - building Docker container, start the internal environment: + Building Docker container, start the internal environment: .. code:: sh $ eval $(ssh-agent) $ ssh-add -#. Use the repo tool to create a local clone of the manifest - Git repository based on the "master" branch: +#. Use the repo tool to create a local clone of the stx-manifest + Git repository based on the "r/2018.10" branch: .. code:: sh $ cd $MY_REPO_ROOT_DIR - $ repo init -u https://opendev.org/starlingx/manifest -m default.xml + $ repo init -u https://git.starlingx.io/stx-manifest -m default.xml -b r/2018.10 #. Synchronize the repository: @@ -438,8 +440,7 @@ Download source code repositories Build packages ************** -#. Go back to the terminal identified as "**Two**", which is the building - Docker container. +#. Go back to the terminal identified as "**Two**", which is the Building Docker container. #. **Temporal!** Build-Pkgs Errors. Be prepared to have some missing / corrupted rpm and tarball packages generated during @@ -585,7 +586,7 @@ Limitations Method (in brief) ***************** -#. Reference builds +#. Reference Builds - A server in the regional office performs regular (e.g. daily) automated builds using existing methods. These builds are called @@ -745,8 +746,8 @@ the last full build. Your build script might look like this ... # update software repo init -u ${BUILD_REPO_URL} -b ${BUILD_BRANCH} repo sync --force-sync - $MY_REPO_ROOT_DIR/tools/toCOPY/generate-cgcs-centos-repo.sh - $MY_REPO_ROOT_DIR/tools/toCOPY/populate_downloads.sh + $MY_REPO_ROOT_DIR/stx-tools/toCOPY/generate-cgcs-centos-repo.sh + $MY_REPO_ROOT_DIR/stx-tools/toCOPY/populate_downloads.sh # User can optionally define BUILD_METHOD equal to one of 'FULL', 'AVOIDANCE', or 'AUTO' # Sanitize BUILD_METHOD diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst index b4ffa1948..950820f35 100644 --- a/doc/source/contributor/index.rst +++ b/doc/source/contributor/index.rst @@ -4,13 +4,12 @@ Contribute ========== -Please use the following guides when contributing to the StarlingX -documentation. 
Additional information about contributing to -OpenStack documentation can be found in the -`OpenStack API documentation guide`_ and the -`OpenStack Documentation Contributor Guide `_. +The following guides provide detailed instructions on the contribution workflow +and conventions to be considered when contributing to the StarlingX documentation. -.. _`OpenStack API documentation guide`: https://docs.openstack.org/doc-contrib-guide/api-guides.html +--------------------------- +Contribute to documentation +--------------------------- .. toctree:: :maxdepth: 1 @@ -19,39 +18,40 @@ OpenStack documentation can be found in the release_note_contribute_guide doc_contribute_guide -For information on the StarlingX development process, see the following: +------------------- +Development process +------------------- + +For information on the StarlingX development process, refer to the following guide: .. toctree:: :maxdepth: 1 development_process -Build guides for StarlingX are release-specific. -The following list provides help on choosing the correct -build guide based on a release: +--------------- +Build StarlingX +--------------- -- The "current" release is the most recent officially released version of StarlingX. - Following is the current build guide: +For instructions on how to build StarlingX, refer to the following guides (build +guides for StarlingX are release-specific). - .. toctree:: - :maxdepth: 1 +.. toctree:: + :maxdepth: 1 - build_guides/current/index + build_guides/current/index + build_guides/r1_release/index -- The "latest" release is the forthcoming version under development. - Following is the latest build guide: +-------------------- +Additional resources +-------------------- - .. toctree:: - :maxdepth: 1 +Additional information about contributing to OpenStack documentation can be found +in the following guides: - build_guides/latest/index +* `OpenStack API documentation guide`_ +* `OpenStack Documentation Contributor Guide`_ -- The "archived" build guides are as follows: - - * Currently, no archived build guides exist. - - - -.. When a new release of developer guides occurs, follow the steps in the - commented block of the /installation_guide/index.rst file. +.. _`OpenStack Documentation Contributor Guide`: https://docs.openstack.org/doc-contrib-guide/index.html +.. 
_`OpenStack API documentation guide`: https://docs.openstack.org/doc-contrib-guide/api-guides.html

diff --git a/doc/source/contributor/release_note_contribute_guide.rst b/doc/source/contributor/release_note_contribute_guide.rst
index 86404b47e..7c5c1f63f 100644
--- a/doc/source/contributor/release_note_contribute_guide.rst
+++ b/doc/source/contributor/release_note_contribute_guide.rst
@@ -13,27 +13,27 @@ be found at: https://docs.openstack.org/reno
 Locations
 ---------
 
-StarlingX Release Notes documentation exists in the following projects:
+StarlingX release notes documentation exists in the following projects:
 
-- **stx-clients:** StarlingX Client Libraries
-- **stx-config:** StarlingX System Configuration Management
-- **stx-distcloud:** StarlingX Distributed Cloud
-- **stx-distcloud-client:** StarlingX Distributed Cloud Client
-- **stx-fault:** StarlingX Fault Management
-- **stx-gui:** StarlingX Horizon plugins for new StarlingX services
-- **stx-ha:** StarlingX High Availability/Process Monitoring/Service Management
-- **stx-integ:** StarlingX Integration and Packaging
-- **stx-metal:** StarlingX Bare Metal and Node Management, Hardware Maintenance
-- **stx-nfv:** StarlingX NFVI Orchestration
-- **stx-tools:** StarlingX Build Tools
-- **stx-update:** StarlingX Installation/Update/Patching/Backup/Restore
-- **stx-upstream:** StarlingX Upstream Packaging
+- `starlingx/clients`_: StarlingX Client Libraries
+- `starlingx/config`_: StarlingX System Configuration Management
+- `starlingx/distcloud`_: StarlingX Distributed Cloud
+- `starlingx/distcloud-client`_: StarlingX Distributed Cloud Client
+- `starlingx/fault`_: StarlingX Fault Management
+- `starlingx/gui`_: StarlingX Horizon plugins for new StarlingX services
+- `starlingx/ha`_: StarlingX High Availability/Process Monitoring/Service Management
+- `starlingx/integ`_: StarlingX Integration and Packaging
+- `starlingx/metal`_: StarlingX Bare Metal and Node Management, Hardware Maintenance
+- `starlingx/nfv`_: StarlingX NFVI Orchestration
+- `starlingx/tools`_: StarlingX Build Tools
+- `starlingx/update`_: StarlingX Installation/Update/Patching/Backup/Restore
+- `starlingx/upstream`_: StarlingX Upstream Packaging
 
 --------------------
 Directory Structures
 --------------------
 
-The directory structure of Release documentation under each StarlingX project
+The directory structure of release documentation under each StarlingX project
 repository is fixed. Here is an example showing **stx-config** StarlingX
 System Configuration Management:
 
@@ -81,7 +81,7 @@ Release Notes Files
 The following shows the YAML source file for the stx-config StarlingX
 System Configuration Management:
 
-`Release Summary r/2018.10 `_
+`Release Summary R1.0 `_
 
 ::
 
@@ -202,3 +202,19 @@ Release Team Workflow
    Git tag.
 #. Generate the Reno Report.
 #. Add your change and submit for review.
+
+
+
+.. _starlingx/clients: https://opendev.org/starlingx/clients
+.. _starlingx/config: https://opendev.org/starlingx/config
+.. _starlingx/distcloud: https://opendev.org/starlingx/distcloud
+.. _starlingx/distcloud-client: https://opendev.org/starlingx/distcloud-client
+.. _starlingx/fault: https://opendev.org/starlingx/fault
+.. _starlingx/gui: https://opendev.org/starlingx/gui
+.. _starlingx/ha: https://opendev.org/starlingx/ha
+.. _starlingx/integ: https://opendev.org/starlingx/integ
+.. _starlingx/metal: https://opendev.org/starlingx/metal
+.. _starlingx/nfv: https://opendev.org/starlingx/nfv
+.. _starlingx/tools: https://opendev.org/starlingx/tools
+.. _starlingx/update: https://opendev.org/starlingx/update
+.. _starlingx/upstream: https://opendev.org/starlingx/upstream
+

diff --git a/doc/source/deploy_install_guides/bootable_usb.rst b/doc/source/deploy_install_guides/bootable_usb.rst
new file mode 100644
index 000000000..fea942804
--- /dev/null
+++ b/doc/source/deploy_install_guides/bootable_usb.rst
@@ -0,0 +1,143 @@
+===================
+Create Bootable USB
+===================
+
+Follow the instructions for your system to create a bootable USB with the
+StarlingX ISO:
+
+* :ref:`bootable-usb-linux`
+* :ref:`bootable-usb-mac`
+* :ref:`bootable-usb-windows`
+
+
+.. _bootable-usb-linux:
+
+--------------------------------------
+Create a bootable USB drive on Linux\*
+--------------------------------------
+
+#. Open a terminal and gain root privileges:
+
+   ::
+
+      sudo -s
+
+#. Get the StarlingX ISO.
+   This can be from a private StarlingX build or from the public CENGN StarlingX
+   release build, as shown below:
+
+   ::
+
+      wget http://mirror.starlingx.cengn.ca/mirror/starlingx/release/2.0.0/centos/outputs/iso/bootimage.iso
+
+#. Navigate to the directory with the ISO.
+
+#. Plug in the USB drive and get its identifier:
+
+   ::
+
+      lsblk
+
+   This will list available disks and their partitions.
+
+#. Unmount the USB drive before burning an image onto it. (Note that
+   some Linux distros automatically mount a USB drive when it is plugged in.)
+   For example:
+
+   ::
+
+      umount /dev/sdd2
+
+#. Burn the StarlingX bootimage.iso onto the USB drive:
+
+   ::
+
+      dd if= of= bs=1M status=progress
+
+.. caution::
+
+   Not fully unmounting the USB drive before burning an image can cause
+   file system checksum errors. If this happens, burn the image again,
+   ensuring all the USB drive partitions are unmounted first.
+
+
+.. _bootable-usb-mac:
+
+--------------------------------------
+Create a bootable USB drive on macOS\*
+--------------------------------------
+
+#. Launch the Terminal app.
+
+#. Get the StarlingX ISO.
+   This can be from a private StarlingX build or from the public CENGN StarlingX
+   release build, as shown below:
+
+   ::
+
+      curl -O http://mirror.starlingx.cengn.ca/mirror/starlingx/release/2.0.0/centos/outputs/iso/bootimage.iso
+
+#. Navigate to the directory with the ISO.
+
+#. Plug in a USB drive and get its identifier:
+
+   ::
+
+      diskutil list
+
+   This will list available disks and their partitions.
+
+#. Unmount the USB drive identified in the previous step. For example:
+
+   ::
+
+      diskutil umountDisk /dev/disk2
+
+#. Burn the StarlingX bootimage.iso onto the USB drive.
+   The example below burns an ISO onto ``:
+
+   .. code-block:: bash
+
+      sudo dd if= of= bs=1m
+
+   To speed up the imaging process, add an ‘r’ in front of the disk identifier.
+   For example ``/dev/rdisk2``.
+
+   Press ``Ctrl-T`` in the Terminal window to check imaging progress.
+
+#. Eject the USB drive.
+
+   .. code-block:: bash
+
+      diskutil eject /dev/disk2
+
+.. _bootable-usb-windows:
+
+----------------------------------------
+Create a bootable USB drive on Windows\*
+----------------------------------------
+
+#. Get the StarlingX ISO.
+   This can be from a private StarlingX build or from the public CENGN StarlingX
+   release build:
+
+   http://mirror.starlingx.cengn.ca/mirror/starlingx/release/2.0.0/centos/outputs/iso/bootimage.iso
+
+#. Download the `Rufus`_ utility to burn the image onto a USB drive.
+   **Only use the latest version of Rufus**.
+
+#. Plug in the USB drive and open Rufus.
+
+#. Under :guilabel:`Boot selection`, click the :guilabel:`SELECT` button.
+
+#. Find and select the StarlingX ISO.
+
+#. Click the :guilabel:`START` button.
+
+#. When the dialog appears, select
+   :guilabel:`Write in ISO image mode (Recommended)`.
+
+#. When the write completes, eject the USB drive using the USB menu in the
+   Windows taskbar.
+
+.. _Rufus: https://rufus.ie/
+

diff --git a/doc/source/deploy_install_guides/current/access_starlingx_kubernetes.rst b/doc/source/deploy_install_guides/current/access_starlingx_kubernetes.rst
new file mode 100644
index 000000000..9e4434877
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/access_starlingx_kubernetes.rst
@@ -0,0 +1,167 @@
+===========================
+Access StarlingX Kubernetes
+===========================
+
+.. contents::
+   :local:
+   :depth: 1
+
+----------
+Local CLIs
+----------
+
+#. Log in to controller-0 via the console or SSH with a sysadmin/.
+
+#. Acquire Keystone admin and Kubernetes admin credentials:
+
+   ::
+
+      source /etc/platform/openrc
+
+*********************************************
+StarlingX system and host management commands
+*********************************************
+
+Access StarlingX system and host management commands using the :command:`system` command, for
+example:
+
+::
+
+   [sysadmin@controller-0 ~(keystone_admin)]$ system host-list
+   +----+--------------+-------------+----------------+-------------+--------------+
+   | id | hostname     | personality | administrative | operational | availability |
+   +----+--------------+-------------+----------------+-------------+--------------+
+   | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+   +----+--------------+-------------+----------------+-------------+--------------+
+
+Use the :command:`system help` command for the full list of options.
+
+***********************************
+StarlingX fault management commands
+***********************************
+
+Access StarlingX fault management commands using the :command:`fm` command, for example:
+
+::
+
+   [sysadmin@controller-0 ~(keystone_admin)]$ fm alarm-list
+
+*******************
+Kubernetes commands
+*******************
+
+Access Kubernetes commands using the :command:`kubectl` command, for example:
+
+::
+
+   [sysadmin@controller-0 ~(keystone_admin)]$ kubectl get nodes
+   NAME           STATUS   ROLES    AGE     VERSION
+   controller-0   Ready    master   5d19h   v1.13.5
+
+See https://kubernetes.io/docs/reference/kubectl/overview/ for details.
+
+-----------
+Remote CLIs
+-----------
+
+Documentation coming soon.
+
+---
+GUI
+---
+
+*********************
+StarlingX Horizon GUI
+*********************
+
+Access the StarlingX Horizon GUI in your browser at the following address:
+
+::
+
+   http://:8080
+
+
+Log in to Horizon with an admin/.
+
+********************
+Kubernetes dashboard
+********************
+
+The Kubernetes dashboard is not installed by default.
+
+To install the Kubernetes dashboard:
+
+#. Use the kubernetes-dashboard helm chart from the stable helm repository with
+   the override values shown below:
+
+   ::
+
+      cat <<EOF > dashboard-values.yaml
+      service:
+        type: NodePort
+        nodePort: 30000
+
+      rbac:
+        create: true
+        clusterAdminRole: true
+
+      serviceAccount:
+        create: true
+        name: kubernetes-dashboard
+      EOF
+
+      helm repo update
+
+      helm install stable/kubernetes-dashboard --name dashboard -f dashboard-values.yaml
+
+#. Create an ``admin-user`` service account with ``cluster-admin`` privileges, and
+   display its token for logging into the Kubernetes dashboard.
+
+   ::
+
+      cat <<EOF > admin-login.yaml
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: admin-user
+        namespace: kube-system
+      ---
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: ClusterRoleBinding
+      metadata:
+        name: admin-user
+      roleRef:
+        apiGroup: rbac.authorization.k8s.io
+        kind: ClusterRole
+        name: cluster-admin
+      subjects:
+      - kind: ServiceAccount
+        name: admin-user
+        namespace: kube-system
+      EOF
+
+      kubectl apply -f admin-login.yaml
+
+      kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
+
+#. Access the Kubernetes dashboard GUI in your browser at the following address:
+
+   ::
+
+      https://:30000
+
+#. Log in with the ``admin-user`` token displayed by the previous command.
+
+---------
+REST APIs
+---------
+
+List the StarlingX platform-related public REST API endpoints using the
+following command:
+
+::
+
+   openstack endpoint list | grep public
+
+Use these URLs as the prefix for the URL target of StarlingX platform services'
+REST API messages.

diff --git a/doc/source/deploy_install_guides/current/access_starlingx_openstack.rst b/doc/source/deploy_install_guides/current/access_starlingx_openstack.rst
new file mode 100644
index 000000000..575e41f26
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/access_starlingx_openstack.rst
@@ -0,0 +1,75 @@
+==========================
+Access StarlingX OpenStack
+==========================
+
+.. contents::
+   :local:
+   :depth: 1
+
+----------
+Local CLIs
+----------
+
+#. Log in to controller-0 via the console or SSH with a sysadmin/.
+   *Do not use* ``source /etc/platform/openrc``.
+
+#. Set the CLI context to the StarlingX OpenStack Cloud Application and set up
+   OpenStack admin credentials:
+
+   ::
+
+      sudo su -
+      mkdir -p /etc/openstack
+      tee /etc/openstack/clouds.yaml << EOF
+      clouds:
+        openstack_helm:
+          region_name: RegionOne
+          identity_api_version: 3
+          endpoint_type: internalURL
+          auth:
+            username: 'admin'
+            password: ''
+            project_name: 'admin'
+            project_domain_name: 'default'
+            user_domain_name: 'default'
+            auth_url: 'http://keystone.openstack.svc.cluster.local/v3'
+      EOF
+      exit
+
+      export OS_CLOUD=openstack_helm
+
+**********************
+OpenStack CLI commands
+**********************
+
+Access OpenStack CLI commands for the StarlingX OpenStack Cloud Application using the
+:command:`openstack` command, for example:
+
+::
+
+   [sysadmin@controller-0 ~(keystone_admin)]$ openstack flavor list
+   [sysadmin@controller-0 ~(keystone_admin)]$ openstack image list
+
+-----------
+Remote CLIs
+-----------
+
+Documentation coming soon.
+
+---
+GUI
+---
+
+Access the StarlingX Containerized OpenStack Horizon GUI in your browser at the following address:
+
+::
+
+   http://:31000
+
+Log in to the Containerized OpenStack Horizon GUI with an admin/.
+
+---------
+REST APIs
+---------
+
+Documentation coming soon.
\ No newline at end of file

diff --git a/doc/source/deploy_install_guides/current/bare_metal_aio_duplex.rst b/doc/source/deploy_install_guides/current/bare_metal_aio_duplex.rst
new file mode 100644
index 000000000..5cfb120cb
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/bare_metal_aio_duplex.rst
@@ -0,0 +1,511 @@
+=================================
+Bare metal All-in-one Duplex R2.0
+=================================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. include:: virtual_aio_duplex.rst
+   :start-after: incl-aio-duplex-intro-start:
+   :end-before: incl-aio-duplex-intro-end:
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-ipv6-note-start:
+   :end-before: incl-ipv6-note-end:
+
+---------------------
+Hardware requirements
+---------------------
+
+The recommended minimum requirements for the bare metal servers for the various
+host types are:
+
++-------------------------+-----------------------------------------------------------+
+| Minimum Requirement     | All-in-one Controller Node                                |
++=========================+===========================================================+
+| Number of Servers       | 2                                                         |
++-------------------------+-----------------------------------------------------------+
+| Minimum Processor Class | - Dual-CPU Intel® Xeon® E5 26xx Family (SandyBridge)      |
+|                         |   8 cores/socket                                          |
+|                         |                                                           |
+|                         |   or                                                      |
+|                         |                                                           |
+|                         | - Single-CPU Intel® Xeon® D-15xx family, 8 cores          |
+|                         |   (low-power/low-cost option)                             |
++-------------------------+-----------------------------------------------------------+
+| Minimum Memory          | 64 GB                                                     |
++-------------------------+-----------------------------------------------------------+
+| Primary Disk            | 500 GB SSD or NVMe                                        |
++-------------------------+-----------------------------------------------------------+
+| Additional Disks        | - 1 or more 500 GB (min. 10K RPM) for Ceph OSD            |
+|                         | - Recommended, but not required: 1 or more SSDs or NVMe   |
+|                         |   drives for Ceph journals (min. 1024 MiB per OSD journal)|
+|                         | - For OpenStack, recommend 1 or more 500 GB (min. 10K RPM)|
+|                         |   for VM local ephemeral storage                          |
++-------------------------+-----------------------------------------------------------+
+| Minimum Network Ports   | - Mgmt/Cluster: 1x10GE                                    |
+|                         | - OAM: 1x1GE                                              |
+|                         | - Data: 1 or more x 10GE                                  |
++-------------------------+-----------------------------------------------------------+
+| BIOS Settings           | - Hyper-Threading technology enabled                      |
+|                         | - Virtualization technology enabled                       |
+|                         | - VT for directed I/O enabled                             |
+|                         | - CPU power and performance policy set to performance     |
+|                         | - CPU C state control disabled                            |
+|                         | - Plug & play BMC detection disabled                      |
++-------------------------+-----------------------------------------------------------+
+
+---------------
+Prepare Servers
+---------------
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-prepare-servers-start:
+   :end-before: incl-prepare-servers-end:
+
+--------------------
+StarlingX Kubernetes
+--------------------
+
+*******************************
+Installing StarlingX Kubernetes
+*******************************
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Create a bootable USB with the StarlingX ISO
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Refer to :doc:`/deploy_install_guides/bootable_usb` for instructions on how to
+create a bootable USB on your system.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-install-software-controller-0-start:
+   :end-before: incl-install-software-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Bootstrap system on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-bootstrap-sys-controller-0-start:
+   :end-before: incl-bootstrap-sys-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+..
include:: bare_metal_aio_simplex.rst + :start-after: incl-config-controller-0-start: + :end-before: incl-config-controller-0-end: + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-0 +^^^^^^^^^^^^^^^^^^^ + +.. incl-unlock-controller-0-start: + +Unlock controller-0 in order to bring it into service: + +:: + + system host-unlock controller-0 + +Controller-0 will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +.. incl-unlock-controller-0-end: + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Install software on controller-1 node +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Power on the controller-1 server and force it to network boot with the + appropriate BIOS boot options for your particular server. + +#. As controller-1 boots, a message appears on its console instructing you to + configure the personality of the node. + +#. On the console of controller-0, list hosts to see newly discovered controller-1 + host, that is, host with hostname of None: + + :: + + system host-list + +----+--------------+-------------+----------------+-------------+--------------+ + | id | hostname | personality | administrative | operational | availability | + +----+--------------+-------------+----------------+-------------+--------------+ + | 1 | controller-0 | controller | unlocked | enabled | available | + | 2 | None | None | locked | disabled | offline | + +----+--------------+-------------+----------------+-------------+--------------+ + +#. Using the host id, set the personality of this host to 'controller': + + :: + + system host-update 2 personality=controller + +#. Wait for the software installation on controller-1 to complete, for controller-1 to + reboot, and for controller-1 to show as locked/disabled/online in 'system host-list'. + + This can take 5-10 minutes, depending on the performance of the host machine. + + :: + + system host-list + +----+--------------+-------------+----------------+-------------+--------------+ + | id | hostname | personality | administrative | operational | availability | + +----+--------------+-------------+----------------+-------------+--------------+ + | 1 | controller-0 | controller | unlocked | enabled | available | + | 2 | controller-1 | controller | locked | disabled | online | + +----+--------------+-------------+----------------+-------------+--------------+ + +^^^^^^^^^^^^^^^^^^^^^^ +Configure controller-1 +^^^^^^^^^^^^^^^^^^^^^^ + +#. Configure the OAM and MGMT interfaces of controller-1 and specify the + attached networks. Use the OAM and MGMT port names, e.g. eth0, applicable to + your deployment environment: + + (Note that the MGMT interface is partially set up automatically by the network + install procedure.) + + :: + + OAM_IF= + MGMT_IF= + system host-if-modify controller-1 $OAM_IF -c platform + system interface-network-assign controller-1 $OAM_IF oam + system interface-network-assign controller-1 $MGMT_IF cluster-host + +#. Configure data interfaces for controller-1. Use the DATA port names, e.g. + eth0, applicable to your deployment environment. + + .. note:: + + This step is **required** for OpenStack and optional for Kubernetes. For + example, do this step if using SRIOV network attachments in application + containers. 
+ + For Kubernetes SRIOV network attachments: + + * Configure the SRIOV device plugin: + + :: + + system host-label-assign controller-1 sriovdp=enabled + + * If planning on running DPDK in containers on this host, configure the number + of 1G Huge pages required on both NUMA nodes: + + :: + + system host-memory-modify controller-1 0 -1G 100 + system host-memory-modify controller-1 1 -1G 100 + + + For both Kubernetes and OpenStack: + + :: + + DATA0IF= + DATA1IF= + export COMPUTE=controller-1 + PHYSNET0='physnet0' + PHYSNET1='physnet1' + SPL=/tmp/tmp-system-port-list + SPIL=/tmp/tmp-system-host-if-list + system host-port-list ${COMPUTE} --nowrap > ${SPL} + system host-if-list -a ${COMPUTE} --nowrap > ${SPIL} + DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}') + DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}') + DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}') + DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}') + DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}') + DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}') + DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}') + DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}') + + system datanetwork-add ${PHYSNET0} vlan + system datanetwork-add ${PHYSNET1} vlan + + system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} + system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID} + system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0} + system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1} + +#. Add an OSD on controller-1 for ceph: + + :: + + echo ">>> Add OSDs to primary tier" + system host-disk-list controller-1 + system host-disk-list controller-1 | awk '/\/dev\/sdb/{print $2}' | xargs -i system host-stor-add controller-1 {} + system host-stor-list controller-1 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to controller-1 in + support of installing the stx-openstack manifest and helm-charts later. + + :: + + system host-label-assign controller-1 openstack-control-plane=enabled + system host-label-assign controller-1 openstack-compute-node=enabled + system host-label-assign controller-1 openvswitch=enabled + system host-label-assign controller-1 sriov=enabled + +#. **For OpenStack only:** Set up disk partition for nova-local volume group, + which is needed for stx-openstack nova ephemeral disks. 
+ + :: + + export COMPUTE=controller-1 + + echo ">>> Getting root disk info" + ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}') + ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}') + echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID" + + echo ">>>> Configuring nova-local" + NOVA_SIZE=34 + NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE}) + NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}') + system host-lvg-add ${COMPUTE} nova-local + system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID} + sleep 2 + + echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready." + while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-1 +^^^^^^^^^^^^^^^^^^^ + +Unlock controller-1 in order to bring it into service: + +:: + + system host-unlock controller-1 + +Controller-1 will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +When it completes, your Kubernetes cluster is up and running. + +*************************** +Access StarlingX Kubernetes +*************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-access-starlingx-kubernetes-start: + :end-before: incl-access-starlingx-kubernetes-end: + +------------------- +StarlingX OpenStack +------------------- + +*************************** +Install StarlingX OpenStack +*************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-install-starlingx-openstack-start: + :end-before: incl-install-starlingx-openstack-end: + +************************** +Access StarlingX OpenStack +************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-access-starlingx-openstack-start: + :end-before: incl-access-starlingx-openstack-end: + +***************************** +Uninstall StarlingX OpenStack +***************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-uninstall-starlingx-openstack-start: + :end-before: incl-uninstall-starlingx-openstack-end: + +---------------------------------------------- +Extending capacity with worker / compute nodes +---------------------------------------------- + +************************************************** +Install software on controller-1 and compute nodes +************************************************** + +#. Power on the compute servers and force them to network boot with the + appropriate BIOS boot options for your particular server. + +#. As the compute servers boot, a message appears on their console instructing + you to configure the personality of the node. + +#. 
On the console of controller-0, list hosts to see newly discovered compute hosts,
+   that is, hosts with hostname of None:
+
+   ::
+
+      system host-list
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | id | hostname     | personality | administrative | operational | availability |
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+      | 2  | controller-1 | controller  | unlocked       | enabled     | available    |
+      | 3  | None         | None        | locked         | disabled    | offline      |
+      | 4  | None         | None        | locked         | disabled    | offline      |
+      +----+--------------+-------------+----------------+-------------+--------------+
+
+#. Using the host IDs, set the personality of these hosts to 'worker':
+
+   ::
+
+      system host-update 3 personality=worker hostname=compute-0
+      system host-update 4 personality=worker hostname=compute-1
+
+   This initiates the software install on the compute nodes.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+#. Wait for the software install on the compute nodes to complete, for the nodes
+   to reboot, and for both to show as locked/disabled/online in 'system host-list'.
+
+   ::
+
+      system host-list
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | id | hostname     | personality | administrative | operational | availability |
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+      | 2  | controller-1 | controller  | unlocked       | enabled     | available    |
+      | 3  | compute-0    | compute     | locked         | disabled    | online       |
+      | 4  | compute-1    | compute     | locked         | disabled    | online       |
+      +----+--------------+-------------+----------------+-------------+--------------+
+
+***********************
+Configure compute nodes
+***********************
+
+#. Assign the cluster-host network to the MGMT interface for the compute nodes:
+
+   (Note that the MGMT interfaces are partially set up automatically by the
+   network install procedure.)
+
+   ::
+
+      for COMPUTE in compute-0 compute-1; do
+         system interface-network-assign $COMPUTE mgmt0 cluster-host
+      done
+
+#. Configure data interfaces for compute nodes. Use the DATA port names, e.g.
+   eth0, applicable to your deployment environment.
+
+   ::
+
+      DATA0IF=
+      DATA1IF=
+      PHYSNET0='physnet0'
+      PHYSNET1='physnet1'
+      SPL=/tmp/tmp-system-port-list
+      SPIL=/tmp/tmp-system-host-if-list
+
+      # configure the datanetworks in sysinv, prior to referencing them
+      # in the ``system host-if-modify`` command.
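+      # The two data networks are created once and shared by both compute
+      # nodes. For each node, the loop below resolves the PCI addresses, port
+      # UUIDs, and interface UUIDs of the two data ports from the sysinv
+      # inventory, renames the interfaces to data0/data1, and attaches them
+      # to the data networks.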
+      for COMPUTE in compute-0 compute-1; do
+        echo "Configuring interface for: $COMPUTE"
+        set -ex
+        system host-port-list ${COMPUTE} --nowrap > ${SPL}
+        system host-if-list -a ${COMPUTE} --nowrap > ${SPIL}
+        DATA0PCIADDR=$(cat $SPL | grep $DATA0IF | awk '{print $8}')
+        DATA1PCIADDR=$(cat $SPL | grep $DATA1IF | awk '{print $8}')
+        DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
+        DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
+        DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
+        DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
+        DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
+        DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')
+        system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
+        system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
+        system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
+        system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
+        set +ex
+      done
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+OpenStack-specific host configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+#. **For OpenStack only:** Assign OpenStack host labels to the compute nodes in
+   support of installing the stx-openstack manifest and helm-charts later.
+
+   ::
+
+      for NODE in compute-0 compute-1; do
+        system host-label-assign $NODE openstack-compute-node=enabled
+        system host-label-assign $NODE openvswitch=enabled
+        system host-label-assign $NODE sriov=enabled
+      done
+
+#. **For OpenStack only:** Set up a disk partition for the nova-local volume
+   group, which is needed for stx-openstack nova ephemeral disks.
+
+   (The wait loop runs inside the per-node loop so that it checks each node's
+   own partition UUID.)
+
+   ::
+
+      for COMPUTE in compute-0 compute-1; do
+        echo "Configuring Nova local for: $COMPUTE"
+        ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
+        ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
+        PARTITION_SIZE=10
+        NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${PARTITION_SIZE})
+        NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
+        system host-lvg-add ${COMPUTE} nova-local
+        system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}
+
+        echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready."
+        while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done
+      done
+
+********************
+Unlock compute nodes
+********************
+
+Unlock compute nodes in order to bring them into service:
+
+::
+
+   for COMPUTE in compute-0 compute-1; do
+      system host-unlock $COMPUTE
+   done
+
+The compute nodes will reboot to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
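+
+To confirm that both compute nodes have come back into service, you can poll
+'system host-list' until each node reports unlocked/enabled/available. The
+sketch below is one way to do this; it assumes the hostnames used in this
+guide:
+
+::
+
+   for COMPUTE in compute-0 compute-1; do
+      # Poll every 10 seconds until the node row shows all three states.
+      while ! system host-list | grep $COMPUTE | grep -q "unlocked.*enabled.*available"; do
+         sleep 10
+      done
+      echo "$COMPUTE is available"
+   done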
+
diff --git a/doc/source/deploy_install_guides/current/bare_metal_aio_simplex.rst b/doc/source/deploy_install_guides/current/bare_metal_aio_simplex.rst
new file mode 100644
index 000000000..adc496318
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/bare_metal_aio_simplex.rst
@@ -0,0 +1,486 @@
+==================================
+Bare metal All-in-one Simplex R2.0
+==================================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-aio-simplex-intro-start:
+   :end-before: incl-aio-simplex-intro-end:
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-ipv6-note-start:
+   :end-before: incl-ipv6-note-end:
+
+---------------------
+Hardware requirements
+---------------------
+
+The recommended minimum requirements for the bare metal servers for the various
+host types are:
+
++-------------------------+-----------------------------------------------------------+
+| Minimum Requirement     | All-in-one Controller Node                                |
++=========================+===========================================================+
+| Number of Servers       | 1                                                         |
++-------------------------+-----------------------------------------------------------+
+| Minimum Processor Class | - Dual-CPU Intel® Xeon® E5 26xx Family (SandyBridge)      |
+|                         |   8 cores/socket                                          |
+|                         |                                                           |
+|                         |   or                                                      |
+|                         |                                                           |
+|                         | - Single-CPU Intel® Xeon® D-15xx Family, 8 cores          |
+|                         |   (low-power/low-cost option)                             |
++-------------------------+-----------------------------------------------------------+
+| Minimum Memory          | 64 GB                                                     |
++-------------------------+-----------------------------------------------------------+
+| Primary Disk            | 500 GB SSD or NVMe                                        |
++-------------------------+-----------------------------------------------------------+
+| Additional Disks        | - 1 or more 500 GB (min. 10K RPM) for Ceph OSD            |
+|                         | - Recommended, but not required: 1 or more SSDs or NVMe   |
+|                         |   drives for Ceph journals (min. 1024 MiB per OSD         |
+|                         |   journal)                                                |
+|                         | - For OpenStack, recommend 1 or more 500 GB (min. 10K     |
+|                         |   RPM) for VM local ephemeral storage                     |
++-------------------------+-----------------------------------------------------------+
+| Minimum Network Ports   | - OAM: 1x1GE                                              |
+|                         | - Data: 1 or more x 10GE                                  |
++-------------------------+-----------------------------------------------------------+
+| BIOS Settings           | - Hyper-Threading technology enabled                      |
+|                         | - Virtualization technology enabled                       |
+|                         | - VT for directed I/O enabled                             |
+|                         | - CPU power and performance policy set to performance     |
+|                         | - CPU C state control disabled                            |
+|                         | - Plug & play BMC detection disabled                      |
++-------------------------+-----------------------------------------------------------+
+
+---------------------
+Preparing the servers
+---------------------
+
+.. incl-prepare-servers-start:
+
+Prior to starting the StarlingX installation, the bare metal servers must be in the
+following condition:
+
+* Physically installed
+
+* Cabled for power
+
+* Cabled for networking
+
+  * Far-end switch ports should be properly configured to realize the networking
+    shown in Figure 1.
+
+* All disks wiped
+
+  * Ensures that servers will boot from either the network or USB storage (if present)
+
+* Powered off
+
+.. incl-prepare-servers-end:
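+
+If a server's disks contain data from a previous installation, one possible way
+to wipe them (assuming you can boot the server into any Linux rescue
+environment, and that the disk names below match your hardware) is to clear the
+partition signatures on each disk:
+
+::
+
+   # WARNING: this destroys all data on the listed disks.
+   for DISK in /dev/sda /dev/sdb; do
+      sudo wipefs -a $DISK
+   done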
+
+--------------------
+StarlingX Kubernetes
+--------------------
+
+*******************************
+Installing StarlingX Kubernetes
+*******************************
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Create a bootable USB with the StarlingX ISO
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Create a bootable USB with the StarlingX ISO.
+
+Refer to :doc:`/deploy_install_guides/bootable_usb` for instructions on how to
+create a bootable USB on your system.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. incl-install-software-controller-0-start:
+
+#. Insert the bootable USB into a bootable USB port on the host you are
+   configuring as controller-0.
+
+#. Power on the host.
+
+#. Attach to a console, ensure the host boots from the USB, and wait for the
+   StarlingX Installer Menus.
+
+#. Make the following menu selections in the installer:
+
+   #. First menu: Select 'All-in-one Controller Configuration' for All-in-one
+      deployments, or 'Standard Controller Configuration' for Standard
+      deployments
+   #. Second menu: Select 'Graphical Console' or 'Textual Console' depending on
+      your terminal access to the console port
+   #. Third menu: Select 'Standard Security Profile'
+
+#. Wait for the non-interactive install of software to complete and the server
+   to reboot. This can take 5-10 minutes, depending on the performance of the
+   server.
+
+.. incl-install-software-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Bootstrap system on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. incl-bootstrap-sys-controller-0-start:
+
+#. Log in using the username / password of "sysadmin" / "sysadmin".
+   When logging in for the first time, you will be forced to change the password.
+
+   ::
+
+      Login: sysadmin
+      Password:
+      Changing password for sysadmin.
+      (current) UNIX Password: sysadmin
+      New Password:
+      (repeat) New Password:
+
+#. External connectivity is required to run the Ansible bootstrap playbook. The
+   StarlingX boot image runs DHCP on all interfaces, so the server may already
+   have obtained an IP address and external IP connectivity if a DHCP server is
+   present in your environment.
+   Verify this using the :command:`ip addr` and :command:`ping 8.8.8.8` commands.
+
+   Otherwise, manually configure an IP address and default IP route. Use the
+   PORT, IP-ADDRESS/SUBNET-LENGTH and GATEWAY-IP-ADDRESS applicable to your
+   deployment environment.
+
+   ::
+
+      sudo ip address add <IP-ADDRESS>/<SUBNET-LENGTH> dev <PORT>
+      sudo ip link set up dev <PORT>
+      sudo ip route add default via <GATEWAY-IP-ADDRESS> dev <PORT>
+      ping 8.8.8.8
+
+#. Specify user configuration overrides for the Ansible bootstrap playbook.
+
+   Ansible is used to bootstrap StarlingX on controller-0:
+
+   * The default Ansible inventory file, ``/etc/ansible/hosts``, contains a single
+     host: localhost.
+   * The Ansible bootstrap playbook is at:
+     ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml``
+   * The default configuration values for the bootstrap playbook are in:
+     ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/host_vars/default.yml``
+   * By default Ansible looks for and imports user configuration override files
+     for hosts in the sysadmin home directory ($HOME), for example:
+     ``$HOME/<hostname>.yml``
+
+   Specify the user configuration override file for the Ansible bootstrap
+   playbook by either:
+
+   * Copying the default.yml file listed above to ``$HOME/localhost.yml`` and editing
+     the configurable values as desired, based on the commented instructions in
+     the file.
+
+     or
+
+   * Creating the minimal user configuration override file as shown in the
+     example below, using the OAM IP SUBNET and IP ADDRESSing applicable to your
+     deployment environment:
+
+     ::
+
+        cd ~
+        cat <<EOF > localhost.yml
+        system_mode: duplex
+
+        dns_servers:
+          - 8.8.8.8
+          - 8.8.4.4
+
+        external_oam_subnet: <OAM-IP-SUBNET>/<OAM-IP-SUBNET-LENGTH>
+        external_oam_gateway_address: <OAM-GATEWAY-IP-ADDRESS>
+        external_oam_floating_address: <OAM-FLOATING-IP-ADDRESS>
+        external_oam_node_0_address: <OAM-NODE-0-IP-ADDRESS>
+        external_oam_node_1_address: <OAM-NODE-1-IP-ADDRESS>
+
+        admin_username: admin
+        admin_password: <admin-password>
+        ansible_become_pass: <sysadmin-password>
+        EOF
+
+   If you are using IPv6, provide IPv6 configuration overrides. Note that all
+   addressing, except pxeboot_subnet, should be updated to IPv6 addressing.
+   Example IPv6 override values are shown below:
+
+   ::
+
+      dns_servers:
+        - 2001:4860:4860::8888
+        - 2001:4860:4860::8844
+      pxeboot_subnet: 169.254.202.0/24
+      management_subnet: 2001:db8:2::/64
+      cluster_host_subnet: 2001:db8:3::/64
+      cluster_pod_subnet: 2001:db8:4::/64
+      cluster_service_subnet: 2001:db8:4::/112
+      external_oam_subnet: 2001:db8:1::/64
+      external_oam_gateway_address: 2001:db8::1
+      external_oam_floating_address: 2001:db8::2
+      external_oam_node_0_address: 2001:db8::3
+      external_oam_node_1_address: 2001:db8::4
+      management_multicast_subnet: ff08::1:1:0/124
+
+   Note that the external_oam_node_0_address and external_oam_node_1_address
+   parameters are not required for the AIO-SX installation, and that an AIO-SX
+   installation uses ``system_mode: simplex``.
+
+#. Run the Ansible bootstrap playbook:
+
+   ::
+
+      ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml
+
+   Wait for the Ansible bootstrap playbook to complete.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+.. incl-bootstrap-sys-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. incl-config-controller-0-start:
+
+#. Acquire admin credentials:
+
+   ::
+
+      source /etc/platform/openrc
+
+#. Configure the OAM and MGMT interfaces of controller-0 and specify the attached
+   networks. Use the OAM and MGMT port names, e.g. eth0, applicable to your
+   deployment environment.
+
+   ::
+
+      OAM_IF=<OAM-PORT>
+      MGMT_IF=<MGMT-PORT>
+      system host-if-modify controller-0 lo -c none
+      IFNET_UUIDS=$(system interface-network-list controller-0 | awk '{if ($6=="lo") print $4;}')
+      for UUID in $IFNET_UUIDS; do
+         system interface-network-remove ${UUID}
+      done
+      system host-if-modify controller-0 $OAM_IF -c platform
+      system interface-network-assign controller-0 $OAM_IF oam
+      system host-if-modify controller-0 $MGMT_IF -c platform
+      system interface-network-assign controller-0 $MGMT_IF mgmt
+      system interface-network-assign controller-0 $MGMT_IF cluster-host
+
+#. Configure NTP servers for network time synchronization:
+
+   ::
+
+      system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org
+
+#. Configure data interfaces for controller-0. Use the DATA port names, e.g.
+   eth0, applicable to your deployment environment.
+
+   .. note::
+
+      This step is **required** for OpenStack and optional for Kubernetes. For
+      example, do this step if using SRIOV network attachments in application
+      containers.
+
+   For Kubernetes SRIOV network attachments:
+
+   * Configure the SRIOV device plugin:
+
+     ::
+
+        system host-label-assign controller-0 sriovdp=enabled
+
+   * If planning on running DPDK in containers on this host, configure the number
+     of 1G Huge pages required on both NUMA nodes.
+
+     ::
+
+        system host-memory-modify controller-0 0 -1G 100
+        system host-memory-modify controller-0 1 -1G 100
+
+   For both Kubernetes and OpenStack:
+
+   ::
+
+      DATA0IF=<DATA-0-PORT>
+      DATA1IF=<DATA-1-PORT>
+      export COMPUTE=controller-0
+      PHYSNET0='physnet0'
+      PHYSNET1='physnet1'
+      SPL=/tmp/tmp-system-port-list
+      SPIL=/tmp/tmp-system-host-if-list
+      system host-port-list ${COMPUTE} --nowrap > ${SPL}
+      system host-if-list -a ${COMPUTE} --nowrap > ${SPIL}
+      DATA0PCIADDR=$(cat $SPL | grep $DATA0IF | awk '{print $8}')
+      DATA1PCIADDR=$(cat $SPL | grep $DATA1IF | awk '{print $8}')
+      DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
+      DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
+      DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
+      DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
+      DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
+      DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')
+
+      system datanetwork-add ${PHYSNET0} vlan
+      system datanetwork-add ${PHYSNET1} vlan
+
+      system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
+      system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
+      system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
+      system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
+
+#. Add an OSD on controller-0 for Ceph:
+
+   ::
+
+      echo ">>> Add OSDs to primary tier"
+      system host-disk-list controller-0
+      system host-disk-list controller-0 | awk '/\/dev\/sdb/{print $2}' | xargs -i system host-stor-add controller-0 {}
+      system host-stor-list controller-0
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OpenStack-specific host configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+#. **For OpenStack only:** Assign OpenStack host labels to controller-0 in
+   support of installing the stx-openstack manifest and helm-charts later.
+
+   ::
+
+      system host-label-assign controller-0 openstack-control-plane=enabled
+      system host-label-assign controller-0 openstack-compute-node=enabled
+      system host-label-assign controller-0 openvswitch=enabled
+      system host-label-assign controller-0 sriov=enabled
+
+#. **For OpenStack only:** Configure the system setting for the vSwitch.
+
+   StarlingX has OVS (kernel-based) vSwitch configured as default:
+
+   * Runs in a container, defined within the helm charts of the stx-openstack
+     manifest.
+   * Shares the core(s) assigned to the platform.
+
+   If you require better performance, OVS-DPDK should be used:
+
+   * Runs directly on the host (i.e. NOT containerized).
+   * Requires that at least 1 core be assigned/dedicated to the vSwitch function.
+
+   To deploy the default containerized OVS:
+
+   ::
+
+      system modify --vswitch_type none
+
+   Do not run any vSwitch directly on the host; instead, use the containerized
+   OVS defined in the helm charts of the stx-openstack manifest.
+
+   To deploy OVS-DPDK (OVS with the Data Plane Development Kit, which is
+   supported only on bare metal hardware), run the following command:
+
+   ::
+
+      system modify --vswitch_type ovs-dpdk
+      system host-cpu-modify -f vswitch -p0 1 controller-0
+
+   Once vswitch_type is set to OVS-DPDK, any subsequent nodes created will
+   default to automatically assigning 1 vSwitch core for AIO controllers and 2
+   vSwitch cores for computes.
+
+   When using OVS-DPDK, virtual machines must be configured to use a flavor with
+   property: hw:mem_page_size=large
+
+   .. note::
+
+      After controller-0 is unlocked, changing vswitch_type would require
+      locking and unlocking all computes (and/or AIO controllers) in order to
+      apply the change.
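+
+   For reference, one way to set this property on a flavor is with the
+   OpenStack CLI, once stx-openstack is applied and OpenStack credentials are
+   sourced; the flavor name and sizing below are only an example:
+
+   ::
+
+      openstack flavor create --ram 4096 --vcpus 2 --disk 20 \
+        --property hw:mem_page_size=large m1.medium.hugepages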
+
+#. **For OpenStack only:** Set up a disk partition for the nova-local volume
+   group, which is needed for stx-openstack nova ephemeral disks.
+
+   ::
+
+      export COMPUTE=controller-0
+
+      echo ">>> Getting root disk info"
+      ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
+      ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
+      echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID"
+
+      echo ">>>> Configuring nova-local"
+      NOVA_SIZE=34
+      NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE})
+      NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
+      system host-lvg-add ${COMPUTE} nova-local
+      system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}
+      sleep 2
+
+      echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready."
+      while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done
+
+.. incl-config-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-0
+^^^^^^^^^^^^^^^^^^^
+
+Unlock controller-0 in order to bring it into service:
+
+::
+
+   system host-unlock controller-0
+
+Controller-0 will reboot in order to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
+
+When it completes, your Kubernetes cluster is up and running.
+
+***************************
+Access StarlingX Kubernetes
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-kubernetes-start:
+   :end-before: incl-access-starlingx-kubernetes-end:
+
+-------------------
+StarlingX OpenStack
+-------------------
+
+***************************
+Install StarlingX OpenStack
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-install-starlingx-openstack-start:
+   :end-before: incl-install-starlingx-openstack-end:
+
+**************************
+Access StarlingX OpenStack
+**************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-openstack-start:
+   :end-before: incl-access-starlingx-openstack-end:
+
+*****************************
+Uninstall StarlingX OpenStack
+*****************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-uninstall-starlingx-openstack-start:
+   :end-before: incl-uninstall-starlingx-openstack-end:
\ No newline at end of file
diff --git a/doc/source/deploy_install_guides/current/bare_metal_controller_storage.rst b/doc/source/deploy_install_guides/current/bare_metal_controller_storage.rst
new file mode 100644
index 000000000..f53f38b32
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/bare_metal_controller_storage.rst
@@ -0,0 +1,547 @@
+================================================
+Bare metal Standard with Controller Storage R2.0
+================================================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. include:: virtual_controller_storage.rst
+   :start-after: incl-controller-storage-intro-start:
+   :end-before: incl-controller-storage-intro-end:
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-ipv6-note-start:
+   :end-before: incl-ipv6-note-end:
+
+---------------------
+Hardware requirements
+---------------------
+
+The recommended minimum requirements for the bare metal servers for the various
+host types are:
+
++-------------------------+-----------------------------+-----------------------------+
+| Minimum Requirement     | Controller Node             | Compute Node                |
++=========================+=============================+=============================+
+| Number of Servers       | 2                           | 2-10                        |
++-------------------------+-----------------------------+-----------------------------+
+| Minimum Processor Class | - Dual-CPU Intel® Xeon® E5 26xx family (SandyBridge)      |
+|                         |   8 cores/socket                                          |
++-------------------------+-----------------------------+-----------------------------+
+| Minimum Memory          | 64 GB                       | 32 GB                       |
++-------------------------+-----------------------------+-----------------------------+
+| Primary Disk            | 500 GB SSD or NVMe          | 120 GB (Minimum 10K RPM)    |
++-------------------------+-----------------------------+-----------------------------+
+| Additional Disks        | - 1 or more 500 GB (min.    | - For OpenStack, recommend  |
+|                         |   10K RPM) for Ceph OSD     |   1 or more 500 GB (min.    |
+|                         | - Recommended, but not      |   10K RPM) for VM local     |
+|                         |   required: 1 or more SSDs  |   ephemeral storage         |
+|                         |   or NVMe drives for Ceph   |                             |
+|                         |   journals (min. 1024 MiB   |                             |
+|                         |   per OSD journal)          |                             |
++-------------------------+-----------------------------+-----------------------------+
+| Minimum Network Ports   | - Mgmt/Cluster: 1x10GE      | - Mgmt/Cluster: 1x10GE      |
+|                         | - OAM: 1x1GE                | - Data: 1 or more x 10GE    |
++-------------------------+-----------------------------+-----------------------------+
+| BIOS Settings           | - Hyper-Threading technology enabled                      |
+|                         | - Virtualization technology enabled                       |
+|                         | - VT for directed I/O enabled                             |
+|                         | - CPU power and performance policy set to performance     |
+|                         | - CPU C state control disabled                            |
+|                         | - Plug & play BMC detection disabled                      |
++-------------------------+-----------------------------+-----------------------------+
+
+---------------
+Prepare Servers
+---------------
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-prepare-servers-start:
+   :end-before: incl-prepare-servers-end:
+
+--------------------
+StarlingX Kubernetes
+--------------------
+
+*******************************
+Installing StarlingX Kubernetes
+*******************************
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Create a bootable USB with the StarlingX ISO
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Create a bootable USB with the StarlingX ISO.
+
+Refer to :doc:`/deploy_install_guides/bootable_usb` for instructions on how to
+create a bootable USB on your system.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-install-software-controller-0-start:
+   :end-before: incl-install-software-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Bootstrap system on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-bootstrap-sys-controller-0-start:
+   :end-before: incl-bootstrap-sys-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+#. Acquire admin credentials:
+
+   ::
+
+      source /etc/platform/openrc
+
+#. Configure the OAM and MGMT interfaces of controller-0 and specify the
+   attached networks. Use the OAM and MGMT port names, e.g. eth0, applicable to
+   your deployment environment.
+
+   ::
+
+      OAM_IF=<OAM-PORT>
+      MGMT_IF=<MGMT-PORT>
+      system host-if-modify controller-0 lo -c none
+      IFNET_UUIDS=$(system interface-network-list controller-0 | awk '{if ($6=="lo") print $4;}')
+      for UUID in $IFNET_UUIDS; do
+         system interface-network-remove ${UUID}
+      done
+      system host-if-modify controller-0 $OAM_IF -c platform
+      system interface-network-assign controller-0 $OAM_IF oam
+      system host-if-modify controller-0 $MGMT_IF -c platform
+      system interface-network-assign controller-0 $MGMT_IF mgmt
+      system interface-network-assign controller-0 $MGMT_IF cluster-host
+
+#. Configure NTP servers for network time synchronization:
+
+   ::
+
+      system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OpenStack-specific host configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+#. **For OpenStack only:** Assign OpenStack host labels to controller-0 in
+   support of installing the stx-openstack manifest and helm-charts later.
+
+   ::
+
+      system host-label-assign controller-0 openstack-control-plane=enabled
+
+#. **For OpenStack only:** Configure the system setting for the vSwitch.
+
+   StarlingX has OVS (kernel-based) vSwitch configured as default:
+
+   * Runs in a container, defined within the helm charts of the stx-openstack
+     manifest.
+   * Shares the core(s) assigned to the platform.
+
+   If you require better performance, OVS-DPDK should be used:
+
+   * Runs directly on the host (it is NOT containerized).
+   * Requires that at least 1 core be assigned/dedicated to the vSwitch function.
+
+   To deploy the default containerized OVS:
+
+   ::
+
+      system modify --vswitch_type none
+
+   Do not run any vSwitch directly on the host; instead, use the containerized
+   OVS defined in the helm charts of the stx-openstack manifest.
+
+   To deploy OVS-DPDK (OVS with the Data Plane Development Kit, which is
+   supported only on bare metal hardware), run the following command:
+
+   ::
+
+      system modify --vswitch_type ovs-dpdk
+      system host-cpu-modify -f vswitch -p0 1 controller-0
+
+   Once vswitch_type is set to OVS-DPDK, any subsequent nodes created will
+   default to automatically assigning 1 vSwitch core for AIO controllers and 2
+   vSwitch cores for computes.
+
+   When using OVS-DPDK, virtual machines must be configured to use a flavor with
+   property: hw:mem_page_size=large.
+
+   .. note::
+
+      After controller-0 is unlocked, changing vswitch_type requires
+      locking and unlocking all computes (and/or AIO controllers) to
+      apply the change.
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-0
+^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_duplex.rst
+   :start-after: incl-unlock-controller-0-start:
+   :end-before: incl-unlock-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-1 and compute nodes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Power on the controller-1 server and force it to network boot with the
+   appropriate BIOS boot options for your particular server.
+
+#. As controller-1 boots, a message appears on its console instructing you to
+   configure the personality of the node.
+
+#. On the console of controller-0, list hosts to see the newly discovered
+   controller-1 host, that is, the host with a hostname of None:
+
+   ::
+
+      system host-list
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | id | hostname     | personality | administrative | operational | availability |
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+      | 2  | None         | None        | locked         | disabled    | offline      |
+      +----+--------------+-------------+----------------+-------------+--------------+
+
+#. Using the host id, set the personality of this host to 'controller':
+
+   ::
+
+      system host-update 2 personality=controller
+
+   This initiates the install of software on controller-1.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+#. While waiting, repeat the same procedure for the compute-0 and compute-1
+   servers, except set the personality to 'worker' and assign a unique
+   hostname, as shown below:
+
+   ::
+
+      system host-update 3 personality=worker hostname=compute-0
+      system host-update 4 personality=worker hostname=compute-1
+
+#. Wait for the software installation on controller-1, compute-0, and compute-1
+   to complete, for all servers to reboot, and for all to show as
+   locked/disabled/online in 'system host-list'.
+
+   ::
+
+      system host-list
+
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | id | hostname     | personality | administrative | operational | availability |
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+      | 2  | controller-1 | controller  | locked         | disabled    | online       |
+      | 3  | compute-0    | compute     | locked         | disabled    | online       |
+      | 4  | compute-1    | compute     | locked         | disabled    | online       |
+      +----+--------------+-------------+----------------+-------------+--------------+
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-1
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. incl-config-controller-1-start:
+
+Configure the OAM and MGMT interfaces of controller-1 and specify the attached
+networks. Use the OAM and MGMT port names, e.g. eth0, applicable to your
+deployment environment.
+
+(Note that the MGMT interface is partially set up automatically by the network
+install procedure.)
+
+::
+
+   OAM_IF=<OAM-PORT>
+   MGMT_IF=<MGMT-PORT>
+   system host-if-modify controller-1 $OAM_IF -c platform
+   system interface-network-assign controller-1 $OAM_IF oam
+   system interface-network-assign controller-1 $MGMT_IF cluster-host
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OpenStack-specific host configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+**For OpenStack only:** Assign OpenStack host labels to controller-1 in support
+of installing the stx-openstack manifest and helm-charts later.
+
+::
+
+   system host-label-assign controller-1 openstack-control-plane=enabled
+
+.. incl-config-controller-1-end:
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-1
+^^^^^^^^^^^^^^^^^^^
+
+.. incl-unlock-controller-1-start:
+
+Unlock controller-1 in order to bring it into service:
+
+::
+
+   system host-unlock controller-1
+
+Controller-1 will reboot in order to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
+
+.. incl-unlock-controller-1-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^
+Configure compute nodes
+^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Add the third Ceph monitor to compute-0:
+
+   (The first two Ceph monitors are automatically assigned to controller-0 and
+   controller-1.)
+
+   ::
+
+      system ceph-mon-add compute-0
+
+#. Wait for the compute node monitor to complete configuration:
+
+   ::
+
+      system ceph-mon-list
+      +--------------------------------------+-------+--------------+------------+------+
+      | uuid                                 | ceph_ | hostname     | state      | task |
+      |                                      | mon_g |              |            |      |
+      |                                      | ib    |              |            |      |
+      +--------------------------------------+-------+--------------+------------+------+
+      | 64176b6c-e284-4485-bb2a-115dee215279 | 20    | controller-1 | configured | None |
+      | a9ca151b-7f2c-4551-8167-035d49e2df8c | 20    | controller-0 | configured | None |
+      | f76bc385-190c-4d9a-aa0f-107346a9907b | 20    | compute-0    | configured | None |
+      +--------------------------------------+-------+--------------+------------+------+
+
+#. Assign the cluster-host network to the MGMT interface for the compute nodes:
+
+   (Note that the MGMT interfaces are partially set up automatically by the
+   network install procedure.)
+
+   ::
+
+      for COMPUTE in compute-0 compute-1; do
+         system interface-network-assign $COMPUTE mgmt0 cluster-host
+      done
+
+#. Configure data interfaces for compute nodes. Use the DATA port names, e.g.
+   eth0, applicable to your deployment environment.
+
+   .. note::
+
+      This step is **required** for OpenStack and optional for Kubernetes. For
+      example, do this step if using SRIOV network attachments in application
+      containers.
+
+   For Kubernetes SRIOV network attachments:
+
+   * Configure the SRIOV device plugin:
+
+     ::
+
+        for COMPUTE in compute-0 compute-1; do
+           system host-label-assign ${COMPUTE} sriovdp=enabled
+        done
+
+   * If planning on running DPDK in containers on this host, configure the number
+     of 1G Huge pages required on both NUMA nodes:
+
+     ::
+
+        for COMPUTE in compute-0 compute-1; do
+           system host-memory-modify ${COMPUTE} 0 -1G 100
+           system host-memory-modify ${COMPUTE} 1 -1G 100
+        done
+
+   For both Kubernetes and OpenStack:
+
+   ::
+
+      DATA0IF=<DATA-0-PORT>
+      DATA1IF=<DATA-1-PORT>
+      PHYSNET0='physnet0'
+      PHYSNET1='physnet1'
+      SPL=/tmp/tmp-system-port-list
+      SPIL=/tmp/tmp-system-host-if-list
+
+      # Configure the datanetworks in sysinv, prior to referencing them
+      # in the ``system host-if-modify`` command.
+      system datanetwork-add ${PHYSNET0} vlan
+      system datanetwork-add ${PHYSNET1} vlan
+
+      for COMPUTE in compute-0 compute-1; do
+        echo "Configuring interface for: $COMPUTE"
+        set -ex
+        system host-port-list ${COMPUTE} --nowrap > ${SPL}
+        system host-if-list -a ${COMPUTE} --nowrap > ${SPIL}
+        DATA0PCIADDR=$(cat $SPL | grep $DATA0IF | awk '{print $8}')
+        DATA1PCIADDR=$(cat $SPL | grep $DATA1IF | awk '{print $8}')
+        DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
+        DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
+        DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
+        DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
+        DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
+        DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')
+        system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
+        system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
+        system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
+        system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
+        set +ex
+      done
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+OpenStack-specific host configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+#. **For OpenStack only:** Assign OpenStack host labels to the compute nodes in
+   support of installing the stx-openstack manifest and helm-charts later.
+
+   ::
+
+      for NODE in compute-0 compute-1; do
+        system host-label-assign $NODE openstack-compute-node=enabled
+        system host-label-assign $NODE openvswitch=enabled
+        system host-label-assign $NODE sriov=enabled
+      done
+
+#. **For OpenStack only:** Set up a disk partition for the nova-local volume
+   group, which is needed for stx-openstack nova ephemeral disks.
+
+   (The wait loop runs inside the per-node loop so that it checks each node's
+   own partition UUID.)
+
+   ::
+
+      for COMPUTE in compute-0 compute-1; do
+        echo "Configuring Nova local for: $COMPUTE"
+        ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
+        ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
+        PARTITION_SIZE=10
+        NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${PARTITION_SIZE})
+        NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
+        system host-lvg-add ${COMPUTE} nova-local
+        system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}
+
+        echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready."
+        while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done
+      done
+
+^^^^^^^^^^^^^^^^^^^^
+Unlock compute nodes
+^^^^^^^^^^^^^^^^^^^^
+
+Unlock compute nodes in order to bring them into service:
+
+::
+
+   for COMPUTE in compute-0 compute-1; do
+      system host-unlock $COMPUTE
+   done
+
+The compute nodes will reboot in order to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Add Ceph OSDs to controllers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Add OSDs to controller-0:
+
+   ::
+
+      HOST=controller-0
+      DISKS=$(system host-disk-list ${HOST})
+      TIERS=$(system storage-tier-list ceph_cluster)
+      OSDs="/dev/sdb"
+      for OSD in $OSDs; do
+         system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}')
+         while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done
+      done
+
+      system host-stor-list $HOST
+
+#. Add OSDs to controller-1:
+
+   ::
+
+      HOST=controller-1
+      DISKS=$(system host-disk-list ${HOST})
+      TIERS=$(system storage-tier-list ceph_cluster)
+      OSDs="/dev/sdb"
+      for OSD in $OSDs; do
+         system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}')
+         while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done
+      done
+
+      system host-stor-list $HOST
+
+Your Kubernetes cluster is up and running.
+
+***************************
+Access StarlingX Kubernetes
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-kubernetes-start:
+   :end-before: incl-access-starlingx-kubernetes-end:
+
+-------------------
+StarlingX OpenStack
+-------------------
+
+***************************
+Install StarlingX OpenStack
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-install-starlingx-openstack-start:
+   :end-before: incl-install-starlingx-openstack-end:
+
+**************************
+Access StarlingX OpenStack
+**************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-openstack-start:
+   :end-before: incl-access-starlingx-openstack-end:
+
+*****************************
+Uninstall StarlingX OpenStack
+*****************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-uninstall-starlingx-openstack-start:
+   :end-before: incl-uninstall-starlingx-openstack-end:
diff --git a/doc/source/deploy_install_guides/current/bare_metal_dedicated_storage.rst b/doc/source/deploy_install_guides/current/bare_metal_dedicated_storage.rst
new file mode 100644
index 000000000..036db8e54
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/bare_metal_dedicated_storage.rst
@@ -0,0 +1,442 @@
+===============================================
+Bare metal Standard with Dedicated Storage R2.0
+===============================================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. include:: virtual_dedicated_storage.rst
+   :start-after: incl-dedicated-storage-intro-start:
+   :end-before: incl-dedicated-storage-intro-end:
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-ipv6-note-start:
+   :end-before: incl-ipv6-note-end:
+
+---------------------
+Hardware requirements
+---------------------
+
+The recommended minimum requirements for the bare metal servers for the various
+host types are:
+
++---------------------+-----------------------+-----------------------+-----------------------+
+| Minimum Requirement | Controller Node       | Storage Node          | Compute Node          |
++=====================+=======================+=======================+=======================+
+| Number of Servers   | 2                     | 2-9                   | 2-100                 |
++---------------------+-----------------------+-----------------------+-----------------------+
+| Minimum Processor   | Dual-CPU Intel® Xeon® E5 26xx family (SandyBridge) 8 cores/socket    |
+| Class               |                                                                       |
++---------------------+-----------------------+-----------------------+-----------------------+
+| Minimum Memory      | 64 GB                 | 64 GB                 | 32 GB                 |
++---------------------+-----------------------+-----------------------+-----------------------+
+| Primary Disk        | 500 GB SSD or NVMe    | 120 GB (min. 10k RPM) | 120 GB (min. 10k RPM) |
++---------------------+-----------------------+-----------------------+-----------------------+
+| Additional Disks    | None                  | - 1 or more 500 GB    | - For OpenStack,      |
+|                     |                       |   (min. 10K RPM) for  |   recommend 1 or more |
+|                     |                       |   Ceph OSD            |   500 GB (min. 10K    |
+|                     |                       | - Recommended, but    |   RPM) for VM         |
+|                     |                       |   not required: 1 or  |   ephemeral storage   |
+|                     |                       |   more SSDs or NVMe   |                       |
+|                     |                       |   drives for Ceph     |                       |
+|                     |                       |   journals (min. 1024 |                       |
+|                     |                       |   MiB per OSD         |                       |
+|                     |                       |   journal)            |                       |
++---------------------+-----------------------+-----------------------+-----------------------+
+| Minimum Network     | - Mgmt/Cluster:       | - Mgmt/Cluster:       | - Mgmt/Cluster:       |
+| Ports               |   1x10GE              |   1x10GE              |   1x10GE              |
+|                     | - OAM: 1x1GE          |                       | - Data: 1 or more     |
+|                     |                       |                       |   x 10GE              |
++---------------------+-----------------------+-----------------------+-----------------------+
+| BIOS Settings       | - Hyper-Threading technology enabled                                  |
+|                     | - Virtualization technology enabled                                   |
+|                     | - VT for directed I/O enabled                                         |
+|                     | - CPU power and performance policy set to performance                 |
+|                     | - CPU C state control disabled                                        |
+|                     | - Plug & play BMC detection disabled                                  |
++---------------------+-----------------------+-----------------------+-----------------------+
+
+---------------
+Prepare Servers
+---------------
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-prepare-servers-start:
+   :end-before: incl-prepare-servers-end:
+
+--------------------
+StarlingX Kubernetes
+--------------------
+
+*******************************
+Installing StarlingX Kubernetes
+*******************************
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Create a bootable USB with the StarlingX ISO
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Create a bootable USB with the StarlingX ISO.
+
+Refer to :doc:`/deploy_install_guides/bootable_usb` for instructions on how to
+create a bootable USB on your system.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-install-software-controller-0-start:
+   :end-before: incl-install-software-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Bootstrap system on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-bootstrap-sys-controller-0-start:
+   :end-before: incl-bootstrap-sys-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_simplex.rst
+   :start-after: incl-config-controller-0-start:
+   :end-before: incl-config-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-0
+^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_aio_duplex.rst
+   :start-after: incl-unlock-controller-0-start:
+   :end-before: incl-unlock-controller-0-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-1, storage nodes and compute nodes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Power on the controller-1 server and force it to network boot with the
+   appropriate BIOS boot options for your particular server.
+
+#. As controller-1 boots, a message appears on its console instructing you to
+   configure the personality of the node.
+
+#. On the console of controller-0, list hosts to see the newly discovered
+   controller-1 host, that is, the host with a hostname of None:
+
+   ::
+
+      system host-list
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | id | hostname     | personality | administrative | operational | availability |
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+      | 2  | None         | None        | locked         | disabled    | offline      |
+      +----+--------------+-------------+----------------+-------------+--------------+
+
+#. Using the host id, set the personality of this host to 'controller':
+
+   ::
+
+      system host-update 2 personality=controller
+
+   This initiates the install of software on controller-1.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+#. While waiting, repeat the same procedure for the storage-0 and storage-1
+   servers, except set the personality to 'storage' and assign a unique
+   hostname, as shown below:
+
+   ::
+
+      system host-update 3 personality=storage hostname=storage-0
+      system host-update 4 personality=storage hostname=storage-1
+
+   This initiates the software installation on storage-0 and storage-1.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+#. While waiting, repeat the same procedure for the compute-0 and compute-1
+   servers, except set the personality to 'worker' and assign a unique
+   hostname, as shown below:
+
+   ::
+
+      system host-update 5 personality=worker hostname=compute-0
+      system host-update 6 personality=worker hostname=compute-1
+
+   This initiates the install of software on compute-0 and compute-1.
+
+#. Wait for the software installation on controller-1, storage-0, storage-1,
+   compute-0, and compute-1 to complete, for all servers to reboot, and for all to
+   show as locked/disabled/online in 'system host-list'.
+
+   ::
+
+      system host-list
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | id | hostname     | personality | administrative | operational | availability |
+      +----+--------------+-------------+----------------+-------------+--------------+
+      | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+      | 2  | controller-1 | controller  | locked         | disabled    | online       |
+      | 3  | storage-0    | storage     | locked         | disabled    | online       |
+      | 4  | storage-1    | storage     | locked         | disabled    | online       |
+      | 5  | compute-0    | compute     | locked         | disabled    | online       |
+      | 6  | compute-1    | compute     | locked         | disabled    | online       |
+      +----+--------------+-------------+----------------+-------------+--------------+
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-1
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_controller_storage.rst
+   :start-after: incl-config-controller-1-start:
+   :end-before: incl-config-controller-1-end:
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-1
+^^^^^^^^^^^^^^^^^^^
+
+.. include:: bare_metal_controller_storage.rst
+   :start-after: incl-unlock-controller-1-start:
+   :end-before: incl-unlock-controller-1-end:
+
+^^^^^^^^^^^^^^^^^^^^^^^
+Configure storage nodes
+^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Assign the cluster-host network to the MGMT interface for the storage nodes:
+
+   (Note that the MGMT interfaces are partially set up automatically by the
+   network install procedure.)
+
+   ::
+
+      for STORAGE in storage-0 storage-1; do
+         system interface-network-assign $STORAGE mgmt0 cluster-host
+      done
+
+#. Add OSDs to storage-0:
+
+   ::
+
+      HOST=storage-0
+      DISKS=$(system host-disk-list ${HOST})
+      TIERS=$(system storage-tier-list ceph_cluster)
+      OSDs="/dev/sdb"
+      for OSD in $OSDs; do
+         system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}')
+         while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done
+      done
+
+      system host-stor-list $HOST
+
+#. Add OSDs to storage-1:
+
+   ::
+
+      HOST=storage-1
+      DISKS=$(system host-disk-list ${HOST})
+      TIERS=$(system storage-tier-list ceph_cluster)
+      OSDs="/dev/sdb"
+      for OSD in $OSDs; do
+         system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}')
+         while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done
+      done
+
+      system host-stor-list $HOST
+
+^^^^^^^^^^^^^^^^^^^^
+Unlock storage nodes
+^^^^^^^^^^^^^^^^^^^^
+
+Unlock storage nodes in order to bring them into service:
+
+::
+
+   for STORAGE in storage-0 storage-1; do
+      system host-unlock $STORAGE
+   done
+
+The storage nodes will reboot in order to apply configuration changes and come
+into service. This can take 5-10 minutes, depending on the performance of the host machine.
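+
+Once the storage nodes are back in service, you can optionally confirm that the
+Ceph cluster has formed and the OSDs are up before continuing; this is only a
+quick sanity check, and the exact output varies by deployment:
+
+::
+
+   # Run on the active controller; look for HEALTH_OK and all OSDs 'up'.
+   ceph -s
+   ceph osd tree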
For + example, do this step if using SRIOV network attachments in application + containers. + + For Kubernetes SRIOV network attachments: + + * Configure the SRIOV device plugin: + + :: + + for COMPUTE in compute-0 compute-1; do + system host-label-assign ${COMPUTE} sriovdp=enabled + done + + * If planning on running DPDK in containers on this host, configure the number + of 1G Huge pages required on both NUMA nodes: + + :: + + for COMPUTE in compute-0 compute-1; do + system host-memory-modify ${COMPUTE} 0 -1G 100 + system host-memory-modify ${COMPUTE} 1 -1G 100 + done + + For both Kubernetes and OpenStack: + + :: + + DATA0IF= + DATA1IF= + PHYSNET0='physnet0' + PHYSNET1='physnet1' + SPL=/tmp/tmp-system-port-list + SPIL=/tmp/tmp-system-host-if-list + + # configure the datanetworks in sysinv, prior to referencing it + # in the ``system host-if-modify`` command'. + system datanetwork-add ${PHYSNET0} vlan + system datanetwork-add ${PHYSNET1} vlan + + for COMPUTE in compute-0 compute-1; do + echo "Configuring interface for: $COMPUTE" + set -ex + system host-port-list ${COMPUTE} --nowrap > ${SPL} + system host-if-list -a ${COMPUTE} --nowrap > ${SPIL} + DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}') + DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}') + DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}') + DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}') + DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}') + DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}') + DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}') + DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}') + system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} + system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID} + system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0} + system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1} + set +ex + done + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to the compute nodes in + support of installing the stx-openstack manifest and helm-charts later. + + :: + + for NODE in compute-0 compute-1; do + system host-label-assign $NODE openstack-compute-node=enabled + system host-label-assign $NODE openvswitch=enabled + system host-label-assign $NODE sriov=enabled + done + +#. **For OpenStack only:** Set up disk partition for nova-local volume group, + which is needed for stx-openstack nova ephemeral disks. 
+
+   (The wait loop runs inside the per-node loop so that it checks each node's
+   own partition UUID.)
+
+   ::
+
+      for COMPUTE in compute-0 compute-1; do
+        echo "Configuring Nova local for: $COMPUTE"
+        ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
+        ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
+        PARTITION_SIZE=10
+        NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${PARTITION_SIZE})
+        NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
+        system host-lvg-add ${COMPUTE} nova-local
+        system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}
+
+        echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready."
+        while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done
+      done
+
+^^^^^^^^^^^^^^^^^^^^
+Unlock compute nodes
+^^^^^^^^^^^^^^^^^^^^
+
+Unlock compute nodes in order to bring them into service:
+
+::
+
+   for COMPUTE in compute-0 compute-1; do
+      system host-unlock $COMPUTE
+   done
+
+The compute nodes will reboot in order to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
+
+Your Kubernetes cluster is up and running.
+
+***************************
+Access StarlingX Kubernetes
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-kubernetes-start:
+   :end-before: incl-access-starlingx-kubernetes-end:
+
+-------------------
+StarlingX OpenStack
+-------------------
+
+***************************
+Install StarlingX OpenStack
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-install-starlingx-openstack-start:
+   :end-before: incl-install-starlingx-openstack-end:
+
+**************************
+Access StarlingX OpenStack
+**************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-openstack-start:
+   :end-before: incl-access-starlingx-openstack-end:
+
+*****************************
+Uninstall StarlingX OpenStack
+*****************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-uninstall-starlingx-openstack-start:
+   :end-before: incl-uninstall-starlingx-openstack-end:
diff --git a/doc/source/deploy_install_guides/upcoming/ironic.rst b/doc/source/deploy_install_guides/current/bare_metal_ironic.rst
similarity index 83%
rename from doc/source/deploy_install_guides/upcoming/ironic.rst
rename to doc/source/deploy_install_guides/current/bare_metal_ironic.rst
index a8fa18168..48baa3a5d 100644
--- a/doc/source/deploy_install_guides/upcoming/ironic.rst
+++ b/doc/source/deploy_install_guides/current/bare_metal_ironic.rst
@@ -1,17 +1,17 @@
-=======================================
-Standard configuration with Ironic R2.0
-=======================================
+====================================
+Bare metal Standard with Ironic R2.0
+====================================
 
 .. contents::
    :local:
    :depth: 1
 
-
 ------------
 Introduction
 ------------
 
-Ironic is an OpenStack project which provisions bare metal machines, see
+Ironic is an OpenStack project that provisions bare metal machines. For
+information about the Ironic project, see
 `Ironic Documentation <https://docs.openstack.org/ironic/latest/>`__.
 
 End user applications can be deployed on bare metal servers (instead of
@@ -20,29 +20,29 @@ more bare metal servers.
 
 ..
figure:: figures/starlingx-deployment-options-ironic.png :scale: 90% - :alt: Standard configuration with Ironic + :alt: Standard with Ironic deployment configuration - *Standard configuration with Ironic* + *Figure 1: Standard with Ironic deployment configuration* -Bare metal servers: +Bare metal servers must be connected to: -- Must be connected to IPMI for OpenStack Ironic control, and -- Must have their untagged physical interface, over which they support PXE - booting, connected to the ironic-provisioning-net tenant network. +- IPMI for OpenStack Ironic control +- ironic-provisioning-net tenant network via their untagged physical interface, + which supports PXE booting As part of configuring OpenStack Ironic in StarlingX: - An ironic-provisioning-net tenant network must be identified as the boot network for bare metal nodes. - An additional untagged physical interface must be configured on controller - nodes and connected to the ironic-provisioning-net tenant network. The + nodes and connected to the ironic-provisioning-net tenant network. The OpenStack Ironic tftpboot server will PXE boot the bare metal servers over this interface. .. note:: Bare metal servers are NOT: - Running any OpenStack / StarlingX software; they are running - end user applications (i.e. Glance Images). + end user applications (for example, Glance Images). - To be connected to the internal management network. -------------------- @@ -50,16 +50,14 @@ Installation options -------------------- 1. StarlingX currently only supports a bare metal installation of Ironic. - OpenStack actually does support a virtual machine based Ironic deployment - but it has never been tested in StarlingX. 2. StarlingX currently only supports Ironic deployment in a 'Standard' - configuration (i.e. Standard with Controller-Storage or Standard with - Dedicated-Storage). + configuration, either Standard with Controller-Storage or Standard with + Dedicated-Storage. -3. This "guide" assumes that you currently already have a standard deployment +3. This guide assumes that you currently already have a standard deployment installed and configured with 2x controllers and at least 1x compute - node and the stx-openstack application applied. + node and the StarlingX OpenStack application (stx-openstack) applied. This installation & deployment procedure starts from this point. -------------------- @@ -80,17 +78,14 @@ Hardware requirements - Additional NIC port on both controller nodes for connecting to the ironic-provisioning-net. - - Alternatively a new VLAN interface could be used instead of an additional - NIC port, but this has not been verified and would require changes to - configuration commands later in this installation procedure. - For compute nodes: - - Additional NIC port on one of compute nodes if using a flat data network - for the ironic provisioning network. - - Alternatively use a VLAN data network for the Ironic provisioning network + - Additional NIC port on one of the compute nodes, if using a flat data network + for the Ironic provisioning network. + - Alternatively, use a VLAN data network for the Ironic provisioning network and simply add the new data network to an existing interface on the - compute. + compute node. 
- Additional switch ports / configuration for new ports on controller nodes, compute node and Ironic nodes, for connectivity to Ironic provisioning @@ -100,7 +95,7 @@ Hardware requirements BMC configuration of Ironic node(s) *********************************** -Enable BMC and allocate a static IP, username and password in BIOS setting. +Enable BMC and allocate a static IP, username and password in the BIOS settings. In this case, we set: IP address @@ -117,7 +112,7 @@ Pre-configuration to enable Ironic service ------------------------------------------ All the commands in this major section are for the StarlingX platform. -You need to acquire administrative privileges: +Acquire administrative privileges: :: @@ -163,10 +158,10 @@ Network 3. Add Ironic tenant network (e.g. ironic-data): - .. note:: This network is not the same as above platform network, you can + .. note:: This network is not the same as the above platform network. You can specify any name of this besides ‘ironic’, if so, a user override - must be generated to indicate the tenant network name. Please - refer to section `Generate user Helm overrides`_. + must be generated to indicate the tenant network name. + Refer to section `Generate user Helm overrides`_ for details. :: @@ -223,11 +218,11 @@ To enable Ironic, update the following Ironic Helm Chart attributes: --set network.pxe.neutron_subnet_gateway=10.10.20.1 \ --set network.pxe.neutron_provider_network=ironic-data -`network.pxe.neutron_subnet_alloc_start` sets the DHCP start IP to Neutron for -Ironic node provision, reserve several IPs for platform. +:command:`network.pxe.neutron_subnet_alloc_start` sets the DHCP start IP to Neutron for +Ironic node provision, reserves several IPs for the platform. If data network name for Ironic is changed, modify -`network.pxe.neutron_provider_network` to above command: +:command:`network.pxe.neutron_provider_network` to above command: :: @@ -237,7 +232,7 @@ If data network name for Ironic is changed, modify Apply stx-openstack ^^^^^^^^^^^^^^^^^^^ -Re-apply the `stx-openstack` application to apply the changes to Ironic: +Re-apply the stx-openstack application to apply the changes to Ironic: :: @@ -251,7 +246,7 @@ Start an Ironic node. All the commands in this major section are for the OpenStack application with administrative privileges. From a new shell as a root user, -without sourcing `/etc/platform/openrc`: +without sourcing ``/etc/platform/openrc``: :: @@ -330,7 +325,7 @@ Create an Ironic node --driver-info ipmi_terminal_port=623 ironic-test0 3. Set `ironic-kernel` and `ironic-ramdisk` images driver information, - on this baremetal node: + on this bare metal node: :: @@ -339,7 +334,7 @@ Create an Ironic node --driver-info deploy_ramdisk=$(openstack image list | grep ironic-ramdisk | awk '{print$2}') \ ironic-test0 -4. Set resources properties on this baremetal node based in actual +4. Set resource properties on this bare metal node based on actual Ironic node capacities: :: @@ -394,7 +389,7 @@ Deploy an instance on Ironic node. All the commands in this major section are for the OpenStack application, but this time with TENANT specific privileges. From a new shell as a -root user, without sourcing `/etc/platform/openrc`: +root user, without sourcing ``/etc/platform/openrc``: :: @@ -459,15 +454,15 @@ Enable service Create instance *************** -.. note:: This keypair creation command is optional, - not a requirement to enable a baremental instance. +.. 
note:: This keypair creation command is optional, it is not
+   a requirement for enabling a bare metal instance.
 
 ::
 
    openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
 
-Create 2 new servers, one bare mental and one virtual:
+Create 2 new servers, one bare metal and one virtual:
 
 ::
 
diff --git a/doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-ironic.png b/doc/source/deploy_install_guides/current/figures/starlingx-deployment-options-ironic.png
similarity index 100%
rename from doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-ironic.png
rename to doc/source/deploy_install_guides/current/figures/starlingx-deployment-options-ironic.png
diff --git a/doc/source/deploy_install_guides/current/install_openstack.rst b/doc/source/deploy_install_guides/current/install_openstack.rst
new file mode 100644
index 000000000..79a077a4d
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/install_openstack.rst
@@ -0,0 +1,69 @@
+=================
+Install OpenStack
+=================
+
+.. contents::
+   :local:
+   :depth: 1
+
+These installation instructions assume that you have completed the following
+OpenStack-specific configuration tasks that are required by the underlying
+StarlingX Kubernetes platform:
+
+* All nodes have been labelled appropriately for their OpenStack role(s).
+* The vSwitch type has been configured.
+* The nova-local volume group has been configured on any host running the
+  compute function.
+
+--------------------------------------------
+Install application manifest and helm-charts
+--------------------------------------------
+
+#. Get the StarlingX OpenStack application (stx-openstack) manifest and helm-charts.
+   This can be from a private StarlingX build or, as shown below, from the public
+   Cengn StarlingX build off ``master`` branch:
+
+   ::
+
+     wget http://mirror.starlingx.cengn.ca/mirror/starlingx/release/2.0.0/centos/outputs/helm-charts/stx-openstack-1.0-17-centos-stable-latest.tgz
+
+#. Load the stx-openstack application's helm chart definitions into StarlingX:
+
+   ::
+
+     system application-upload stx-openstack-1.0-17-centos-stable-latest.tgz
+
+   This will:
+
+   * Load the helm charts.
+   * Internally manage helm chart override values for each chart.
+   * Automatically generate system helm chart overrides for each chart based on
+     the current state of the underlying StarlingX Kubernetes platform and the
+     recommended StarlingX configuration of OpenStack services.
+
+#. Apply the stx-openstack application in order to bring StarlingX OpenStack into
+   service.
+
+   ::
+
+     system application-apply stx-openstack
+
+#. Wait for the activation of stx-openstack to complete.
+
+   This can take 5-10 minutes, depending on the performance of your host machine.
+
+   Monitor progress with the command:
+
+   ::
+
+     watch -n 5 system application-list
+
+   When it completes, your OpenStack cloud is up and running.
+
+--------------------------
+Access StarlingX OpenStack
+--------------------------
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-openstack-start:
+   :end-before: incl-access-starlingx-openstack-end:
diff --git a/doc/source/deploy_install_guides/current/uninstall_delete_openstack.rst b/doc/source/deploy_install_guides/current/uninstall_delete_openstack.rst
new file mode 100644
index 000000000..e6e3506a3
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/uninstall_delete_openstack.rst
@@ -0,0 +1,33 @@
+===================
+Uninstall OpenStack
+===================
+
+This section provides additional commands for uninstalling and deleting the
+OpenStack application.
+
+.. warning::
+
+   Uninstalling the OpenStack application will terminate all OpenStack services.
+
+-----------------------------
+Bring down OpenStack services
+-----------------------------
+
+Use the system CLI to uninstall the OpenStack application:
+
+::
+
+  system application-remove stx-openstack
+  system application-list
+
+---------------------------------------
+Delete OpenStack application definition
+---------------------------------------
+
+Use the system CLI to delete the OpenStack application definition:
+
+::
+
+  system application-delete stx-openstack
+  system application-list
+
diff --git a/doc/source/deploy_install_guides/current/virtual_aio_duplex.rst b/doc/source/deploy_install_guides/current/virtual_aio_duplex.rst
new file mode 100644
index 000000000..d1410414d
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/virtual_aio_duplex.rst
@@ -0,0 +1,607 @@
+==============================
+Virtual All-in-one Duplex R2.0
+==============================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. incl-aio-duplex-intro-start:
+
+The All-in-one Duplex (AIO-DX) deployment option provides all three cloud
+functions (controller, compute, and storage) on two servers.
+
+An AIO-DX configuration provides the following benefits:
+
+* Only a small amount of cloud processing and storage power is required
+* Application consolidation using multiple virtual machines on a single pair of
+  physical servers
+* High availability (HA) services run on the controller function across two
+  physical servers in either active/active or active/standby mode
+* A storage backend solution using a two-node CEPH deployment across two servers
+* Virtual machines scheduled on both compute functions
+* Protection against overall server hardware fault, where
+
+  * All controller HA services go active on the remaining healthy server
+  * All virtual machines are recovered on the remaining healthy server
+
+.. figure:: figures/starlingx-deployment-options-duplex.png
+   :scale: 50%
+   :alt: All-in-one Duplex deployment configuration
+
+   *Figure 1: All-in-one Duplex deployment configuration*
+
+.. incl-aio-duplex-intro-end:
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-ipv6-note-start:
+   :end-before: incl-ipv6-note-end:
+
+--------------------------
+Physical host requirements
+--------------------------
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-virt-physical-host-req-start:
+   :end-before: incl-virt-physical-host-req-end:
+
+-----------------------------------------------------
+Preparing the virtual environment and virtual servers
+-----------------------------------------------------
+
+Prepare the virtual environment and virtual servers with the following steps:
+
+#. Set up virtual platform networks for virtual deployment:
+
+   ::
+
+     bash setup_network.sh
+
+#. 
Create the XML definitions for the virtual servers required by this + configuration option. This creates the XML virtual server definition for: + + * duplex-controller-0 + * duplex-controller-1 + + .. note:: + + The following command will start/virtually power on: + + * the 'duplex-controller-0' virtual server + * the X-based graphical virt-manager application + + If there is no X-server present, then errors are returned. + + :: + + bash setup_configuration.sh -c duplex -i ./bootimage.iso + +-------------------- +StarlingX Kubernetes +-------------------- + +***************************************** +Install the StarlingX Kubernetes platform +***************************************** + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Install software on controller-0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the last step of "Prepare the virtual environment and virtual servers" the +controller-0 virtual server 'duplex-controller-0' was started by the +:command:`setup_configuration.sh` command. + +Attach to the console of virtual controller-0 and select the appropriate +installer menu options to start the non-interactive install of +StarlingX software on controller-0. + +.. note:: + + When entering the console, it is very easy to miss the first installer menu + selection. Use ESC to navigate to previous menus, to ensure you are at the + first installer menu. + +:: + + virsh console duplex-controller-0 + +Make the following menu selections in the installer: + +#. First menu: Select 'All-in-one Controller Configuration' +#. Second menu: Select 'Graphical Console' +#. Third menu: Select 'Standard Security Profile' + +Wait for the non-interactive install of software to complete and for the server +to reboot. This can take 5-10 minutes, depending on the performance of the host +machine. + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Bootstrap system on controller-0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Log in using the username / password of "sysadmin" / "sysadmin". + When logging in for the first time, you will be forced to change the password. + + :: + + Login: sysadmin + Password: + Changing password for sysadmin. + (current) UNIX Password: sysadmin + New Password: + (repeat) New Password: + +#. External connectivity is required to run the Ansible bootstrap playbook. + + :: + + export CONTROLLER0_OAM_CIDR=10.10.10.3/24 + export DEFAULT_OAM_GATEWAY=10.10.10.1 + sudo ip address add $CONTROLLER0_OAM_CIDR dev enp7s1 + sudo ip link set up dev enp7s1 + sudo ip route add default via $DEFAULT_OAM_GATEWAY dev enp7s1 + +#. Specify user configuration overrides for the Ansible bootstrap playbook. + + Ansible is used to bootstrap StarlingX on controller-0: + + * The default Ansible inventory file, ``/etc/ansible/hosts``, contains a single + host: localhost. + * The Ansible bootstrap playbook is at: + ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml`` + * The default configuration values for the bootstrap playbook are in + ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/host_vars/default.yml`` + * By default Ansible looks for and imports user configuration override files + for hosts in the sysadmin home directory ($HOME), for example: ``$HOME/.yml`` + + Specify the user configuration override file for the ansible bootstrap + playbook, by either: + + * Copying the default.yml file listed above to ``$HOME/localhost.yml`` and edit + the configurable values as desired, based on the commented instructions in + the file. 
+
+   or
+
+   * Creating the minimal user configuration override file as shown in the
+     example below:
+
+     ::
+
+       cd ~
+       cat <<EOF > localhost.yml
+       system_mode: duplex
+
+       dns_servers:
+         - 8.8.8.8
+         - 8.8.4.4
+
+       external_oam_subnet: 10.10.10.0/24
+       external_oam_gateway_address: 10.10.10.1
+       external_oam_floating_address: 10.10.10.2
+       external_oam_node_0_address: 10.10.10.3
+       external_oam_node_1_address: 10.10.10.4
+
+       admin_username: admin
+       admin_password: <sysadmin-password>
+       ansible_become_pass: <sysadmin-password>
+       EOF
+
+   If you are using IPv6, provide IPv6 configuration overrides. Note that all
+   addressing, except pxeboot_subnet, should be updated to IPv6 addressing.
+   Example IPv6 override values are shown below:
+
+   ::
+
+     dns_servers:
+       - 2001:4860:4860::8888
+       - 2001:4860:4860::8844
+     pxeboot_subnet: 169.254.202.0/24
+     management_subnet: 2001:db8:2::/64
+     cluster_host_subnet: 2001:db8:3::/64
+     cluster_pod_subnet: 2001:db8:4::/64
+     cluster_service_subnet: 2001:db8:4::/112
+     external_oam_subnet: 2001:db8:1::/64
+     external_oam_gateway_address: 2001:db8::1
+     external_oam_floating_address: 2001:db8::2
+     external_oam_node_0_address: 2001:db8::3
+     external_oam_node_1_address: 2001:db8::4
+     management_multicast_subnet: ff08::1:1:0/124
+
+#. Run the Ansible bootstrap playbook:
+
+   ::
+
+     ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml
+
+   Wait for the Ansible bootstrap playbook to complete.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+#. Acquire admin credentials:
+
+   ::
+
+     source /etc/platform/openrc
+
+#. Configure the OAM and MGMT interfaces of controller-0 and specify the
+   attached networks:
+
+   ::
+
+     OAM_IF=enp7s1
+     MGMT_IF=enp7s2
+     system host-if-modify controller-0 lo -c none
+     IFNET_UUIDS=$(system interface-network-list controller-0 | awk '{if ($6=="lo") print $4;}')
+     for UUID in $IFNET_UUIDS; do
+         system interface-network-remove ${UUID}
+     done
+     system host-if-modify controller-0 $OAM_IF -c platform
+     system interface-network-assign controller-0 $OAM_IF oam
+     system host-if-modify controller-0 $MGMT_IF -c platform
+     system interface-network-assign controller-0 $MGMT_IF mgmt
+     system interface-network-assign controller-0 $MGMT_IF cluster-host
+
+#. Configure NTP Servers for network time synchronization:
+
+   .. note::
+
+      In a virtual environment, this can sometimes cause Ceph clock skew alarms.
+      Also, the virtual instance's clock is synchronized with the host clock,
+      so it is not absolutely required to configure NTP in this step.
+
+   ::
+
+     system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org
+
+#. Configure data interfaces for controller-0.
+
+   .. note::
+
+      This step is **required** for OpenStack and optional for Kubernetes.
+      For example, do this step if you are using SRIOV network attachments in
+      application containers.
+ + For Kubernetes SRIOV network attachments: + + * Configure the SRIOV device plugin: + :: + + system host-label-assign controller-0 sriovdp=enabled + + * If planning on running DPDK in containers on this host, configure the number + of 1G Huge pages required on both NUMA nodes: + + :: + + system host-memory-modify controller-0 0 -1G 100 + system host-memory-modify controller-0 1 -1G 100 + + For both Kubernetes and OpenStack: + + :: + + DATA0IF=eth1000 + DATA1IF=eth1001 + export COMPUTE=controller-0 + PHYSNET0='physnet0' + PHYSNET1='physnet1' + SPL=/tmp/tmp-system-port-list + SPIL=/tmp/tmp-system-host-if-list + system host-port-list ${COMPUTE} --nowrap > ${SPL} + system host-if-list -a ${COMPUTE} --nowrap > ${SPIL} + DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}') + DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}') + DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}') + DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}') + DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}') + DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}') + DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}') + DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}') + + system datanetwork-add ${PHYSNET0} vlan + system datanetwork-add ${PHYSNET1} vlan + + system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} + system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID} + system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0} + system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1} + +#. Add an OSD on controller-0 for ceph: + + :: + + system host-disk-list controller-0 + system host-disk-list controller-0 | awk '/\/dev\/sdb/{print $2}' | xargs -i system host-stor-add controller-0 {} + system host-stor-list controller-0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to controller-0 in + support of installing the stx-openstack manifest/helm-charts later. + + :: + + system host-label-assign controller-0 openstack-control-plane=enabled + system host-label-assign controller-0 openstack-compute-node=enabled + system host-label-assign controller-0 openvswitch=enabled + system host-label-assign controller-0 sriov=enabled + +#. **For OpenStack only:** A vSwitch is required. + + The default vSwitch is containerized OVS that is packaged with the + stx-openstack manifest/helm-charts. StarlingX provides the option to use + OVS-DPDK on the host, however, in the virtual environment OVS-DPDK is NOT + supported, only OVS is supported. Therefore, simply use the default OVS + vSwitch here. + +#. **For OpenStack Only:** Set up disk partition for nova-local volume group, + which is needed for stx-openstack nova ephemeral disks. 
+ + :: + + export COMPUTE=controller-0 + + echo ">>> Getting root disk info" + ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}') + ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}') + echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID" + + echo ">>>> Configuring nova-local" + NOVA_SIZE=34 + NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE}) + NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}') + system host-lvg-add ${COMPUTE} nova-local + system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID} + sleep 2 + + echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready." + while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-0 +^^^^^^^^^^^^^^^^^^^ + +Unlock controller-0 to bring it into service: + +:: + + system host-unlock controller-0 + +Controller-0 will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Install software on controller-1 node +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Power on the controller-1 virtual server, 'duplex-controller-1', and force it + to network boot by pressing F12 and selecting 'lan' as the alternative boot + option: + + :: + + virsh start duplex-controller-1 + +#. Attach to the console of virtual controller-1: + + :: + + virsh console duplex-controller-1 + + As controller-1 VM boots, a message appears on its console instructing you to + configure the personality of the node. + +#. On the console of controller-0, list hosts to see the newly discovered + controller-1 host, that is, the host with hostname of None: + + :: + + system host-list + +----+--------------+-------------+----------------+-------------+--------------+ + | id | hostname | personality | administrative | operational | availability | + +----+--------------+-------------+----------------+-------------+--------------+ + | 1 | controller-0 | controller | unlocked | enabled | available | + | 2 | None | None | locked | disabled | offline | + +----+--------------+-------------+----------------+-------------+--------------+ + +#. Using the host id, set the personality of this host to 'controller': + + :: + + system host-update 2 personality=controller + +#. Wait for the software installation on controller-1 to complete, controller-1 to + reboot, and controller-1 to show as locked/disabled/online in 'system host-list'. + This can take 5-10 minutes, depending on the performance of the host machine. + + :: + + system host-list + +----+--------------+-------------+----------------+-------------+--------------+ + | id | hostname | personality | administrative | operational | availability | + +----+--------------+-------------+----------------+-------------+--------------+ + | 1 | controller-0 | controller | unlocked | enabled | available | + | 2 | controller-1 | controller | locked | disabled | online | + +----+--------------+-------------+----------------+-------------+--------------+ + +^^^^^^^^^^^^^^^^^^^^^^ +Configure controller-1 +^^^^^^^^^^^^^^^^^^^^^^ + +#. Configure the OAM and MGMT interfaces of controller-1 and specify the + attached networks. 
Note that the MGMT interface is partially set up
+   automatically by the network install procedure.
+
+   ::
+
+     OAM_IF=enp7s1
+     system host-if-modify controller-1 $OAM_IF -c platform
+     system interface-network-assign controller-1 $OAM_IF oam
+     system interface-network-assign controller-1 mgmt0 cluster-host
+
+#. Configure data interfaces for controller-1.
+
+   .. note::
+
+      This step is **required** for OpenStack and optional for Kubernetes. For
+      example, do this step if using SRIOV network attachments in application
+      containers.
+
+   For Kubernetes SRIOV network attachments:
+
+   * Configure the SRIOV device plugin:
+
+     ::
+
+       system host-label-assign controller-1 sriovdp=enabled
+
+   * If planning on running DPDK in containers on this host, configure the number
+     of 1G Huge pages required on both NUMA nodes:
+
+     ::
+
+       system host-memory-modify controller-1 0 -1G 100
+       system host-memory-modify controller-1 1 -1G 100
+
+   For both Kubernetes and OpenStack:
+
+   ::
+
+     DATA0IF=eth1000
+     DATA1IF=eth1001
+     export COMPUTE=controller-1
+     PHYSNET0='physnet0'
+     PHYSNET1='physnet1'
+     SPL=/tmp/tmp-system-port-list
+     SPIL=/tmp/tmp-system-host-if-list
+     system host-port-list ${COMPUTE} --nowrap > ${SPL}
+     system host-if-list -a ${COMPUTE} --nowrap > ${SPIL}
+     DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}')
+     DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}')
+     DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
+     DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
+     DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
+     DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
+     DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
+     DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')
+
+     system datanetwork-add ${PHYSNET0} vlan
+     system datanetwork-add ${PHYSNET1} vlan
+
+     system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
+     system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
+     system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
+     system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
+
+#. Add an OSD on controller-1 for ceph:
+
+   ::
+
+     echo ">>> Add OSDs to primary tier"
+     system host-disk-list controller-1
+     system host-disk-list controller-1 | awk '/\/dev\/sdb/{print $2}' | xargs -i system host-stor-add controller-1 {}
+     system host-stor-list controller-1
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OpenStack-specific host configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+#. **For OpenStack only:** Assign OpenStack host labels to controller-1 in
+   support of installing the stx-openstack manifest/helm-charts later:
+
+   ::
+
+     system host-label-assign controller-1 openstack-control-plane=enabled
+     system host-label-assign controller-1 openstack-compute-node=enabled
+     system host-label-assign controller-1 openvswitch=enabled
+     system host-label-assign controller-1 sriov=enabled
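+
+   Optionally, verify the configuration applied to controller-1 before
+   proceeding. This is a minimal sanity check that reuses only listing
+   commands already shown in this guide:
+
+   ::
+
+     # Confirm the data interfaces, OSD, and network assignments on controller-1
+     system host-if-list -a controller-1
+     system host-stor-list controller-1
+     system interface-network-list controller-1
+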
+#. **For OpenStack only:** Set up disk partition for nova-local volume group,
+   which is needed for stx-openstack nova ephemeral disks:
+
+   ::
+
+     export COMPUTE=controller-1
+
+     echo ">>> Getting root disk info"
+     ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
+     ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
+     echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID"
+
+     echo ">>>> Configuring nova-local"
+     NOVA_SIZE=34
+     NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE})
+     NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
+     system host-lvg-add ${COMPUTE} nova-local
+     system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-1
+^^^^^^^^^^^^^^^^^^^
+
+Unlock controller-1 in order to bring it into service:
+
+::
+
+  system host-unlock controller-1
+
+Controller-1 will reboot in order to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
+
+When it completes, your Kubernetes cluster is up and running.
+
+***************************
+Access StarlingX Kubernetes
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-kubernetes-start:
+   :end-before: incl-access-starlingx-kubernetes-end:
+
+-------------------
+StarlingX OpenStack
+-------------------
+
+***************************
+Install StarlingX OpenStack
+***************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-install-starlingx-openstack-start:
+   :end-before: incl-install-starlingx-openstack-end:
+
+**************************
+Access StarlingX OpenStack
+**************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-access-starlingx-openstack-start:
+   :end-before: incl-access-starlingx-openstack-end:
+
+*****************************
+Uninstall StarlingX OpenStack
+*****************************
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-uninstall-starlingx-openstack-start:
+   :end-before: incl-uninstall-starlingx-openstack-end:
diff --git a/doc/source/deploy_install_guides/current/virtual_aio_simplex.rst b/doc/source/deploy_install_guides/current/virtual_aio_simplex.rst
new file mode 100644
index 000000000..4c2a202c3
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/virtual_aio_simplex.rst
@@ -0,0 +1,514 @@
+===============================
+Virtual All-in-one Simplex R2.0
+===============================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. incl-aio-simplex-intro-start:
+
+The All-in-one Simplex (AIO-SX) deployment option provides all three cloud
+functions (controller, compute, and storage) on a single server.
+
+An AIO-SX configuration provides the following benefits:
+
+* Only a small amount of cloud processing and storage power is required
+* Application consolidation using multiple virtual machines on a single
+  physical server
+* A storage backend solution using a single-node CEPH deployment
+
+An AIO-SX deployment provides no protection against overall server hardware
+fault, as protection is either not required or provided at a higher level.
+Hardware component protection can be enabled with, for example, a hardware RAID
+or 2x Port LAG in the deployment.
+
+
+.. 
figure:: figures/starlingx-deployment-options-simplex.png + :scale: 50% + :alt: All-in-one Simplex deployment configuration + + *Figure 1: All-in-one Simplex deployment configuration* + +.. incl-aio-simplex-intro-end: + +.. incl-ipv6-note-start: + +.. note:: + + By default, StarlingX uses IPv4. To use StarlingX with IPv6: + + * The entire infrastructure and cluster configuration must be IPv6, with the + exception of the PXE boot network. + + * Not all external servers are reachable via IPv6 addresses (e.g. Docker + registries). Depending on your infrastructure, it may be necessary to deploy + a NAT64/DNS64 gateway to translate the IPv4 addresses to IPv6. + +.. incl-ipv6-note-end: + +-------------------------- +Physical host requirements +-------------------------- + +.. incl-virt-physical-host-req-start: + +This section describes: + +* system requirements for the workstation hosting the virtual machine(s) where StarlingX will be deployed + +* host setup + +********************* +Hardware requirements +********************* + +The host system should have at least: + +* **Processor:** x86_64 only supported architecture with BIOS enabled hardware + virtualization extensions + +* **Cores:** 8 + +* **Memory:** 32GB RAM + +* **Hard Disk:** 500GB HDD + +* **Network:** One network adapter with active Internet connection + +********************* +Software requirements +********************* + +The host system should have at least: + +* A workstation computer with Ubuntu 16.04 LTS 64-bit + +All other required packages will be installed by scripts in the StarlingX tools repository. + +********** +Host setup +********** + +Set up the host with the following steps: + +#. Update OS: + + :: + + apt-get update + +#. Clone the StarlingX tools repository: + + :: + + apt-get install -y git + cd $HOME + git clone https://opendev.org/starlingx/tools + +#. Install required packages: + + :: + + cd $HOME/tools/deployment/libvirt/ + bash install_packages.sh + apt install -y apparmor-profiles + apt-get install -y ufw + ufw disable + ufw status + +#. Get the StarlingX ISO. This can be from a private StarlingX build or from the public Cengn + StarlingX build off 'master' branch, as shown below: + + :: + + wget http://mirror.starlingx.cengn.ca/mirror/starlingx/release/2.0.0/centos/outputs/iso/bootimage.iso + +.. incl-virt-physical-host-req-end: + +----------------------------------------------------- +Preparing the virtual environment and virtual servers +----------------------------------------------------- + +Prepare the virtual environment and virtual servers with the following steps: + +#. Set up virtual platform networks for virtual deployment: + + :: + + bash setup_network.sh + +#. Create the XML definitions for the virtual servers required by this + configuration option. This creates the XML virtual server definition for: + + * simplex-controller-0 + + .. note:: + + The following command will start/virtually power on: + + * the 'simplex-controller-0' virtual server + * the X-based graphical virt-manager application + + If there is no X-server present, then errors will occur. 
+
+   ::
+
+     bash setup_configuration.sh -c simplex -i ./bootimage.iso
+
+--------------------
+StarlingX Kubernetes
+--------------------
+
+*****************************************
+Install the StarlingX Kubernetes platform
+*****************************************
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the last step of "Prepare the virtual environment and virtual servers", the
+controller-0 virtual server 'simplex-controller-0' was started by the
+:command:`setup_configuration.sh` command.
+
+Attach to the console of virtual controller-0 and select the appropriate
+installer menu options to start the non-interactive install of
+StarlingX software on controller-0.
+
+.. note::
+
+   When entering the console, it is very easy to miss the first installer menu
+   selection. Use ESC to navigate to previous menus, to ensure you are at the
+   first installer menu.
+
+::
+
+  virsh console simplex-controller-0
+
+Make the following menu selections in the installer:
+
+#. First menu: Select 'All-in-one Controller Configuration'
+#. Second menu: Select 'Graphical Console'
+#. Third menu: Select 'Standard Security Profile'
+
+Wait for the non-interactive install of software to complete and for the server
+to reboot. This can take 5-10 minutes, depending on the performance of the host
+machine.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Bootstrap system on controller-0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Log in using the username / password of "sysadmin" / "sysadmin".
+   When logging in for the first time, you will be forced to change the password.
+
+   ::
+
+     Login: sysadmin
+     Password:
+     Changing password for sysadmin.
+     (current) UNIX Password: sysadmin
+     New Password:
+     (repeat) New Password:
+
+#. External connectivity is required to run the Ansible bootstrap playbook.
+
+   ::
+
+     export CONTROLLER0_OAM_CIDR=10.10.10.3/24
+     export DEFAULT_OAM_GATEWAY=10.10.10.1
+     sudo ip address add $CONTROLLER0_OAM_CIDR dev enp7s1
+     sudo ip link set up dev enp7s1
+     sudo ip route add default via $DEFAULT_OAM_GATEWAY dev enp7s1
+
+#. Specify user configuration overrides for the Ansible bootstrap playbook.
+
+   Ansible is used to bootstrap StarlingX on controller-0:
+
+   * The default Ansible inventory file, ``/etc/ansible/hosts``, contains a single
+     host: localhost.
+   * The Ansible bootstrap playbook is at:
+     ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml``
+   * The default configuration values for the bootstrap playbook are in:
+     ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/host_vars/default.yml``
+   * By default Ansible looks for and imports user configuration override files
+     for hosts in the sysadmin home directory ($HOME), for example: ``$HOME/<hostname>.yml``
+
+   Specify the user configuration override file for the Ansible bootstrap
+   playbook, by either:
+
+   * Copying the default.yml file listed above to ``$HOME/localhost.yml`` and
+     editing the configurable values as desired, based on the commented
+     instructions in the file.
+
+   or
+
+   * Creating the minimal user configuration override file as shown in the
+     example below:
+
+     ::
+
+       cd ~
+       cat <<EOF > localhost.yml
+       system_mode: simplex
+
+       dns_servers:
+         - 8.8.8.8
+         - 8.8.4.4
+
+       external_oam_subnet: 10.10.10.0/24
+       external_oam_gateway_address: 10.10.10.1
+       external_oam_floating_address: 10.10.10.2
+
+       admin_username: admin
+       admin_password: <sysadmin-password>
+       ansible_become_pass: <sysadmin-password>
+       EOF
+
+
+   If you are using IPv6, provide IPv6 configuration overrides. Note that all
+   addressing, except pxeboot_subnet, should be updated to IPv6 addressing.
+   Example IPv6 override values are shown below:
+
+   ::
+
+     dns_servers:
+       - 2001:4860:4860::8888
+       - 2001:4860:4860::8844
+     pxeboot_subnet: 169.254.202.0/24
+     management_subnet: 2001:db8:2::/64
+     cluster_host_subnet: 2001:db8:3::/64
+     cluster_pod_subnet: 2001:db8:4::/64
+     cluster_service_subnet: 2001:db8:4::/112
+     external_oam_subnet: 2001:db8:1::/64
+     external_oam_gateway_address: 2001:db8::1
+     external_oam_floating_address: 2001:db8::2
+     management_multicast_subnet: ff08::1:1:0/124
+
+#. Run the Ansible bootstrap playbook:
+
+   ::
+
+     ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml
+
+   Wait for the Ansible bootstrap playbook to complete.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+#. Acquire admin credentials:
+
+   ::
+
+     source /etc/platform/openrc
+
+#. Configure the OAM interface of controller-0:
+
+   ::
+
+     OAM_IF=enp7s1
+     system host-if-modify controller-0 $OAM_IF -c platform
+     system interface-network-assign controller-0 $OAM_IF oam
+
+#. Configure NTP Servers for network time synchronization:
+
+   .. note::
+
+      In a virtual environment, this can sometimes cause Ceph clock skew alarms.
+      Also, the virtual instance's clock is synchronized with the host clock,
+      so it is not absolutely required to configure NTP in this step.
+
+   ::
+
+     system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org
+
+#. Configure data interfaces for controller-0.
+
+   .. note::
+
+      This step is **required** for OpenStack and optional for Kubernetes. For
+      example, do this step if using SRIOV network attachments in application
+      containers.
+
+   For Kubernetes SRIOV network attachments:
+
+   * Configure the SRIOV device plugin:
+
+     ::
+
+       system host-label-assign controller-0 sriovdp=enabled
+
+   * If planning on running DPDK in containers on this host, configure the number
+     of 1G Huge pages required on both NUMA nodes:
+
+     ::
+
+       system host-memory-modify controller-0 0 -1G 100
+       system host-memory-modify controller-0 1 -1G 100
+
+   For both Kubernetes and OpenStack:
+
+   ::
+
+     DATA0IF=eth1000
+     DATA1IF=eth1001
+     export COMPUTE=controller-0
+     PHYSNET0='physnet0'
+     PHYSNET1='physnet1'
+     SPL=/tmp/tmp-system-port-list
+     SPIL=/tmp/tmp-system-host-if-list
+     system host-port-list ${COMPUTE} --nowrap > ${SPL}
+     system host-if-list -a ${COMPUTE} --nowrap > ${SPIL}
+     DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}')
+     DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}')
+     DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
+     DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
+     DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
+     DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
+     DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
+     DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')
+
+     system datanetwork-add ${PHYSNET0} vlan
+     system datanetwork-add ${PHYSNET1} vlan
+
+     system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
+     system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
+     system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
+     system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
+
+#. 
Add an OSD on controller-0 for ceph: + + :: + + system host-disk-list controller-0 + system host-disk-list controller-0 | awk '/\/dev\/sdb/{print $2}' | xargs -i system host-stor-add controller-0 {} + system host-stor-list controller-0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to controller-0 in + support of installing the stx-openstack manifest/helm-charts later. + + :: + + system host-label-assign controller-0 openstack-control-plane=enabled + system host-label-assign controller-0 openstack-compute-node=enabled + system host-label-assign controller-0 openvswitch=enabled + system host-label-assign controller-0 sriov=enabled + +#. **For OpenStack only**: A vSwitch is required. + + The default vSwitch is containerized OVS that is packaged with the + stx-openstack manifest/helm-charts. StarlingX provides the option to use + OVS-DPDK on the host, however, in the virtual environment OVS-DPDK is NOT + supported, only OVS is supported. Therefore, simply use the default OVS + vSwitch here. + +#. **For OpenStack Only:** Set up disk partition for nova-local volume group, + which is needed for stx-openstack nova ephemeral disks. + + :: + + export COMPUTE=controller-0 + + echo ">>> Getting root disk info" + ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}') + ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}') + echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID" + + echo ">>>> Configuring nova-local" + NOVA_SIZE=34 + NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE}) + NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}') + system host-lvg-add ${COMPUTE} nova-local + system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID} + sleep 2 + + echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready." + while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-0 +^^^^^^^^^^^^^^^^^^^ + +Unlock controller-0 to bring it into service: + +:: + + system host-unlock controller-0 + +Controller-0 will reboot to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +When it completes, your Kubernetes cluster is up and running. + +*************************** +Access StarlingX Kubernetes +*************************** + +.. incl-access-starlingx-kubernetes-start: + +Use local/remote CLIs, GUIs, and/or REST APIs to access and manage StarlingX +Kubernetes and hosted containerized applications. Refer to details on accessing +the StarlingX Kubernetes cluster in the +:doc:`Access StarlingX Kubernetes guide `. + +.. incl-access-starlingx-kubernetes-end: + +------------------- +StarlingX OpenStack +------------------- + +*************************** +Install StarlingX OpenStack +*************************** + +.. 
incl-install-starlingx-openstack-start:
+
+Other than the OpenStack-specific configurations required in the underlying
+StarlingX/Kubernetes infrastructure (described in the installation steps for the
+StarlingX Kubernetes platform above), the installation of containerized OpenStack
+for StarlingX is independent of deployment configuration. Refer to the
+:doc:`Install OpenStack guide <install_openstack>`
+for installation instructions.
+
+.. incl-install-starlingx-openstack-end:
+
+**************************
+Access StarlingX OpenStack
+**************************
+
+.. incl-access-starlingx-openstack-start:
+
+Use local/remote CLIs, GUIs, and/or REST APIs to access and manage StarlingX
+OpenStack and hosted virtualized applications. Refer to details on accessing
+StarlingX OpenStack in the
+:doc:`Access StarlingX OpenStack guide <access_starlingx_openstack>`.
+
+.. incl-access-starlingx-openstack-end:
+
+*****************************
+Uninstall StarlingX OpenStack
+*****************************
+
+.. incl-uninstall-starlingx-openstack-start:
+
+Refer to the :doc:`Uninstall OpenStack guide <uninstall_delete_openstack>` for
+instructions on how to uninstall and delete the OpenStack application.
+
+.. incl-uninstall-starlingx-openstack-end:
\ No newline at end of file
diff --git a/doc/source/deploy_install_guides/current/virtual_controller_storage.rst b/doc/source/deploy_install_guides/current/virtual_controller_storage.rst
new file mode 100644
index 000000000..c27ed447f
--- /dev/null
+++ b/doc/source/deploy_install_guides/current/virtual_controller_storage.rst
@@ -0,0 +1,646 @@
+=============================================
+Virtual Standard with Controller Storage R2.0
+=============================================
+
+.. contents::
+   :local:
+   :depth: 1
+
+-----------
+Description
+-----------
+
+.. incl-controller-storage-intro-start:
+
+The Standard with Controller Storage deployment option provides two high
+availability (HA) controller nodes and two to 10 compute nodes.
+
+A Standard with Controller Storage configuration provides the following benefits:
+
+* A pool of up to 10 compute nodes
+* High availability (HA) services run across the controller nodes in either
+  active/active or active/standby mode
+* A storage backend solution using a two-node CEPH deployment across two
+  controller servers
+* Protection against overall controller and compute node failure, where
+
+  * On overall controller node failure, all controller HA services go active on
+    the remaining healthy controller node
+  * On overall compute node failure, virtual machines and containers are
+    recovered on the remaining healthy compute nodes
+
+.. figure:: figures/starlingx-deployment-options-controller-storage.png
+   :scale: 50%
+   :alt: Standard with Controller Storage deployment configuration
+
+   *Figure 1: Standard with Controller Storage deployment configuration*
+
+.. incl-controller-storage-intro-end:
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-ipv6-note-start:
+   :end-before: incl-ipv6-note-end:
+
+--------------------------
+Physical host requirements
+--------------------------
+
+.. include:: virtual_aio_simplex.rst
+   :start-after: incl-virt-physical-host-req-start:
+   :end-before: incl-virt-physical-host-req-end:
+
+-----------------------------------------------------
+Preparing the virtual environment and virtual servers
+-----------------------------------------------------
+
+#. Set up virtual platform networks for virtual deployment:
+
+   ::
+
+     bash setup_network.sh
+
+#. Create the XML definitions for the virtual servers required by this
+   configuration option.
+ + This creates the XML virtual server definition for: + + * controllerstorage-controller-0 + * controllerstorage-controller-1 + * controllerstorage-compute-0 + * controllerstorage-compute-1 + + .. note:: + + The following command will start/virtually power on: + + * the 'controllerstorage-controller-0' virtual server + * the X-based graphical virt-manager application + + If there is no X-server present, then errors are returned. + + :: + + bash setup_configuration.sh -c controllerstorage -i ./bootimage.iso + +-------------------- +StarlingX Kubernetes +-------------------- + +******************************* +Installing StarlingX Kubernetes +******************************* + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Install software on controller-0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the last step of "Prepare the virtual environment and virtual servers" the +controller-0 virtual server 'controllerstorage-controller-0' was started by the +:command:`setup_configuration.sh` command. + +Attach to the console of virtual controller-0 and select the appropriate +installer menu options to start the non-interactive install of +StarlingX software on controller-0. + +.. note:: + + When entering the console, it is very easy to miss the first installer menu + selection. Use ESC to navigate to previous menus, to ensure you are at the + first installer menu. + +:: + + virsh console controllerstorage-controller-0 + +Make the following menu selections in the installer: + +#. First menu: Select 'Standard Configuration' +#. Second menu: Select 'Graphical Console' +#. Third menu: Select 'Standard Security Profile' + +Wait for the non-interactive install of software to complete and for the server +to reboot. This can take 5-10 minutes depending on the performance of the host +machine. + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Bootstrap system on controller-0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Log in using the username / password of "sysadmin" / "sysadmin". + When logging in for the first time, you will be forced to change the password. + + :: + + Login: sysadmin + Password: + Changing password for sysadmin. + (current) UNIX Password: sysadmin + New Password: + (repeat) New Password: + +#. External connectivity is required to run the Ansible bootstrap playbook: + + :: + + export CONTROLLER0_OAM_CIDR=10.10.10.3/24 + export DEFAULT_OAM_GATEWAY=10.10.10.1 + sudo ip address add $CONTROLLER0_OAM_CIDR dev enp7s1 + sudo ip link set up dev enp7s1 + sudo ip route add default via $DEFAULT_OAM_GATEWAY dev enp7s1 + +#. Specify user configuration overrides for the Ansible bootstrap playbook. + + Ansible is used to bootstrap StarlingX on controller-0: + + * The default Ansible inventory file, ``/etc/ansible/hosts``, contains a single + host: localhost. + * The Ansible bootstrap playbook is at: + ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml`` + * The default configuration values for the bootstrap playbook are in: + ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/host_vars/default.yml`` + * By default Ansible looks for and imports user configuration override files + for hosts in the sysadmin home directory ($HOME), for example: ``$HOME/.yml`` + + Specify the user configuration override file for the ansible bootstrap + playbook, by either: + + * Copying the default.yml file listed above to ``$HOME/localhost.yml`` and edit + the configurable values as desired, based on the commented instructions in + the file. 
+
+   or
+
+   * Creating the minimal user configuration override file as shown in the
+     example below:
+
+     ::
+
+       cd ~
+       cat <<EOF > localhost.yml
+       system_mode: standard
+
+       dns_servers:
+         - 8.8.8.8
+         - 8.8.4.4
+
+       external_oam_subnet: 10.10.10.0/24
+       external_oam_gateway_address: 10.10.10.1
+       external_oam_floating_address: 10.10.10.2
+       external_oam_node_0_address: 10.10.10.3
+       external_oam_node_1_address: 10.10.10.4
+
+       admin_username: admin
+       admin_password: <sysadmin-password>
+       ansible_become_pass: <sysadmin-password>
+       EOF
+
+   If you are using IPv6, provide IPv6 configuration overrides. Note that all
+   addressing, except pxeboot_subnet, should be updated to IPv6 addressing.
+   Example IPv6 override values are shown below:
+
+   ::
+
+     dns_servers:
+       - 2001:4860:4860::8888
+       - 2001:4860:4860::8844
+     pxeboot_subnet: 169.254.202.0/24
+     management_subnet: 2001:db8:2::/64
+     cluster_host_subnet: 2001:db8:3::/64
+     cluster_pod_subnet: 2001:db8:4::/64
+     cluster_service_subnet: 2001:db8:4::/112
+     external_oam_subnet: 2001:db8:1::/64
+     external_oam_gateway_address: 2001:db8::1
+     external_oam_floating_address: 2001:db8::2
+     external_oam_node_0_address: 2001:db8::3
+     external_oam_node_1_address: 2001:db8::4
+     management_multicast_subnet: ff08::1:1:0/124
+
+#. Run the Ansible bootstrap playbook:
+
+   ::
+
+     ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml
+
+   Wait for the Ansible bootstrap playbook to complete.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-0
+^^^^^^^^^^^^^^^^^^^^^^
+
+#. Acquire admin credentials:
+
+   ::
+
+     source /etc/platform/openrc
+
+#. Configure the OAM and MGMT interfaces of controller-0 and specify the
+   attached networks:
+
+   ::
+
+     OAM_IF=enp7s1
+     MGMT_IF=enp7s2
+     system host-if-modify controller-0 lo -c none
+     IFNET_UUIDS=$(system interface-network-list controller-0 | awk '{if ($6=="lo") print $4;}')
+     for UUID in $IFNET_UUIDS; do
+         system interface-network-remove ${UUID}
+     done
+     system host-if-modify controller-0 $OAM_IF -c platform
+     system interface-network-assign controller-0 $OAM_IF oam
+     system host-if-modify controller-0 $MGMT_IF -c platform
+     system interface-network-assign controller-0 $MGMT_IF mgmt
+     system interface-network-assign controller-0 $MGMT_IF cluster-host
+
+#. Configure NTP Servers for network time synchronization:
+
+   .. note::
+
+      In a virtual environment, this can sometimes cause Ceph clock skew alarms.
+      Also, the virtual instance's clock is synchronized with the host clock,
+      so it is not absolutely required to configure NTP here.
+
+   ::
+
+     system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OpenStack-specific host configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+   The following configuration is required only if the StarlingX OpenStack
+   application (stx-openstack) will be installed.
+
+#. **For OpenStack only:** Assign OpenStack host labels to controller-0 in
+   support of installing the stx-openstack manifest/helm-charts later.
+
+   ::
+
+     system host-label-assign controller-0 openstack-control-plane=enabled
+
+#. **For OpenStack only:** A vSwitch is required.
+
+   The default vSwitch is containerized OVS that is packaged with the
+   stx-openstack manifest/helm-charts. StarlingX provides the option to use
+   OVS-DPDK on the host, however, in the virtual environment OVS-DPDK is NOT
+   supported, only OVS is supported. Therefore, simply use the default OVS
+   vSwitch here.
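+
+   For reference, on a bare metal deployment where OVS-DPDK is supported, the
+   vSwitch type would be switched before unlocking the host. The minimal
+   sketch below is based on the bare metal install guides for this release;
+   the exact options are assumptions and it should not be run in this virtual
+   deployment:
+
+   ::
+
+     # Sketch (bare metal only): select OVS-DPDK as the vSwitch type
+     system modify --vswitch_type ovs-dpdk
+     # OVS-DPDK also requires a dedicated vSwitch core on the host
+     system host-cpu-modify -f vswitch -p0 1 controller-0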
+
+^^^^^^^^^^^^^^^^^^^
+Unlock controller-0
+^^^^^^^^^^^^^^^^^^^
+
+Unlock controller-0 in order to bring it into service:
+
+::
+
+  system host-unlock controller-0
+
+Controller-0 will reboot in order to apply configuration changes and come into
+service. This can take 5-10 minutes, depending on the performance of the host machine.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install software on controller-1 and compute nodes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Power on the controller-1 virtual server, 'controllerstorage-controller-1',
+   and force it to network boot by pressing F12 and selecting 'lan' as the
+   alternative boot option:
+
+   ::
+
+     virsh start controllerstorage-controller-1
+
+#. Attach to the console of virtual controller-1:
+
+   ::
+
+     virsh console controllerstorage-controller-1
+
+   As the controller-1 VM boots, a message appears on its console instructing you
+   to configure the personality of the node.
+
+#. On the console of controller-0, list hosts to see the newly discovered
+   controller-1 host, that is, the host with a hostname of None:
+
+   ::
+
+     system host-list
+     +----+--------------+-------------+----------------+-------------+--------------+
+     | id | hostname     | personality | administrative | operational | availability |
+     +----+--------------+-------------+----------------+-------------+--------------+
+     | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+     | 2  | None         | None        | locked         | disabled    | offline      |
+     +----+--------------+-------------+----------------+-------------+--------------+
+
+#. Using the host id, set the personality of this host to 'controller':
+
+   ::
+
+     system host-update 2 personality=controller
+
+   This initiates the install of software on controller-1.
+   This can take 5-10 minutes, depending on the performance of the host machine.
+
+#. While waiting on this, repeat the same procedure for
+   'controllerstorage-compute-0' and 'controllerstorage-compute-1', except for
+   setting the personality to 'worker' and assigning a unique hostname, for example:
+
+   ::
+
+     system host-update 3 personality=worker hostname=compute-0
+     system host-update 4 personality=worker hostname=compute-1
+
+#. Wait for the software installation on controller-1, compute-0, and compute-1 to
+   complete, for all virtual servers to reboot, and for all to show as
+   locked/disabled/online in 'system host-list'.
+
+   ::
+
+     system host-list
+     +----+--------------+-------------+----------------+-------------+--------------+
+     | id | hostname     | personality | administrative | operational | availability |
+     +----+--------------+-------------+----------------+-------------+--------------+
+     | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
+     | 2  | controller-1 | controller  | locked         | disabled    | online       |
+     | 3  | compute-0    | compute     | locked         | disabled    | online       |
+     | 4  | compute-1    | compute     | locked         | disabled    | online       |
+     +----+--------------+-------------+----------------+-------------+--------------+
+
+^^^^^^^^^^^^^^^^^^^^^^
+Configure controller-1
+^^^^^^^^^^^^^^^^^^^^^^
+
+Configure the OAM and MGMT interfaces of controller-1 and specify the attached
+networks. Note that the MGMT interface is partially set up automatically by the
+network install procedure.
+ +:: + + OAM_IF=enp7s1 + system host-if-modify controller-1 $OAM_IF -c platform + system interface-network-assign controller-1 $OAM_IF oam + system interface-network-assign controller-1 mgmt0 cluster-host + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +**For OpenStack only:** Assign OpenStack host labels to controller-1 in support +of installing the stx-openstack manifest/helm-charts later: + +:: + + system host-label-assign controller-1 openstack-control-plane=enabled + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-1 +^^^^^^^^^^^^^^^^^^^ + +Unlock controller-1 in order to bring it into service: + +:: + + system host-unlock controller-1 + +Controller-1 will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^^ +Configure compute nodes +^^^^^^^^^^^^^^^^^^^^^^^ + +#. Add the third Ceph monitor to compute-0: + + (The first two Ceph monitors are automatically assigned to controller-0 and + controller-1.) + + :: + + system ceph-mon-add compute-0 + +#. Wait for the compute node monitor to complete configuration: + + :: + + system ceph-mon-list + +--------------------------------------+-------+--------------+------------+------+ + | uuid | ceph_ | hostname | state | task | + | | mon_g | | | | + | | ib | | | | + +--------------------------------------+-------+--------------+------------+------+ + | 64176b6c-e284-4485-bb2a-115dee215279 | 20 | controller-1 | configured | None | + | a9ca151b-7f2c-4551-8167-035d49e2df8c | 20 | controller-0 | configured | None | + | f76bc385-190c-4d9a-aa0f-107346a9907b | 20 | compute-0 | configured | None | + +--------------------------------------+-------+--------------+------------+------+ + +#. Assign the cluster-host network to the MGMT interface for the compute nodes. + + Note that the MGMT interfaces are partially set up automatically by the + network install procedure. + + :: + + for COMPUTE in compute-0 compute-1; do + system interface-network-assign $COMPUTE mgmt0 cluster-host + done + +#. Configure data interfaces for compute nodes. + + .. note:: + + This step is **required** for OpenStack and optional for Kubernetes. For + example, do this step if using SRIOV network attachments in application + containers. + + For Kubernetes SRIOV network attachments: + + * Configure the SRIOV device plugin: + + :: + + for COMPUTE in compute-0 compute-1; do + system host-label-assign ${COMPUTE} sriovdp=enabled + done + + * If planning on running DPDK in containers on this host, configure the number + of 1G Huge pages required on both NUMA nodes: + + :: + + for COMPUTE in compute-0 compute-1; do + system host-memory-modify ${COMPUTE} 0 -1G 100 + system host-memory-modify ${COMPUTE} 1 -1G 100 + done + + For both Kubernetes and OpenStack: + + :: + + DATA0IF=eth1000 + DATA1IF=eth1001 + PHYSNET0='physnet0' + PHYSNET1='physnet1' + SPL=/tmp/tmp-system-port-list + SPIL=/tmp/tmp-system-host-if-list + + # configure the datanetworks in sysinv, prior to referencing it + # in the ``system host-if-modify`` command'. 
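+     # physnet0 and physnet1 are created once, as VLAN-type data networks;
+     # the per-compute loop below then looks up each node's eth1000/eth1001
+     # ports, derives the corresponding interface UUIDs (the awk column
+     # numbers match the R2.0 CLI table layout), and attaches those
+     # interfaces to the data networks.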
+ system datanetwork-add ${PHYSNET0} vlan + system datanetwork-add ${PHYSNET1} vlan + + for COMPUTE in compute-0 compute-1; do + echo "Configuring interface for: $COMPUTE" + set -ex + system host-port-list ${COMPUTE} --nowrap > ${SPL} + system host-if-list -a ${COMPUTE} --nowrap > ${SPIL} + DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}') + DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}') + DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}') + DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}') + DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}') + DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}') + DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}') + DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}') + system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} + system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID} + system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0} + system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1} + set +ex + done + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to the compute nodes in + support of installing the stx-openstack manifest/helm-charts later: + + :: + + for NODE in compute-0 compute-1; do + system host-label-assign $NODE openstack-compute-node=enabled + system host-label-assign $NODE openvswitch=enabled + system host-label-assign $NODE sriov=enabled + done + +#. **For OpenStack only:** Set up disk partition for nova-local volume group, + which is needed for stx-openstack nova ephemeral disks: + + :: + + for COMPUTE in compute-0 compute-1; do + echo "Configuring Nova local for: $COMPUTE" + ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}') + ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}') + PARTITION_SIZE=10 + NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${PARTITION_SIZE}) + NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}') + system host-lvg-add ${COMPUTE} nova-local + system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID} + done + + for COMPUTE in compute-0 compute-1; do + echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready." + while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done + done + +^^^^^^^^^^^^^^^^^^^^ +Unlock compute nodes +^^^^^^^^^^^^^^^^^^^^ + +Unlock compute nodes to bring them into service: + +:: + + for COMPUTE in compute-0 compute-1; do + system host-unlock $COMPUTE + done + +The compute nodes will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Add Ceph OSDs to controllers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. 
Add OSDs to controller-0: + + :: + + HOST=controller-0 + DISKS=$(system host-disk-list ${HOST}) + TIERS=$(system storage-tier-list ceph_cluster) + OSDs="/dev/sdb" + for OSD in $OSDs; do + system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}') + while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done + done + + system host-stor-list $HOST + +#. Add OSDs to controller-1: + + :: + + HOST=controller-1 + DISKS=$(system host-disk-list ${HOST}) + TIERS=$(system storage-tier-list ceph_cluster) + OSDs="/dev/sdb" + for OSD in $OSDs; do + system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}') + while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done + done + + system host-stor-list $HOST + +Your Kubernetes cluster is now up and running. + +*************************** +Access StarlingX Kubernetes +*************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-access-starlingx-kubernetes-start: + :end-before: incl-access-starlingx-kubernetes-end: + +------------------- +StarlingX OpenStack +------------------- + +*************************** +Install StarlingX OpenStack +*************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-install-starlingx-openstack-start: + :end-before: incl-install-starlingx-openstack-end: + +************************** +Access StarlingX OpenStack +************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-access-starlingx-openstack-start: + :end-before: incl-access-starlingx-openstack-end: + +***************************** +Uninstall StarlingX OpenStack +***************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-uninstall-starlingx-openstack-start: + :end-before: incl-uninstall-starlingx-openstack-end: \ No newline at end of file diff --git a/doc/source/deploy_install_guides/current/virtual_dedicated_storage.rst b/doc/source/deploy_install_guides/current/virtual_dedicated_storage.rst new file mode 100644 index 000000000..6e7cfd965 --- /dev/null +++ b/doc/source/deploy_install_guides/current/virtual_dedicated_storage.rst @@ -0,0 +1,694 @@ +============================================ +Virtual Standard with Dedicated Storage R2.0 +============================================ + +.. contents:: + :local: + :depth: 1 + +----------- +Description +----------- + +.. incl-dedicated-storage-intro-start: + +The Standard with Dedicated Storage deployment option is a standard installation +with independent controller, compute, and storage nodes. + +A Standard with Dedicated Storage configuration provides the following benefits: + +* A pool of up to 100 compute nodes +* A 2x node high availability (HA) controller cluster with HA services running + across the controller nodes in either active/active or active/standby mode +* A storage backend solution using a 2x to 9x node HA CEPH storage cluster + that supports a replication factor of two or three +* Up to four groups of 2x storage nodes, or up to three groups of 3x storage nodes + +.. figure:: figures/starlingx-deployment-options-dedicated-storage.png + :scale: 50% + :alt: Standard with Dedicated Storage deployment configuration + + *Figure 1: Standard with Dedicated Storage deployment configuration* + +..
incl-dedicated-storage-intro-end: + +.. include:: virtual_aio_simplex.rst + :start-after: incl-ipv6-note-start: + :end-before: incl-ipv6-note-end: + +-------------------------- +Physical host requirements +-------------------------- + +.. include:: virtual_aio_simplex.rst + :start-after: incl-virt-physical-host-req-start: + :end-before: incl-virt-physical-host-req-end: + +----------------------------------------------------- +Preparing the virtual environment and virtual servers +----------------------------------------------------- + +#. Set up virtual platform networks for virtual deployment: + + :: + + bash setup_network.sh + +#. Create the XML definitions for the virtual servers required by this + configuration option. This creates the XML virtual server definition for: + + * dedicatedstorage-controller-0 + * dedicatedstorage-controller-1 + * dedicatedstorage-storage-0 + * dedicatedstorage-storage-1 + * dedicatedstorage-compute-0 + * dedicatedstorage-compute-1 + + .. note:: + + The following command will start/virtually power on: + + * the 'dedicatedstorage-controller-0' virtual server + * the X-based graphical virt-manager application + + If there is no X-server present, then errors are returned. + + :: + + bash setup_configuration.sh -c dedicatedstorage -i ./bootimage.iso + +-------------------- +StarlingX Kubernetes +-------------------- + +******************************* +Installing StarlingX Kubernetes +******************************* + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Install software on controller-0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the last step of "Prepare the virtual environment and virtual servers" the +controller-0 virtual server 'dedicatedstorage-controller-0' was started by the +:command:`setup_configuration.sh` command. + + +Attach to the console of virtual controller-0 and select the appropriate +installer menu options to start the non-interactive install of +StarlingX software on controller-0. + +.. note:: + + When entering the console, it is very easy to miss the first installer menu + selection. Use ESC to navigate to previous menus, to ensure you are at the + first installer menu. + +:: + + virsh console dedicatedstorage-controller-0 + +Make the following menu selections in the installer: + +#. First menu: Select 'Standard Configuration' +#. Second menu: Select 'Graphical Console' +#. Third menu: Select 'Standard Security Profile' + +Wait for the non-interactive install of software to complete and for the server +to reboot. This can take 5-10 minutes depending on the performance of the host +machine. + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Bootstrap system on controller-0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Log in using the username / password of "sysadmin" / "sysadmin". + When logging in for the first time, you will be forced to change the password. + + :: + + Login: sysadmin + Password: + Changing password for sysadmin. + (current) UNIX Password: sysadmin + New Password: + (repeat) New Password: + +#. External connectivity is required to run the Ansible bootstrap playbook: + + :: + + export CONTROLLER0_OAM_CIDR=10.10.10.3/24 + export DEFAULT_OAM_GATEWAY=10.10.10.1 + sudo ip address add $CONTROLLER0_OAM_CIDR dev enp7s1 + sudo ip link set up dev enp7s1 + sudo ip route add default via $DEFAULT_OAM_GATEWAY dev enp7s1 + +#. Specify user configuration overrides for the Ansible bootstrap playbook. + + Ansible is used to bootstrap StarlingX on controller-0: + + * The default Ansible inventory file, ``/etc/ansible/hosts``, contains a single + host: localhost. 
+ * The Ansible bootstrap playbook is at: + ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml`` + * The default configuration values for the bootstrap playbook are in: + ``/usr/share/ansible/stx-ansible/playbooks/bootstrap/host_vars/default.yml`` + * By default Ansible looks for and imports user configuration override files + for hosts in the sysadmin home directory ($HOME), for example: ``$HOME/<hostname>.yml`` + + Specify the user configuration override file for the Ansible bootstrap + playbook by either: + + * Copying the default.yml file listed above to ``$HOME/localhost.yml`` and editing + the configurable values as desired, based on the commented instructions in + the file. + + or + + * Creating the minimal user configuration override file as shown in the + example below: + + :: + + cd ~ + cat <<EOF > localhost.yml + system_mode: standard + + dns_servers: + - 8.8.8.8 + - 8.8.4.4 + + external_oam_subnet: 10.10.10.0/24 + external_oam_gateway_address: 10.10.10.1 + external_oam_floating_address: 10.10.10.2 + external_oam_node_0_address: 10.10.10.3 + external_oam_node_1_address: 10.10.10.4 + + admin_username: admin + admin_password: <admin-password> + ansible_become_pass: <sysadmin-password> + EOF + + If you are using IPv6, provide IPv6 configuration overrides. Note that all + addressing, except pxeboot_subnet, should be updated to IPv6 addressing. + Example IPv6 override values are shown below: + + :: + + dns_servers: + - 2001:4860:4860::8888 + - 2001:4860:4860::8844 + pxeboot_subnet: 169.254.202.0/24 + management_subnet: 2001:db8:2::/64 + cluster_host_subnet: 2001:db8:3::/64 + cluster_pod_subnet: 2001:db8:4::/64 + cluster_service_subnet: 2001:db8:4::/112 + external_oam_subnet: 2001:db8:1::/64 + external_oam_gateway_address: 2001:db8::1 + external_oam_floating_address: 2001:db8::2 + external_oam_node_0_address: 2001:db8::3 + external_oam_node_1_address: 2001:db8::4 + management_multicast_subnet: ff08::1:1:0/124 + +#. Run the Ansible bootstrap playbook: + + :: + + ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml + + Wait for the Ansible bootstrap playbook to complete. + This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^ +Configure controller-0 +^^^^^^^^^^^^^^^^^^^^^^ + +#. Acquire admin credentials: + + :: + + source /etc/platform/openrc + +#. Configure the OAM and MGMT interfaces of controller-0 and specify the + attached networks: + + :: + + OAM_IF=enp7s1 + MGMT_IF=enp7s2 + system host-if-modify controller-0 lo -c none + IFNET_UUIDS=$(system interface-network-list controller-0 | awk '{if ($6=="lo") print $4;}') + for UUID in $IFNET_UUIDS; do + system interface-network-remove ${UUID} + done + system host-if-modify controller-0 $OAM_IF -c platform + system interface-network-assign controller-0 $OAM_IF oam + system host-if-modify controller-0 $MGMT_IF -c platform + system interface-network-assign controller-0 $MGMT_IF mgmt + system interface-network-assign controller-0 $MGMT_IF cluster-host + +#. Configure NTP servers for network time synchronization: + + .. note:: + + In a virtual environment, this can sometimes cause Ceph clock skew alarms. + Also, the virtual instance clock is synchronized with the host clock, + so it is not absolutely required to configure NTP here. + + :: + + system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org + +#. 
Configure data interfaces for controller-0: + + :: + + DATA0IF=eth1000 + DATA1IF=eth1001 + export COMPUTE=controller-0 + PHYSNET0='physnet0' + PHYSNET1='physnet1' + SPL=/tmp/tmp-system-port-list + SPIL=/tmp/tmp-system-host-if-list + system host-port-list ${COMPUTE} --nowrap > ${SPL} + system host-if-list -a ${COMPUTE} --nowrap > ${SPIL} + DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}') + DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}') + DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}') + DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}') + DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}') + DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}') + DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}') + DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}') + + system datanetwork-add ${PHYSNET0} vlan + system datanetwork-add ${PHYSNET1} vlan + + system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} + system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID} + system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0} + system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1} + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to controller-0 in + support of installing the stx-openstack manifest/helm-charts later. + + :: + + system host-label-assign controller-0 openstack-control-plane=enabled + +#. **For OpenStack only:** A vSwitch is required. + + The default vSwitch is containerized OVS that is packaged with the + stx-openstack manifest/helm-charts. StarlingX provides the option to use + OVS-DPDK on the host, however, in the virtual environment OVS-DPDK is NOT + supported, only OVS is supported. Therefore, simply use the default OVS + vSwitch here. + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-0 +^^^^^^^^^^^^^^^^^^^ + +Unlock controller-0 in order to bring it into service: + +:: + + system host-unlock controller-0 + +Controller-0 will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Install software on controller-1, storage nodes, and compute nodes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Power on the controller-1 virtual server, 'dedicatedstorage-controller-1', + and force it to network boot by pressing F12 and selecting 'lan' as the + alternative boot option. + + :: + + virsh start dedicatedstorage-controller-1 + +#. Attach to the console of virtual controller-1: + + :: + + virsh console dedicatedstorage-controller-1 + +#. As controller-1 VM boots, a message appears on its console instructing you to + configure the personality of the node. + +#. 
On the console of controller-0, list hosts to see the newly discovered + controller-1 host, that is, a host with a hostname of None: + + :: + + system host-list + +----+--------------+-------------+----------------+-------------+--------------+ + | id | hostname | personality | administrative | operational | availability | + +----+--------------+-------------+----------------+-------------+--------------+ + | 1 | controller-0 | controller | unlocked | enabled | available | + | 2 | None | None | locked | disabled | offline | + +----+--------------+-------------+----------------+-------------+--------------+ + +#. Using the host id, set the personality of this host to 'controller': + + :: + + system host-update 2 personality=controller + + This initiates software installation on controller-1. + This can take 5-10 minutes, depending on the performance of the host machine. + +#. While waiting on this, repeat the same procedure for 'dedicatedstorage-storage-0' + and 'dedicatedstorage-storage-1', except for setting the personality to + 'storage' and assigning a unique hostname, for example: + + :: + + system host-update 3 personality=storage hostname=storage-0 + system host-update 4 personality=storage hostname=storage-1 + + This initiates software installation on storage-0 and storage-1. + This can take 5-10 minutes, depending on the performance of the host machine. + +#. While waiting on this, repeat the same procedure for + 'dedicatedstorage-compute-0' and 'dedicatedstorage-compute-1', except for + setting the personality to 'worker' and assigning a unique hostname, for example: + + :: + + system host-update 5 personality=worker hostname=compute-0 + system host-update 6 personality=worker hostname=compute-1 + + This initiates software installation on compute-0 and compute-1. + +#. Wait for the software installation on controller-1, storage-0, storage-1, + compute-0, and compute-1 to complete, for all virtual servers to reboot, and for all + to show as locked/disabled/online in 'system host-list'. + + :: + + system host-list + +----+--------------+-------------+----------------+-------------+--------------+ + | id | hostname | personality | administrative | operational | availability | + +----+--------------+-------------+----------------+-------------+--------------+ + | 1 | controller-0 | controller | unlocked | enabled | available | + | 2 | controller-1 | controller | locked | disabled | online | + | 3 | storage-0 | storage | locked | disabled | online | + | 4 | storage-1 | storage | locked | disabled | online | + | 5 | compute-0 | compute | locked | disabled | online | + | 6 | compute-1 | compute | locked | disabled | online | + +----+--------------+-------------+----------------+-------------+--------------+ + +^^^^^^^^^^^^^^^^^^^^^^ +Configure controller-1 +^^^^^^^^^^^^^^^^^^^^^^ + +Configure the OAM and MGMT interfaces of controller-1 and specify the attached +networks. + +Note that the MGMT interface is partially set up automatically by the network +install procedure. + +:: + + OAM_IF=enp7s1 + system host-if-modify controller-1 $OAM_IF -c platform + system interface-network-assign controller-1 $OAM_IF oam + system interface-network-assign controller-1 mgmt0 cluster-host + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed.
+ +**For OpenStack only:** Assign OpenStack host labels to controller-1 in support +of installing the stx-openstack manifest/helm-charts later. + +:: + + system host-label-assign controller-1 openstack-control-plane=enabled + +^^^^^^^^^^^^^^^^^^^ +Unlock controller-1 +^^^^^^^^^^^^^^^^^^^ + +Unlock controller-1 in order to bring it into service: + +:: + + system host-unlock controller-1 + +Controller-1 will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^^ +Configure storage nodes +^^^^^^^^^^^^^^^^^^^^^^^ + +#. Assign the cluster-host network to the MGMT interface for the storage nodes. + + Note that the MGMT interfaces are partially set up automatically by the + network install procedure. + + :: + + for STORAGE in storage-0 storage-1; do + system interface-network-assign $STORAGE mgmt0 cluster-host + done + +#. Add OSDs to storage-0: + + :: + + HOST=storage-0 + DISKS=$(system host-disk-list ${HOST}) + TIERS=$(system storage-tier-list ceph_cluster) + OSDs="/dev/sdb" + for OSD in $OSDs; do + system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}') + while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done + done + + system host-stor-list $HOST + +#. Add OSDs to storage-1: + + :: + + HOST=storage-1 + DISKS=$(system host-disk-list ${HOST}) + TIERS=$(system storage-tier-list ceph_cluster) + OSDs="/dev/sdb" + for OSD in $OSDs; do + system host-stor-add ${HOST} $(echo "$DISKS" | grep "$OSD" | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}') + while true; do system host-stor-list ${HOST} | grep ${OSD} | grep configuring; if [ $? -ne 0 ]; then break; fi; sleep 1; done + done + + system host-stor-list $HOST + +^^^^^^^^^^^^^^^^^^^^ +Unlock storage nodes +^^^^^^^^^^^^^^^^^^^^ + +Unlock storage nodes in order to bring them into service: + +:: + + for STORAGE in storage-0 storage-1; do + system host-unlock $STORAGE + done + +The storage nodes will reboot in order to apply configuration changes and come +into service. This can take 5-10 minutes, depending on the performance of the host machine. + +^^^^^^^^^^^^^^^^^^^^^^^ +Configure compute nodes +^^^^^^^^^^^^^^^^^^^^^^^ + +#. Assign the cluster-host network to the MGMT interface for the compute nodes. + + Note that the MGMT interfaces are partially set up automatically by the + network install procedure. + + :: + + for COMPUTE in compute-0 compute-1; do + system interface-network-assign $COMPUTE mgmt0 cluster-host + done + +#. Configure data interfaces for compute nodes. + + .. note:: + + This step is **required** for OpenStack and optional for Kubernetes. For + example, do this step if using SRIOV network attachments in application + containers.
+ + For Kubernetes SRIOV network attachments: + + * Configure the SRIOV device plugin: + + :: + + for COMPUTE in compute-0 compute-1; do + system host-label-assign ${COMPUTE} sriovdp=enabled + done + + * If planning on running DPDK in containers on these hosts, configure the number + of 1G Huge pages required on both NUMA nodes: + + :: + + for COMPUTE in compute-0 compute-1; do + system host-memory-modify ${COMPUTE} 0 -1G 100 + system host-memory-modify ${COMPUTE} 1 -1G 100 + done + + For both Kubernetes and OpenStack: + + :: + + DATA0IF=eth1000 + DATA1IF=eth1001 + PHYSNET0='physnet0' + PHYSNET1='physnet1' + SPL=/tmp/tmp-system-port-list + SPIL=/tmp/tmp-system-host-if-list + + * Configure the datanetworks in sysinv, prior to referencing them in the :command:`system host-if-modify` command. + + :: + + system datanetwork-add ${PHYSNET0} vlan + system datanetwork-add ${PHYSNET1} vlan + + for COMPUTE in compute-0 compute-1; do + echo "Configuring interface for: $COMPUTE" + set -ex + system host-port-list ${COMPUTE} --nowrap > ${SPL} + system host-if-list -a ${COMPUTE} --nowrap > ${SPIL} + DATA0PCIADDR=$(cat $SPL | grep $DATA0IF |awk '{print $8}') + DATA1PCIADDR=$(cat $SPL | grep $DATA1IF |awk '{print $8}') + DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}') + DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}') + DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}') + DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}') + DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}') + DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}') + system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} + system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID} + system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0} + system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1} + set +ex + done + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenStack-specific host configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + The following configuration is required only if the StarlingX OpenStack + application (stx-openstack) will be installed. + +#. **For OpenStack only:** Assign OpenStack host labels to the compute nodes in + support of installing the stx-openstack manifest/helm-charts later. + + :: + + for NODE in compute-0 compute-1; do + system host-label-assign $NODE openstack-compute-node=enabled + system host-label-assign $NODE openvswitch=enabled + system host-label-assign $NODE sriov=enabled + done + +#. **For OpenStack only:** Set up disk partition for nova-local volume group, + which is needed for stx-openstack nova ephemeral disks. + + :: + + for COMPUTE in compute-0 compute-1; do + echo "Configuring Nova local for: $COMPUTE" + ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}') + ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}') + PARTITION_SIZE=10 + NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${PARTITION_SIZE}) + NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}') + system host-lvg-add ${COMPUTE} nova-local + system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID} + done + + for COMPUTE in compute-0 compute-1; do + echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready."
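+ # Poll once per second until 'system host-disk-partition-list' reports
+ # the new partition as Ready; the compute nodes are unlocked only after
+ # this completes.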
+ while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done + done + +^^^^^^^^^^^^^^^^^^^^ +Unlock compute nodes +^^^^^^^^^^^^^^^^^^^^ + +Unlock compute nodes in order to bring them into service: + +:: + + for COMPUTE in compute-0 compute-1; do + system host-unlock $COMPUTE + done + +The compute nodes will reboot in order to apply configuration changes and come into +service. This can take 5-10 minutes, depending on the performance of the host machine. + +Your Kubernetes cluster is up and running. + +*************************** +Access StarlingX Kubernetes +*************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-access-starlingx-kubernetes-start: + :end-before: incl-access-starlingx-kubernetes-end: + +------------------- +StarlingX OpenStack +------------------- + +*************************** +Install StarlingX OpenStack +*************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-install-starlingx-openstack-start: + :end-before: incl-install-starlingx-openstack-end: + +************************** +Access StarlingX OpenStack +************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-access-starlingx-openstack-start: + :end-before: incl-access-starlingx-openstack-end: + +***************************** +Uninstall StarlingX OpenStack +***************************** + +.. include:: virtual_aio_simplex.rst + :start-after: incl-uninstall-starlingx-openstack-start: + :end-before: incl-uninstall-starlingx-openstack-end: diff --git a/doc/source/deploy_install_guides/index.rst b/doc/source/deploy_install_guides/index.rst index 33439722a..c7dcbfffa 100755 --- a/doc/source/deploy_install_guides/index.rst +++ b/doc/source/deploy_install_guides/index.rst @@ -2,125 +2,120 @@ Installation and Deployment Guides ================================== -.. toctree:: - :hidden: - - upcoming/installation_libvirt_qemu - - Installation and deployment guides for StarlingX are release-specific. Each guide provides instruction on a specific StarlingX configuration (e.g. All-in-one Simplex). -*********************** -Latest official release -*********************** +----------------------- +Latest release (stable) +----------------------- -The latest official release is the most recent officially -released version of StarlingX. +StarlingX R2.0 is the latest officially released version of StarlingX. -^^^^^^^^^^^^^^^^^^ -Installation guide -^^^^^^^^^^^^^^^^^^ +************************* +R2.0 virtual installation +************************* .. toctree:: :maxdepth: 1 - current/index + current/virtual_aio_simplex + current/virtual_aio_duplex + current/virtual_controller_storage + current/virtual_dedicated_storage -^^^^^^^^^^^^^^^^^ -Deployment guides -^^^^^^^^^^^^^^^^^ +**************************** +R2.0 bare metal installation +**************************** .. toctree:: :maxdepth: 1 - current/simplex - current/duplex - current/controller_storage - current/dedicated_storage - -**************** -Upcoming release -**************** - -The upcoming release is the forthcoming version under development. - -^^^^^^^^^^^^^^^^^^^ -Deployment planning -^^^^^^^^^^^^^^^^^^^ + current/bare_metal_aio_simplex + current/bare_metal_aio_duplex + current/bare_metal_controller_storage + current/bare_metal_dedicated_storage + current/bare_metal_ironic .. 
toctree:: :maxdepth: 1 + :hidden: - - upcoming/planning - upcoming/options + current/access_starlingx_kubernetes + current/access_starlingx_openstack + current/install_openstack + current/uninstall_delete_openstack -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Installation and deployment guides -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------- +Upcoming R3.0 release +--------------------- -.. toctree:: - :maxdepth: 1 +The upcoming R3.0 release is the next version of StarlingX, currently under development. - upcoming/aio_simplex - upcoming/aio_duplex - upcoming/controller_storage - upcoming/dedicated_storage - upcoming/ironic - upcoming/multi_region - upcoming/dist_cloud - -***************** +----------------- Archived releases -***************** +----------------- -Currently, there are no archived guides. +************** +StarlingX R1.0 +************** +.. toctree:: + :maxdepth: 1 + + r1_release/index + r1_release/simplex + r1_release/duplex + r1_release/controller_storage + r1_release/dedicated_storage + + + +.. toctree:: + :maxdepth: 1 + :hidden: + + bootable_usb .. Steps you must take when a new release of the deployment and installation guides occurs: .. 1. Archive the "current" release: - 1. Rename the "current" folder to the release number, e.g. "r1_release". 2. Go into the renamed folder (i.e. the old "current" folder) and update all links in the *.rst files to use the new path (e.g. :doc:`Libvirt/QEMU ` becomes - :doc:`Libvirt/QEMU /installation_libvirt_qemu>` + :doc:`Libvirt/QEMU /installation_libvirt_qemu>` 3. You might want to change your working directory to / and use Git to grep for the "current" string (i.e. 'git grep "current" *'). For each applicable occurrence, make the call whether or not to convert the string to the actual archived string "". Be sure to scrub all files for the "current" string in both the "installation_guide" and "developer_guide" folders downward. 2. Add the new "current" release: - 1. Rename the existing "latest" folders to "current". This assumes that "latest" represented + 1. Rename the existing "upcoming" folders to "current". This assumes that "upcoming" represented the under-development release that just officially released. - 2. Get inside your new folder (i.e. the old "latest" folder) and update all links in the *.rst + 2. Get inside your new folder (i.e. the old "upcoming" folder) and update all links in the *.rst files to use the new path (e.g. :doc:`Libvirt/QEMU ` becomes :doc:`Libvirt/QEMU ` - 3. You might want to change your working directory to the "current" directory and use Git to grep for - the "latest" string (i.e. 'git grep "latest" *'). For each applicable occurrence, make - the call whether or not to convert the string to "current". - Be sure to scrub all files for the "latest" string in both the "installation_guide" - and "developer_guide" folders downward. - 4. Because the "current" release is now available, make sure to update these pages: + 3. Again, scrub all files as per step 1.3 above. + 4. Because the "upcoming" release is now available, make sure to update these pages: - index - installation guide - developer guide - release notes - 3. Create a new "latest" release, which are the installation and developer guides under development: - 1. Copy your "current" folders and rename them "latest". + 3. 
Create a new "upcoming" release, which are the installation and developer guides under development: + 1. Copy your "current" folders and rename them "upcoming". 2. Make sure the new files have the correct version in the page title and intro sentence (e.g. '2019.10.rc1 Installation Guide'). - 3. Make sure all files in new "latest" link to the correct versions of supporting + 3. Make sure all files in new "upcoming" link to the correct versions of supporting docs. You do this through the doc link, so that it resolves to the top of the page (e.g. :doc:`/installation_guide/latest/index`) 4. Make sure the new release index is labeled with the correct version name (e.g .. _index-2019-05:) 5. Add the archived version to the toctree on this page. You want all possible versions to build. - 6. Since you are adding a new version ("latest") *before* it is available + 6. Since you are adding a new version ("upcoming") *before* it is available (e.g. to begin work on new docs), make sure page text still directs user to the "current" release and not to the under development version of the manuals. diff --git a/doc/source/deploy_install_guides/current/controller_storage.rst b/doc/source/deploy_install_guides/r1_release/controller_storage.rst similarity index 97% rename from doc/source/deploy_install_guides/current/controller_storage.rst rename to doc/source/deploy_install_guides/r1_release/controller_storage.rst index 38bfc6b6c..bcfcd28ca 100644 --- a/doc/source/deploy_install_guides/current/controller_storage.rst +++ b/doc/source/deploy_install_guides/r1_release/controller_storage.rst @@ -1,6 +1,6 @@ -============================== -Controller storage stx.2018.10 -============================== +======================= +Controller storage R1.0 +======================= .. contents:: :local: diff --git a/doc/source/deploy_install_guides/current/dedicated_storage.rst b/doc/source/deploy_install_guides/r1_release/dedicated_storage.rst similarity index 97% rename from doc/source/deploy_install_guides/current/dedicated_storage.rst rename to doc/source/deploy_install_guides/r1_release/dedicated_storage.rst index 96ce418c7..7e2a798e7 100644 --- a/doc/source/deploy_install_guides/current/dedicated_storage.rst +++ b/doc/source/deploy_install_guides/r1_release/dedicated_storage.rst @@ -1,6 +1,6 @@ -============================= -Dedicated storage stx.2018.10 -============================= +====================== +Dedicated storage R1.0 +====================== .. contents:: :local: diff --git a/doc/source/deploy_install_guides/current/deployment_terminology.rst b/doc/source/deploy_install_guides/r1_release/deployment_terminology.rst similarity index 100% rename from doc/source/deploy_install_guides/current/deployment_terminology.rst rename to doc/source/deploy_install_guides/r1_release/deployment_terminology.rst diff --git a/doc/source/deploy_install_guides/current/duplex.rst b/doc/source/deploy_install_guides/r1_release/duplex.rst similarity index 97% rename from doc/source/deploy_install_guides/current/duplex.rst rename to doc/source/deploy_install_guides/r1_release/duplex.rst index b66ffec25..64a3f026f 100644 --- a/doc/source/deploy_install_guides/current/duplex.rst +++ b/doc/source/deploy_install_guides/r1_release/duplex.rst @@ -1,6 +1,6 @@ -============================= -All-in-one duplex stx.2018.10 -============================= +====================== +All-in-one Duplex R1.0 +====================== .. 
contents:: :local: diff --git a/doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-controller-storage.png b/doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-controller-storage.png similarity index 100% rename from doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-controller-storage.png rename to doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-controller-storage.png diff --git a/doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-dedicated-storage.png b/doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-dedicated-storage.png similarity index 100% rename from doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-dedicated-storage.png rename to doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-dedicated-storage.png diff --git a/doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-duplex-extended.png b/doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-duplex-extended.png similarity index 100% rename from doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-duplex-extended.png rename to doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-duplex-extended.png diff --git a/doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-duplex.png b/doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-duplex.png similarity index 100% rename from doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-duplex.png rename to doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-duplex.png diff --git a/doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-simplex.png b/doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-simplex.png similarity index 100% rename from doc/source/deploy_install_guides/upcoming/figures/starlingx-deployment-options-simplex.png rename to doc/source/deploy_install_guides/r1_release/figures/starlingx-deployment-options-simplex.png diff --git a/doc/source/deploy_install_guides/current/index.rst b/doc/source/deploy_install_guides/r1_release/index.rst similarity index 88% rename from doc/source/deploy_install_guides/current/index.rst rename to doc/source/deploy_install_guides/r1_release/index.rst index 31bc021ec..48d1ea3dd 100644 --- a/doc/source/deploy_install_guides/current/index.rst +++ b/doc/source/deploy_install_guides/r1_release/index.rst @@ -1,10 +1,9 @@ -============================== -Installation guide stx.2018.10 -============================== +======================= +Installation guide R1.0 +======================= -This is the installation guide for the "current" StarlingX software -(i.e. the most recently released version). -If this is not the installation guide you want to use, see the +This is the installation guide for the StarlingX R1.0 release. If this is not the +installation guide you want to use, see the :doc:`available installation guides `. 
------------ @@ -271,13 +270,13 @@ Deployment options - Standard controller - - :doc:`StarlingX Cloud with Dedicated Storage ` - - :doc:`StarlingX Cloud with Controller Storage ` + - :doc:`StarlingX Cloud with Dedicated Storage R1.0 ` + - :doc:`StarlingX Cloud with Controller Storage R1.0 ` - All-in-one - - :doc:`StarlingX Cloud Duplex ` - - :doc:`StarlingX Cloud Simplex ` + - :doc:`StarlingX Cloud Duplex R1.0 ` + - :doc:`StarlingX Cloud Simplex R1.0 ` .. toctree:: :hidden: diff --git a/doc/source/deploy_install_guides/current/installation_libvirt_qemu.rst b/doc/source/deploy_install_guides/r1_release/installation_libvirt_qemu.rst similarity index 86% rename from doc/source/deploy_install_guides/current/installation_libvirt_qemu.rst rename to doc/source/deploy_install_guides/r1_release/installation_libvirt_qemu.rst index cfccced13..504cda311 100644 --- a/doc/source/deploy_install_guides/current/installation_libvirt_qemu.rst +++ b/doc/source/deploy_install_guides/r1_release/installation_libvirt_qemu.rst @@ -1,8 +1,8 @@ -===================================== -Installation libvirt qemu stx.2018.10 -===================================== +============================== +Installation libvirt qemu R1.0 +============================== -Installation for StarlingX stx.2018.10 using Libvirt/QEMU virtualization. +Installation for StarlingX R1.0 using Libvirt/QEMU virtualization. --------------------- Hardware requirements @@ -195,10 +195,10 @@ controller-0 step. - Standard controller - - :doc:`StarlingX Cloud with Dedicated Storage Virtual Environment ` - - :doc:`StarlingX Cloud with Controller Storage Virtual Environment ` + - :doc:`StarlingX Cloud with Dedicated Storage Virtual Environment ` + - :doc:`StarlingX Cloud with Controller Storage Virtual Environment ` - All-in-one - - :doc:`StarlingX Cloud Duplex Virtual Environment ` - - :doc:`StarlingX Cloud Simplex Virtual Environment ` + - :doc:`StarlingX Cloud Duplex Virtual Environment ` + - :doc:`StarlingX Cloud Simplex Virtual Environment ` diff --git a/doc/source/deploy_install_guides/current/simplex.rst b/doc/source/deploy_install_guides/r1_release/simplex.rst similarity index 97% rename from doc/source/deploy_install_guides/current/simplex.rst rename to doc/source/deploy_install_guides/r1_release/simplex.rst index fb85eedc2..6683fda8e 100644 --- a/doc/source/deploy_install_guides/current/simplex.rst +++ b/doc/source/deploy_install_guides/r1_release/simplex.rst @@ -1,6 +1,6 @@ -============================== -All-in-one simplex stx.2018.10 -============================== +======================= +All-in-one Simplex R1.0 +======================= .. contents:: :local: diff --git a/doc/source/deploy_install_guides/upcoming/aio_duplex.rst b/doc/source/deploy_install_guides/upcoming/aio_duplex.rst deleted file mode 100644 index 41dc9e093..000000000 --- a/doc/source/deploy_install_guides/upcoming/aio_duplex.rst +++ /dev/null @@ -1,1443 +0,0 @@ -====================== -All-In-One Duplex R2.0 -====================== - -.. contents:: - :local: - :depth: 1 - - ------------- -Introduction ------------- - -The All-In-One Duplex (AIO-DX) configuration option comes in standard -and extended options. - -************************** -All-In-One Duplex standard -************************** - -The AIO-DX standard configuration option provides all three cloud functions -(controller, compute, and storage) on two physical servers. -With these cloud functions, multiple application types can be deployed and -consolidated onto a protected pair of physical servers. - -.. 
figure:: figures/starlingx-deployment-options-duplex.png - :scale: 90% - :alt: All-In-One Simplex deployment configuration - - *All-in-one duplex standard configuration* - -Following are some benefits of the AIO-DX configuration: - -- Consolidate legacy applications that must run standalone on a server by using - multiple virtual machines on a single physical server. - -- Consolidate legacy applications that run on different operating systems or - different distributions of operating systems by using multiple virtual - machines on a single pair of physical servers. - -- Provide the storage backend solution using a two-node CEPH deployment across two servers. - -- HA Services run on the controller function across the two physical servers in either - Active/Active or Active/Standby mode. - -- Virtual machines are scheduled on both compute functions. - -- During an overall server hardware fault, the following occurs: - - - All controller HA services go Active on the remaining healthy server. - - - All virtual machines are recovered on the remaining healthy server. - -- Only a small amount of cloud processing and storage power is required - with an AIO-DX configuration and protection against overall server hardware - faults is required. - -************************** -All-In-One Duplex extended -************************** - -The AIO-DX extended configuration option extends the capacity of the -AIO-DX duplex standard configuration option by adding up to four compute -nodes to the deployment. -The extended configuration option provides a capacity growth path for -someone starting with an AIO-DX duplex standard configuration option. - -With this option, virtual machines can be scheduled on either of the -all-in-one controller nodes, the compute nodes, or both. - -.. figure:: figures/starlingx-deployment-options-duplex-extended.png - :scale: 90% - :alt: All-In-One Duplex extended configuration - - *All-In-One Duplex Extended deployment configuration* - -This configuration is limited to four compute nodes as the controller function -on the all-in-one controllers has only a portion of the processing power of the -overall server. - --------------------- -Installation options --------------------- - -StarlingX may be installed on bare metal or in a virtual environment: - -- **Bare metal**: Real deployments of StarlingX are supported only on - physical servers. - -- **Virtual environment**: A virtual environment should be used only for - evaluation or development purposes. - -StarlingX installed in a virtual environment has a single installation option: - -- :doc:`Libvirt/QEMU ` - ------------- -Requirements ------------- - -********** -Bare metal -********** - -Required server: - -- Combined server (controller + compute): 2 - -^^^^^^^^^^^^^^^^^^^^^ -Hardware requirements -^^^^^^^^^^^^^^^^^^^^^ - -The recommended minimum requirements for the physical servers where -the AIO-DX will be deployed are: - -- Minimum processor: - - - Typical hardware form factor: - - - Dual-CPU Intel Xeon E5 26xx family (SandyBridge) 8 cores/socket - - - Low cost / low power hardware form factor - - - Single-CPU Intel Xeon D-15xx family, 8 cores - - -- Memory: 64 GB - -- BIOS: - - - Hyper-Threading technology: Enabled - - - Virtualization technology: Enabled - - - VT for directed I/O: Enabled - - - CPU power and performance policy: Performance - - - CPU C state control: Disabled - - - Plug & play BMC detection: Disabled - - -- Primary disk: - - - 500 GB SDD or NVMe - - -- Additional disks: - - - Zero or more 500 GB disks (min. 
10K RPM) - - -- Network ports - - .. note:: All-in-one duplex configuration requires one or more data ports. - - - Management: 10GE - - - OAM: 10GE - - - Data: n x 10GE - - -^^^^^^^^^^^^^^^^^^^^^^^^ -NVMe drive as boot drive -^^^^^^^^^^^^^^^^^^^^^^^^ - -To use a Non-Volatile Memory Express (NVMe) drive as the boot drive for any of -your nodes, you must configure your host and adjust kernel parameters during -installation: - -- Configure the host to be in UEFI mode. - -- Edit the kernel boot parameter. - After you are presented with the StarlingX ISO boot options and after - you have selected the preferred installation option - (e.g. Standard Configuration / All-in-One Controller Configuration), press the - TAB key to edit the kernel boot parameters. - Modify the **boot_device** and **rootfs_device** from the default **sda** so - that it is the correct device name for the NVMe drive (e.g. "nvme0n1"). - - :: - - vmlinuz rootwait console=tty0 inst.text inst.stage2=hd:LABEL=oe_iso_boot - inst.ks=hd:LABEL=oe_iso_boot:/smallsystem_ks.cfg boot_device=nvme0n1 - rootfs_device=nvme0n1 biosdevname=0 usbcore.autosuspend=-1 inst.gpt - security_profile=standard user_namespace.enable=1 initrd=initrd.img - -******************* -Virtual environment -******************* - -The following subsections describe the recommended minimum requirements for -the workstation hosting the virtual machine(s) where StarlingX will be -deployed. - -^^^^^^^^^^^^^^^^^^^^^ -Hardware requirements -^^^^^^^^^^^^^^^^^^^^^ - -A workstation computer with: - -- Processor: x86_64 only supported architecture with BIOS enabled - hardware virtualization extensions - -- Cores: 8 (4 with careful monitoring of cpu load) - -- Memory: At least 32 GB RAM - -- Hard Disk: 500 GB HDD - -- Network: Two network adapters with active Internet connection - -^^^^^^^^^^^^^^^^^^^^^ -Software requirements -^^^^^^^^^^^^^^^^^^^^^ - -A workstation computer with: - -- Operating System: Newly installed Ubuntu 16.04 LTS 64-bit - -- If applicable, configured proxy settings - -- Git - -- KVM/VirtManager - -- Libvirt library - -- QEMU full-system emulation binaries - -- tools project - -- StarlingX ISO image - --------------------------- -Setting up the workstation --------------------------- - -This section describes how to set up the workstation computer that -hosts the virtual machine(s) where StarlingX will be deployed. - -****************************** -Updating your operating system -****************************** - -Before building an image, ensure your Linux distribution is current. -You must first update the local database list of available packages: - -:: - - $ sudo apt-get update - -************************ -Installing tools project -************************ - -Clone the tools project. 
-Typically, you clone this project while in your home directory: - -:: - - $ cd $HOME - $ git clone https://opendev.org/starlingx/tools.git - -**************************************** -Installing requirements and dependencies -**************************************** - -Navigate to the tools installation libvirt directory: - -:: - - $ cd $HOME/tools/deployment/libvirt/ - -Install the required packages: - -:: - - $ bash install_packages.sh - -****************** -Disabling firewall -****************** - -Unload the firewall and disable it during boot: - -:: - - $ sudo ufw disable - Firewall stopped and disabled on system startup - $ sudo ufw status - Status: inactive - -************************** -Prepare the virtual server -************************** - -Run the libvirt QEMU setup scripts to set up -virtualized OAM and management networks: - -:: - - $ bash setup_network.sh - -Build XML for definition of virtual servers: - -:: - - $ bash setup_configuration.sh -c duplex -i - -The previous script creates the following default XML server definitions: - -- duplex-controller-0 -- duplex-controller-1 - -************************* -Power up a virtual server -************************* - -To power up the virtual server, use the following command form: - -:: - - $ sudo virsh start - -Following is an example where is "duplex-controller-0": - -:: - - $ sudo virsh start duplex-controller-0 - -******************************* -Access a virtual server console -******************************* - -The XML for virtual servers in the tools repo, deployment/libvirt, -provides both graphical and text consoles. -Access the graphical console in virt-manager by right-clicking on the -domain (i.e. the server) and selecting "Open". - -Access the textual console with the command "virsh console $DOMAIN", -where DOMAIN is the name of the server shown in virsh. - -When booting the controller-0 for the first time, both the serial and -graphical consoles present the initial configuration menu for the -cluster. You can select the serial or graphical console for controller-0. -Only serial is used for other nodes, regardless of which option is selected. - -Before powering on a virtual server, open the graphic console to observe the -boot device selection and PXI boot progress. -Run "virsh console $DOMAIN" command promptly after the boot process -completes in order to see the initial boot sequence, which follows -the boot device selection. -Once the boot process completes, you have only a few seconds to -run the command. - -------------------------------------------- -Getting or building the StarlingX ISO image -------------------------------------------- - -The following subsections describe how to get or build the -StarlingX ISO image. - -********************* -Building the software -********************* - -Follow the standard build process in the `StarlingX Build Guide -`__. - -Alternatively, you can use a pre-built ISO, which includes all -required packages provided by the `StarlingX CENGN -mirror `__. - -********** -Bare metal -********** - -For bare metal, you must have a bootable USB flash drive -that contains the StarlingX ISO image. 
- -******************* -Virtual environment -******************* - -For the virtual environment, copy the StarlingX ISO Image -to the tools deployment libvirt project directory: - -:: - - $ cp $HOME/tools/deployment/libvirt/ - ------------------------ -Setting up controller-0 ------------------------ - -Installing controller-0 involves initializing a host with software -and then applying a bootstrap configuration from the command line. -The configured bootstrapped host becomes controller-0. - -The general procedure is: - -1. Have a USB device that contains a bootable StarlingX ISO. - -2. Be sure that USB device is plugged into a bootable USB slot on the server that - will be the Controller-0. - -3. Power on the server. - -4. Configure the controller by running the Ansible bootstrap playbook. - -************************* -Initializing controller-0 -************************* - -This section describes how to initialize StarlingX in host controller-0. - -.. note:: Except where noted, you must execute all the commands from a console on - the host. - -Follow this procedure to initialize the controller: - -1. Be sure your USB device that has the StarlingX ISO is plugged into - a bootable USB port on the host your are configuring as controller-0. - -2. Power on the host. - -3. Wait for the console to show the StarlingX ISO booting options: - - - **All-in-one Controller Configuration:** - - For this option, select "All-in-one Controller Configuration" for the - the type of installation from the installer welcome screen. - - - **Graphical Console:** - - Select "Graphical Console" as the console to use during - installation. - - - **Standard Security Boot Profile:** - - Select "Standard Security Boot Profile" as the Security Profile. - -4. Monitor the initialization. When it completes, a reboot is initiated - on the controller-0 host and briefly displays a GNU GRUB screen after - which the reboot automatically continues into the StarlingX image. - -5. Log into controller-0 as user "sysadmin" and use "syadmin" as the password. - The first time you log in as "sysadmin", you are required to change your - password: - - :: - - Changing password for sysadmin. - (current) UNIX Password: sysadmin - - -6. Enter a new password for the "sysadmin" account and confirm the change. - Once you change the password, controller-0 is initialized with StarlingX - and is ready for configuration. - - -************************ -Configuring controller-0 -************************ - -This section describes how to configure controller-0 for local -bootstrap by running the Ansible bootstrap playbook. - -.. note:: - For ease of use in development and controlled test environments, - you can provide passwords by specifying from the command line - an override file that is an unencrypted text file. - - - The sysadmin password is used for SSH authentication. - - - In production environments, you should store sensitive - information in the Ansible vault secret file and use - SSH keys rather than passwords for authentication. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Location of the controller bootstrap playbook -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All StarlingX playbooks are located under the directory -/usr/share/ansible/stx-ansible/playbooks. -Consequently, the controller bootstrap playbook is located -at: /usr/share/ansible/stx-ansible/playbooks/bootstrap/. 

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Default bootstrap playbook settings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The default inventory file, which resides in the Ansible configuration
directory (i.e. /etc/ansible/hosts), contains a single host: the localhost.
You can override this file with a custom hosts file by using the -i option.
Doing so allows the playbook to be run against a remote host.

The /usr/share/ansible/stx-ansible/playbooks/bootstrap/host_vars/default.yml
file specifies the default configuration parameters.
You can overwrite these parameters in two ways:

- Using either the --extra-vars or -e option on the command line.

- Using an override file.

Using an override file is the preferred option when multiple
parameters need to be overwritten.

By default, Ansible looks for and imports user override files
in the sysadmin home directory ($HOME).
If you want to place these files in a different location, you
must specify the location by using the -e option
(e.g. -e "override_files_dir=<custom override dir>").

The override file must conform to the following naming convention:

::

  <hostname>.yml

An example filename is localhost.yml.

^^^^^^^^^^^^^^
Password types
^^^^^^^^^^^^^^

For local bootstrap, two types of passwords exist:

- **ansible_become_pass**: A sudo password used to run tasks that require
  escalated privileges.
  Most bootstrap tasks must be run as root.
  Since the playbook is run by the sysadmin user, this is the sysadmin
  password.

- **admin_password**: A password used when system commands, such as
  a Horizon login, are executed.

Additionally, if an automatic SSH login that uses an SSH key has not been
set up between the Ansible control node and the target controller,
another password is required:

- **ansible_ssh_pass**: The password used to log into the target host(s).

For all the passwords mentioned in this section, the default is
"St8rlingX*".

^^^^^^^^^^^^^^^^^^^^
Running the playbook
^^^^^^^^^^^^^^^^^^^^

To run the playbook, you first need to set up external connectivity.
This section describes how to set up external connectivity and
then provides examples showing how to bootstrap controller-0 by
running the playbook.

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Setting up external connectivity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Use these commands to set up external connectivity:

::

  export CONTROLLER0_OAM_CIDR=10.10.10.10/24
  export DEFAULT_OAM_GATEWAY=10.10.10.1
  sudo ip address add $CONTROLLER0_OAM_CIDR dev enp2s1
  sudo ip link set up dev enp2s1
  sudo ip route add default via $DEFAULT_OAM_GATEWAY dev enp2s1
  ping 8.8.8.8

~~~~~~~~~~~~~~~~~~~~~~
Bootstrap controller-0
~~~~~~~~~~~~~~~~~~~~~~

The following example runs the local playbook using all the defaults,
including "St8rlingX*" for all passwords:

::

  ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml
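If your override files live somewhere other than the sysadmin home
directory, you can combine this with the override_files_dir setting
described earlier. The directory path below is hypothetical:

::

  ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml \
      -e "override_files_dir=/home/sysadmin/overrides"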
This next example runs the local playbook using an override file that
provides custom parameters, including the admin and sysadmin passwords.
The override file is named "localhost.yml" and is located under the
/home/sysadmin/ directory.

1. Create a *localhost.yml* file under the */home/sysadmin/* directory with
   the following content:

   ::

     # Mandatory
     system_mode: duplex

     # Optional
     external_oam_subnet: <OAM subnet>
     external_oam_gateway_address: <OAM gateway address>
     external_oam_floating_address: <OAM floating address>
     external_oam_node_0_address: <OAM node 0 address>
     external_oam_node_1_address: <OAM node 1 address>
     management_subnet: <management subnet>
     dns_servers:
       - <DNS server>

     admin_password: <admin password>
     ansible_become_pass: <sysadmin password>

   */home/sysadmin/localhost.yml* example:

   ::

     # Mandatory
     system_mode: duplex

     # Optional
     external_oam_subnet: 10.10.10.0/24
     external_oam_gateway_address: 10.10.10.1
     external_oam_floating_address: 10.10.10.3
     external_oam_node_0_address: 10.10.10.4
     external_oam_node_1_address: 10.10.10.5
     management_subnet: 192.168.204.0/24
     dns_servers:
       - 8.8.4.4
     admin_password: St8rlingX*
     ansible_become_pass: St8rlingX*

2. Run the bootstrap playbook:

   ::

     ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml

This final example bootstraps controller-0 by running the local playbook
with custom sysadmin and admin passwords specified on the command line:

::

  ansible-playbook /usr/share/ansible/stx-ansible/playbooks/bootstrap/bootstrap.yml -e "ansible_become_pass=<sysadmin password> admin_password=<admin password>"

.. note:: Ansible does not currently support specifying playbook
   search paths.
   Consequently, you must specify the full path to the bootstrap
   playbook on the command line unless you are already in the
   bootstrap playbook directory.
   In the near future, a command alias called "bootstrap-controller"
   will be provided for ease of use.

*************************
Provisioning the platform
*************************

The following subsections describe how to provision the
server being used as controller-0.
Provisioning makes many services available.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configure the OAM, management, and cluster interfaces for controller-0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use the following commands to configure the OAM, management, and cluster
interfaces for controller-0:

::

  source /etc/platform/openrc
  OAM_IF=enp0s3
  MGMT_IF=enp0s8
  system host-if-modify controller-0 lo -c none
  IFNET_UUIDS=$(system interface-network-list controller-0 | awk '{if ($6=="lo") print $4;}')
  for UUID in $IFNET_UUIDS; do
      system interface-network-remove ${UUID}
  done
  system host-if-modify controller-0 $OAM_IF -c platform
  system interface-network-assign controller-0 $OAM_IF oam
  system host-if-modify controller-0 $MGMT_IF -c platform
  system interface-network-assign controller-0 $MGMT_IF mgmt
  system interface-network-assign controller-0 $MGMT_IF cluster-host

^^^^^^^^^^^^^^^^^^
Set the NTP server
^^^^^^^^^^^^^^^^^^

.. attention:: Bare metal hardware only. Skip this step in a virtual
   environment, as it can cause Ceph clock skew alarms.
   Moreover, the clock of a virtual instance is synchronized with the
   host clock, so there is no need to configure NTP here.

Use the following commands to configure the IP addresses of the remote
Network Time Protocol (NTP) servers.
These servers are used for network time synchronization:

::

  source /etc/platform/openrc
  system ntp-modify ntpservers=0.pool.ntp.org,1.pool.ntp.org
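To verify that the NTP configuration was applied, you can display the
current settings (assuming the "system ntp-show" command is available in
your release):

::

  system ntp-show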

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configure the host's vSwitch type
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. attention:: In a virtual environment, OVS-DPDK is NOT supported; only
   OVS is supported.

This section describes how to configure the virtual switch required by the
stx-openstack application, which allows network entities to connect to
virtual machines over a virtual network.

StarlingX uses the kernel-based OVS vSwitch by default:

- Runs in a container, defined within the helm charts of the stx-openstack
  manifest.
- Shares the core(s) assigned to the platform.

If you require better performance, use OVS-DPDK instead:

- Runs directly on the host (i.e. NOT containerized).
- Requires at least one core assigned/dedicated to the vSwitch function.

To deploy the default containerized OVS:

::

  system modify --vswitch_type none

That is, do not run any vSwitch directly on the host, and use the
containerized OVS defined in the helm charts of the stx-openstack manifest.

To deploy OVS-DPDK (OVS with the Data Plane Development Kit), which is
supported only on bare metal hardware, run the following commands:

::

  system modify --vswitch_type ovs-dpdk
  system host-cpu-modify -f vswitch -p0 1 controller-0

Once vswitch_type is set to OVS-DPDK, any subsequently created nodes
default to automatically assigning one vSwitch core for AIO controllers
and two vSwitch cores for computes.

When using OVS-DPDK, virtual machines must be configured to use a flavor
with the property **hw:mem_page_size=large**.

.. important:: After controller-0 is unlocked, changing vswitch_type
   requires locking and unlocking all computes (and/or AIO controllers)
   to apply the change.

^^^^^^^^^^^^^^^^^^^^^^^^^
Configure data interfaces
^^^^^^^^^^^^^^^^^^^^^^^^^

Part of provisioning controller-0 is configuring the data interfaces.
Use the following commands to configure the data interfaces:

::

  DATA0IF=eth1000
  DATA1IF=eth1001
  export COMPUTE=controller-0
  PHYSNET0='physnet0'
  PHYSNET1='physnet1'
  SPL=/tmp/tmp-system-port-list
  SPIL=/tmp/tmp-system-host-if-list
  system host-port-list ${COMPUTE} --nowrap > ${SPL}
  system host-if-list -a ${COMPUTE} --nowrap > ${SPIL}
  DATA0PCIADDR=$(cat $SPL | grep $DATA0IF | awk '{print $8}')
  DATA1PCIADDR=$(cat $SPL | grep $DATA1IF | awk '{print $8}')
  DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
  DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
  DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
  DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
  DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
  DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')

  # Configure the data networks in StarlingX before referencing them
  # in the 'system host-if-modify' command.

  system datanetwork-add ${PHYSNET0} vlan
  system datanetwork-add ${PHYSNET1} vlan

  system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
  system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
  system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
  system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
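As an optional sanity check, you can list the interfaces again to confirm
that data0 and data1 now have the "data" class and are assigned to physnet0
and physnet1. The first command is used elsewhere in this guide; the second
assumes "system datanetwork-list" is available in your release:

::

  system host-if-list -a ${COMPUTE}
  system datanetwork-list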

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prepare the host for running the containerized services
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To prepare the host for running the containerized services, apply all the
node labels for each controller and its compute functions:

::

  system host-label-assign controller-0 openstack-control-plane=enabled
  system host-label-assign controller-0 openstack-compute-node=enabled
  system host-label-assign controller-0 openvswitch=enabled
  system host-label-assign controller-0 sriov=enabled

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set up partitions for controller-0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You need to create partitions on the root disk and then wait for them to
become ready:

- 34 GB partition size for nova-local (mandatory).

The following is an example:

::

  export COMPUTE=controller-0

  echo ">>> Getting root disk info"
  ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
  ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
  echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID"

  echo ">>>> Configuring nova-local"
  NOVA_SIZE=34
  NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE})
  NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
  system host-lvg-add ${COMPUTE} nova-local
  system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}
  sleep 2

  echo ">>> Wait for partition $NOVA_PARTITION_UUID to be ready."
  while true; do system host-disk-partition-list $COMPUTE --nowrap | grep $NOVA_PARTITION_UUID | grep Ready; if [ $? -eq 0 ]; then break; fi; sleep 1; done

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configure Ceph for controller-0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use the following commands to configure Ceph for controller-0:

::

  echo ">>> Add OSDs to primary tier"

  system host-disk-list controller-0
  system host-disk-list controller-0 | awk '/\/dev\/sdb/{print $2}' | xargs -i system host-stor-add controller-0 {}
  system host-stor-list controller-0

^^^^^^^^^^^^^^^^^^^^^
Unlock the controller
^^^^^^^^^^^^^^^^^^^^^

You must unlock controller-0 so that you can use it to install
controller-1. Use the system host-unlock command:

::

  system host-unlock controller-0

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Test for Ceph cluster operation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Now, test that the Ceph cluster is operational:

::

  ceph -s
      cluster 6cb8fd30-622a-4a15-a039-b9e945628133
       health HEALTH_OK
       monmap e1: 1 mons at {controller-0=127.168.204.3:6789/0}
              election epoch 4, quorum 0 controller-0
       osdmap e32: 1 osds: 1 up, 1 in
              flags sortbitwise,require_jewel_osds
        pgmap v35: 1728 pgs, 6 pools, 0 bytes data, 0 objects
              39180 kB used, 50112 MB / 50150 MB avail
                  1728 active+clean
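If you are scripting the installation, you can block until Ceph reports a
healthy state, mirroring the wait-loop style used for partitions above.
This is an illustrative sketch only; adjust the sleep interval and add a
timeout as needed:

::

  # Poll 'ceph -s' until the cluster reports HEALTH_OK
  while ! ceph -s | grep -q HEALTH_OK; do sleep 5; done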

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Boot the second AIO controller
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To boot the second VM (i.e. the VM without an ISO image mounted),
press the F12 key immediately and select "lan" as the alternative boot
option to force a network boot.

As the VM boots, a message appears on the controller-1 console
instructing you to configure the personality of the node.
Do so from a controller-0 shell using the following commands:

::

  source /etc/platform/openrc
  system host-list

The results indicate that ID 2 is the unprovisioned controller:

::

  +----+--------------+-------------+----------------+-------------+--------------+
  | id | hostname     | personality | administrative | operational | availability |
  +----+--------------+-------------+----------------+-------------+--------------+
  | 1  | controller-0 | controller  | unlocked       | enabled     | available    |
  | 2  | None         | None        | locked         | disabled    | offline      |
  +----+--------------+-------------+----------------+-------------+--------------+

Set the personality of the new host:

::

  system host-update 2 personality=controller

The packages install and the controller reboots.

---------------------------------------
Provisioning the second AIO controller
---------------------------------------

The following subsections show how to provision controller-1.

******************************************
Configure data interfaces for controller-1
******************************************

Configure the data interfaces as follows:

::

  source /etc/platform/openrc
  export COMPUTE='controller-1'
  PHYSNET0='physnet0'
  PHYSNET1='physnet1'
  OAM_IF=enp0s3
  DATA0IF=eth1000
  DATA1IF=eth1001
  NOWRAP="--nowrap"

  echo ">>> Configuring OAM Network"
  system host-if-modify -n oam0 -c platform ${COMPUTE} $(system host-if-list -a $COMPUTE $NOWRAP | awk -v OAM_IF=$OAM_IF '{if ($4 == OAM_IF) { print $2;}}')
  system interface-network-assign controller-1 oam0 oam

  echo ">>> Configuring Cluster Host Interface"
  system interface-network-assign controller-1 mgmt0 cluster-host

  echo ">>> Configuring Data Networks"
  SPL=/tmp/tmp-system-port-list
  SPIL=/tmp/tmp-system-host-if-list
  system host-port-list ${COMPUTE} $NOWRAP > ${SPL}
  system host-if-list -a ${COMPUTE} $NOWRAP > ${SPIL}

  DATA0PCIADDR=$(cat $SPL | grep $DATA0IF | awk '{print $8}')
  DATA1PCIADDR=$(cat $SPL | grep $DATA1IF | awk '{print $8}')
  DATA0PORTUUID=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $2}')
  DATA1PORTUUID=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $2}')
  DATA0PORTNAME=$(cat $SPL | grep ${DATA0PCIADDR} | awk '{print $4}')
  DATA1PORTNAME=$(cat $SPL | grep ${DATA1PCIADDR} | awk '{print $4}')
  DATA0IFUUID=$(cat $SPIL | awk -v DATA0PORTNAME=$DATA0PORTNAME '($12 ~ DATA0PORTNAME) {print $2}')
  DATA1IFUUID=$(cat $SPIL | awk -v DATA1PORTNAME=$DATA1PORTNAME '($12 ~ DATA1PORTNAME) {print $2}')

  system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID}
  system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
  system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
  system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}

***********************************************************
Prepare controller-1 for running the containerized services
***********************************************************

Use the following commands to prepare controller-1 for running the
containerized services:

::

  source /etc/platform/openrc

  system host-label-assign controller-1 openstack-control-plane=enabled
  system host-label-assign controller-1 openstack-compute-node=enabled
  system host-label-assign controller-1 openvswitch=enabled
  system host-label-assign controller-1 sriov=enabled
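Optionally, confirm the labels were applied before proceeding (assuming
the "system host-label-list" command is available in your release):

::

  system host-label-list controller-1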

**********************************
Set up partitions for controller-1
**********************************

You need to create partitions on the root disk and then wait for them to
become ready:

- 34 GB partition size for nova-local (mandatory).

::

  source /etc/platform/openrc
  export COMPUTE=controller-1

  echo ">>> Getting root disk info"
  ROOT_DISK=$(system host-show ${COMPUTE} | grep rootfs | awk '{print $4}')
  ROOT_DISK_UUID=$(system host-disk-list ${COMPUTE} --nowrap | grep ${ROOT_DISK} | awk '{print $2}')
  echo "Root disk: $ROOT_DISK, UUID: $ROOT_DISK_UUID"

  echo ">>>> Configuring nova-local"
  NOVA_SIZE=34
  NOVA_PARTITION=$(system host-disk-partition-add -t lvm_phys_vol ${COMPUTE} ${ROOT_DISK_UUID} ${NOVA_SIZE})
  NOVA_PARTITION_UUID=$(echo ${NOVA_PARTITION} | grep -ow "| uuid | [a-z0-9\-]* |" | awk '{print $4}')
  system host-lvg-add ${COMPUTE} nova-local
  system host-pv-add ${COMPUTE} nova-local ${NOVA_PARTITION_UUID}

*******************************
Configure Ceph for controller-1
*******************************

Use the following commands to configure Ceph for controller-1:

::

  source /etc/platform/openrc

  echo ">>> Get disk & tier info"
  HOST="controller-1"
  DISKS=$(system host-disk-list ${HOST})
  TIERS=$(system storage-tier-list ceph_cluster)
  echo "Disks:"
  echo "$DISKS"
  echo "Tiers:"
  echo "$TIERS"

  echo ">>> Add OSDs to primary tier"
  system host-stor-add ${HOST} $(echo "$DISKS" | grep /dev/sdb | awk '{print $2}') --tier-uuid $(echo "$TIERS" | grep storage | awk '{print $2}')

  echo ">>> system host-stor-list ${HOST}"
  system host-stor-list ${HOST}
  echo ">>> ceph osd tree"
  ceph osd tree

*******************
Unlock controller-1
*******************

You must unlock controller-1 using the following commands:

::

  source /etc/platform/openrc
  system host-unlock controller-1

Wait for controller-1 to reboot before proceeding.

--------------------------------------------------------------------------
Using the system CLI to bring up and take down the containerized services
--------------------------------------------------------------------------

This section describes how to bring up and take down the containerized
services.

**********************************************
Generate the stx-openstack application tarball
**********************************************

Each build on the CENGN mirror generates the `stx-openstack application
tarballs `__.

Alternatively, in a development environment, you can run the following
command to construct the application tarballs:

::

  $MY_REPO_ROOT_DIR/cgcs-root/build-tools/build-helm-charts.sh

- You can find the resulting tarballs under
  $MY_WORKSPACE/std/build-helm/stx.

- By default, the latest stable StarlingX Docker images are used in the
  Armada manifest. You can build the application tarball with different
  image versions by specifying the image record files/URLs that contain
  the images you would like to use via the --image-record option. (The
  `starlingx image build records ` can be found on the CENGN mirror.)

- To construct a new name for the stx-openstack tarball, specify a label
  with the --label option. The name of the stx-openstack application
  tarball is **stx-openstack-(-