From ef4d759e87d11e6d53c22cf57a3b9296fa00c642 Mon Sep 17 00:00:00 2001
From: Stephen Finucane
Date: Mon, 12 Jul 2021 11:38:25 +0100
Subject: [PATCH] docs: Add new architecture guide

As with the cells v2 docs before this, we have a number of architecture
focused documents in tree. The 'user/architecture' guide is relatively
up-to-date but is quite shallow, while the 'admin/arch' guide is in-depth
but almost a decade out-of-date, with references to things like nova's
in-built block storage service. Replace most of the latter with more
up-to-date information and merge the former into it, before renaming the
file to 'admin/architecture'.

Change-Id: I518bb5d586b159b4796fb6139351ba423bc19639
Signed-off-by: Stephen Finucane
---
 doc/source/_extra/.htaccess                  |   6 +-
 .../admin/{arch.rst => architecture.rst}     | 190 ++++++++++--------
 doc/source/admin/index.rst                   | 163 ++++++++++-----
 doc/source/index.rst                         |   4 +-
 doc/source/user/architecture.rst             |  64 ------
 doc/source/user/index.rst                    |  80 +-------
 doc/test/redirect-tests.txt                  |   7 +-
 7 files changed, 240 insertions(+), 274 deletions(-)
 rename doc/source/admin/{arch.rst => architecture.rst} (62%)
 delete mode 100644 doc/source/user/architecture.rst

diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 79048f1ee869..d9292223bc63 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -6,7 +6,7 @@ redirectmatch 301 ^/nova/([^/]+)/aggregates.html$ /nova/$1/user/aggregates.html
 redirectmatch 301 ^/nova/([^/]+)/api_microversion_dev.html$ /nova/$1/contributor/microversions.html
 redirectmatch 301 ^/nova/([^/]+)/api_microversion_history.html$ /nova/$1/reference/api-microversion-history.html
 redirectmatch 301 ^/nova/([^/]+)/api_plugins.html$ /nova/$1/contributor/api.html
-redirectmatch 301 ^/nova/([^/]+)/architecture.html$ /nova/$1/user/architecture.html
+redirectmatch 301 ^/nova/([^/]+)/architecture.html$ /nova/$1/admin/architecture.html
 redirectmatch 301 ^/nova/([^/]+)/block_device_mapping.html$ /nova/$1/user/block-device-mapping.html
 redirectmatch 301 ^/nova/([^/]+)/blueprints.html$ /nova/$1/contributor/blueprints.html
 redirectmatch 301 ^/nova/([^/]+)/cells.html$ /nova/$1/admin/cells.html
@@ -64,6 +64,7 @@ redirectmatch 301 ^/nova/([^/]+)/testing/zero-downtime-upgrade.html$ /nova/$1/co
 redirectmatch 301 ^/nova/([^/]+)/threading.html$ /nova/$1/reference/threading.html
 redirectmatch 301 ^/nova/([^/]+)/upgrade.html$ /nova/$1/admin/upgrades.html
 redirectmatch 301 ^/nova/([^/]+)/user/aggregates.html$ /nova/$1/admin/aggregates.html
+redirectmatch 301 ^/nova/([^/]+)/user/architecture.html$ /nova/$1/admin/architecture.html
 redirectmatch 301 ^/nova/([^/]+)/user/cells.html$ /nova/$1/admin/cells.html
 redirectmatch 301 ^/nova/([^/]+)/user/cellsv2-layout.html$ /nova/$1/admin/cells.html
 redirectmatch 301 ^/nova/([^/]+)/user/cellsv2_layout.html$ /nova/$1/admin/cells.html
@@ -77,9 +78,10 @@ redirectmatch 301 ^/nova/([^/]+)/user/vendordata.html$ /nova/$1/user/metadata.ht
 redirectmatch 301 ^/nova/([^/]+)/vendordata.html$ /nova/$1/user/metadata.html
 redirectmatch 301 ^/nova/([^/]+)/vmstates.html$ /nova/$1/reference/vm-states.html
 redirectmatch 301 ^/nova/([^/]+)/wsgi.html$ /nova/$1/user/wsgi.html
+redirectmatch 301 ^/nova/([^/]+)/admin/arch.html$ /nova/$1/admin/architecture.html
 redirectmatch 301 ^/nova/([^/]+)/admin/adv-config.html$ /nova/$1/admin/index.html
 redirectmatch 301 ^/nova/([^/]+)/admin/configuration/schedulers.html$ /nova/$1/admin/scheduling.html
 redirectmatch 301 ^/nova/([^/]+)/admin/system-admin.html$ /nova/$1/admin/index.html
 redirectmatch 301 ^/nova/([^/]+)/admin/port_with_resource_request.html$ /nova/$1/admin/ports-with-resource-requests.html
-redirectmatch 301 ^/nova/([^/]+)/admin/manage-users.html$ /nova/$1/admin/arch.html
+redirectmatch 301 ^/nova/([^/]+)/admin/manage-users.html$ /nova/$1/admin/architecture.html
 redirectmatch 301 ^/nova/([^/]+)/admin/mitigation-for-Intel-MDS-security-flaws.html /nova/$1/admin/cpu-models.html

diff --git a/doc/source/admin/arch.rst b/doc/source/admin/architecture.rst
similarity index 62%
rename from doc/source/admin/arch.rst
rename to doc/source/admin/architecture.rst
index c141fabcbdf1..e0194dd78d8b 100644
--- a/doc/source/admin/arch.rst
+++ b/doc/source/admin/architecture.rst
@@ -1,45 +1,67 @@
-===================
-System architecture
-===================
+========================
+Nova System Architecture
+========================

-OpenStack Compute contains several main components.
+Nova comprises multiple server processes, each performing different
+functions. The user-facing interface is a REST API, while internally Nova
+components communicate via an RPC message passing mechanism.

-- The cloud controller represents the global state and interacts with the
-  other components. The ``API server`` acts as the web services front end for
-  the cloud controller. The ``compute controller`` provides compute server
-  resources and usually also contains the Compute service.
+The API servers process REST requests, which typically involve database
+reads/writes, optionally sending RPC messages to other Nova services,
+and generating responses to the REST calls.
+RPC messaging is done via the **oslo.messaging** library,
+an abstraction on top of message queues.
+Nova uses a messaging-based, ``shared nothing`` architecture. Most of the
+major nova components can be run on multiple servers, and have a manager that
+is listening for RPC messages.
+The one major exception is ``nova-compute``, where a single process runs on the
+hypervisor it is managing (except when using the VMware or Ironic drivers).
+The manager also, optionally, has periodic tasks.
+For more details on our RPC system, please see: :doc:`/reference/rpc`

-- The ``object store`` is an optional component that provides storage
-  services; you can also use OpenStack Object Storage instead.
+Nova also uses a central database that is (logically) shared between all
+components. However, to aid upgrade, the DB is accessed through an object
+layer that ensures an upgraded control plane can still communicate with
+a ``nova-compute`` running the previous release.
+To make this possible, ``nova-compute`` proxies DB requests over RPC to a
+central manager called ``nova-conductor``.

-- An ``auth manager`` provides authentication and authorization services when
-  used with the Compute system; you can also use OpenStack Identity as a
-  separate authentication service instead.
+To horizontally expand Nova deployments, we have a deployment sharding
+concept called cells. For more information please see: :doc:`/admin/cells`

-- A ``volume controller`` provides fast and permanent block-level storage for
-  the compute servers.

-- The ``network controller`` provides virtual networks to enable compute
-  servers to interact with each other and with the public network. You can also
-  use OpenStack Networking instead.

-- The ``scheduler`` is used to select the most suitable compute controller to
-  host an instance.
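+For example, once a deployment is running you can see the server processes
+that make it up, and the hosts they run on, with the
+``openstack compute service list`` command. The output below is only an
+illustration of a minimal deployment with one controller host and one compute
+host; some columns are omitted and your host names will differ:
+
+.. code-block:: console
+
+   $ openstack compute service list
+   +----+----------------+------------+----------+---------+-------+
+   | ID | Binary         | Host       | Zone     | Status  | State |
+   +----+----------------+------------+----------+---------+-------+
+   |  1 | nova-scheduler | controller | internal | enabled | up    |
+   |  2 | nova-conductor | controller | internal | enabled | up    |
+   |  3 | nova-compute   | compute-0  | nova     | enabled | up    |
+   +----+----------------+------------+----------+---------+-------+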
+Components
+----------
+
+Below you will find a helpful explanation of the key components
+of a typical Nova deployment.
+
+.. image:: /_static/images/architecture.svg
+   :width: 100%
+
+* **DB**: SQL database for data storage.
+
+* **API**: Component that receives HTTP requests, converts commands and
+  communicates with other components via the **oslo.messaging** queue or HTTP.
+
+* **Scheduler**: Decides which host gets each instance.
+
+* **Compute**: Manages communication with hypervisor and virtual machines.
+
+* **Conductor**: Handles requests that need coordination (build/resize), acts
+  as a database proxy, or handles object conversions.
+
+* :placement-doc:`Placement <>`: Tracks resource provider inventories and
+  usages.
+
+While all services are designed to be horizontally scalable, you should have
+significantly more computes than anything else.

-Compute uses a messaging-based, ``shared nothing`` architecture. All major
-components exist on multiple servers, including the compute, volume, and
-network controllers, and the Object Storage or Image service. The state of the
-entire system is stored in a database. The cloud controller communicates with
-the internal object store using HTTP, but it communicates with the scheduler,
-network controller, and volume controller using Advanced Message Queuing
-Protocol (AMQP). To avoid blocking a component while waiting for a response,
-Compute uses asynchronous calls, with a callback that is triggered when a
-response is received.

 Hypervisors
-~~~~~~~~~~~
+-----------

-Compute controls hypervisors through an API server. Selecting the best
+Nova controls hypervisors through an API server. Selecting the best
 hypervisor to use can be difficult, and you must take budget, resource
 constraints, supported features, and required technical specifications into
 account. However, the majority of OpenStack development is done on systems
@@ -47,7 +69,7 @@ using KVM-based hypervisors. For a detailed list of features and support across
 different hypervisors, see :doc:`/user/support-matrix`.

 You can also orchestrate clouds using multiple hypervisors in different
-availability zones. Compute supports the following hypervisors:
+availability zones. Nova supports the following hypervisors:

 - :ironic-doc:`Baremetal <>`

@@ -75,35 +97,29 @@ For more information about hypervisors, see
 :doc:`/admin/configuration/hypervisors` section in the Nova Configuration
 Reference.

-Projects, users, and roles
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-To begin using Compute, you must create a user with the
+Projects, users, and roles
+--------------------------
+
+To begin using Nova, you must create a user with the
 :keystone-doc:`Identity service <>`.

-The Compute system is designed to be used by different consumers in the form of
-projects on a shared system, and role-based access assignments. Roles control
+The Nova system is designed to be used by different consumers in the form of
+projects on a shared system, with role-based access assignments. Roles control
 the actions that a user is allowed to perform.

 Projects are isolated resource containers that form the principal
-organizational structure within the Compute service. They consist of an
+organizational structure within the Nova service. They typically consist of an
 individual VLAN, and volumes, instances, images, keys, and users. A user can
 specify the project by appending ``project_id`` to their access key. If no
-project is specified in the API request, Compute attempts to use a project with
+project is specified in the API request, Nova attempts to use a project with
 the same ID as the user.
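+For example, a project, a user, and a role assignment might be created in the
+Identity service as follows. The names used here are purely illustrative, and
+the role names available (such as ``member``) depend on how your deployment
+is configured:
+
+.. code-block:: console
+
+   $ openstack project create --description "An example project" example-project
+   $ openstack user create --project example-project --password-prompt example-user
+   $ openstack role add --project example-project --user example-user member
+
+Once the user holds a role in the project, they can start issuing requests
+against the Nova API for that project.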
-For projects, you can use quota controls to limit the:
-
-- Number of volumes that can be launched.
-
-- Number of processor cores and the amount of RAM that can be allocated.
-
-- Floating IP addresses assigned to any instance when it launches. This allows
-  instances to have the same publicly accessible IP addresses.
-
-- Fixed IP addresses assigned to the same instance when it launches. This
-  allows instances to have the same publicly or privately accessible IP
-  addresses.
+For projects, you can use quota controls to limit the number of processor cores
+and the amount of RAM that can be allocated. Other OpenStack services also
+allow quotas on their own resources. For example, :neutron-doc:`neutron
+` allows you to manage the number of networks that can
+be created within a project.

 Roles control the actions a user is allowed to perform. By default, most
 actions do not require a particular role, but you can configure them by editing
@@ -122,8 +138,9 @@ consumption across available hardware resources.
 ``project``. Because of this legacy terminology, some command-line tools use
 ``--tenant_id`` where you would normally expect to enter a project ID.

+
 Block storage
-~~~~~~~~~~~~~
+-------------

 OpenStack provides two classes of block storage: ephemeral storage and
 persistent volume.

 .. rubric:: Ephemeral storage

 Ephemeral storage includes a root ephemeral volume and an additional ephemeral
-volume.
+volume. These are provided by nova itself.

 The root disk is associated with an instance, and exists only for the life of
 this very instance. Generally, it is used to store an instance's root file
@@ -139,37 +156,34 @@ system, persists across the guest operating system reboots, and is removed on
 an instance deletion. The amount of the root ephemeral volume is defined by the
 flavor of an instance.

-In addition to the ephemeral root volume, all default types of flavors, except
-``m1.tiny``, which is the smallest one, provide an additional ephemeral block
-device sized between 20 and 160 GB (a configurable value to suit an
-environment). It is represented as a raw block device with no partition table
-or file system. A cloud-aware operating system can discover, format, and mount
-such a storage device. OpenStack Compute defines the default file system for
-different operating systems as Ext4 for Linux distributions, VFAT for non-Linux
-and non-Windows operating systems, and NTFS for Windows. However, it is
-possible to specify any other filesystem type by using ``virt_mkfs`` or
-``default_ephemeral_format`` configuration options.
+In addition to the ephemeral root volume, flavors can provide an additional
+ephemeral block device. It is represented as a raw block device with no
+partition table or file system. A cloud-aware operating system can discover,
+format, and mount such a storage device. Nova defines the default file system
+for different operating systems as ext4 for Linux distributions, VFAT for
+non-Linux and non-Windows operating systems, and NTFS for Windows. However, it
+is possible to configure other filesystem types.

 .. note::

    For example, the ``cloud-init`` package included into an Ubuntu's stock
-   cloud image, by default, formats this space as an Ext4 file system and
+   cloud image, by default, formats this space as an ext4 file system and
    mounts it on ``/mnt``. This is a cloud-init feature, and is not an OpenStack
    mechanism. OpenStack only provisions the raw storage.

 .. rubric:: Persistent volume

 A persistent volume is represented by a persistent virtualized block device
-independent of any particular instance, and provided by OpenStack Block
-Storage.
+independent of any particular instance. Such volumes are provided by the
+OpenStack Block Storage service, cinder.

-Only a single configured instance can access a persistent volume. Multiple
-instances cannot access a persistent volume. This type of configuration
-requires a traditional network file system to allow multiple instances
-accessing the persistent volume. It also requires a traditional network file
-system like NFS, CIFS, or a cluster file system such as GlusterFS. These
-systems can be built within an OpenStack cluster, or provisioned outside of it,
-but OpenStack software does not provide these features.
+Persistent volumes can be accessed by a single instance or attached to multiple
+instances. Sharing a volume between multiple instances requires a traditional
+network file system like NFS or CIFS, or a cluster file system such as
+GlusterFS. These systems can be built within an OpenStack cluster, or
+provisioned outside of it, but OpenStack software does not provide these
+features.

 You can configure a persistent volume as bootable and use it to provide a
 persistent virtual instance similar to the traditional non-cloud-based
@@ -190,17 +204,17 @@ configuration, see :cinder-doc:`Introduction to the Block Storage service

 Building blocks
-~~~~~~~~~~~~~~~
+---------------

 In OpenStack the base operating system is usually copied from an image stored
-in the OpenStack Image service. This is the most common case and results in an
-ephemeral instance that starts from a known template state and loses all
-accumulated states on virtual machine deletion. It is also possible to put an
-operating system on a persistent volume in the OpenStack Block Storage volume
-system. This gives a more traditional persistent system that accumulates states
-which are preserved on the OpenStack Block Storage volume across the deletion
-and re-creation of the virtual machine. To get a list of available images on
-your system, run:
+in the OpenStack Image service, glance. This is the most common case and
+results in an ephemeral instance that starts from a known template state and
+loses all accumulated states on virtual machine deletion. It is also possible
+to put an operating system on a persistent volume in the OpenStack Block
+Storage service. This gives a more traditional persistent system that
+accumulates states which are preserved on the OpenStack Block Storage volume
+across the deletion and re-creation of the virtual machine. To get a list of
+available images on your system, run:

 .. code-block:: console

@@ -230,10 +244,9 @@ The displayed image attributes are:
    field is blank.

 Virtual hardware templates are called ``flavors``. By default, these are
-configurable by admin users, however that behavior can be changed by redefining
-the access controls for ``compute_extension:flavormanage`` in
-``/etc/nova/policy.yaml`` on the ``compute-api`` server.
-For more information, refer to :doc:`/configuration/policy`.
+configurable by admin users; however, that behavior can be changed by
+redefining the access controls in ``policy.yaml`` on the ``nova-api`` server.
+For more information, refer to :doc:`/configuration/policy`.
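+For example, an administrator (or any user granted the relevant policy) might
+define a new flavor that also provides a 10 GB ephemeral disk. The flavor name
+and sizes used here are purely illustrative:
+
+.. code-block:: console
+
+   $ openstack flavor create --vcpus 2 --ram 4096 --disk 40 --ephemeral 10 \
+       m1.example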
For a list of flavors that are available on your system: @@ -250,8 +263,9 @@ For a list of flavors that are available on your system: | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | +-----+-----------+-------+------+-----------+-------+-----------+ -Compute service architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Nova service architecture +------------------------- These basic categories describe the service architecture and information about the cloud controller. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index 960034ab8ff3..c8515f3ec1a5 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -1,6 +1,6 @@ -======= -Compute -======= +=================== +Admin Documentation +=================== The OpenStack Compute service allows you to control an Infrastructure-as-a-Service (IaaS) cloud computing platform. It gives you @@ -28,57 +28,143 @@ responsibilities of services and drivers are: .. rubric:: Services -``nova-api`` - Receives XML requests and sends them to the rest of the system. A WSGI app - routes and authenticates requests. Supports the OpenStack Compute APIs. A - ``nova.conf`` configuration file is created when Compute is installed. +:doc:`nova-api-metadata ` + A server daemon that serves the Nova Metadata API. -.. todo:: +:doc:`nova-api-os-compute ` + A server daemon that serves the Nova OpenStack Compute API. - Describe nova-api-metadata, nova-api-os-compute, nova-serialproxy and - nova-spicehtml5proxy +:doc:`nova-api ` + A server daemon that serves the metadata and compute APIs in separate + greenthreads. - nova-console, nova-dhcpbridge and nova-xvpvncproxy are all deprecated for - removal so they can be ignored. - -``nova-compute`` +:doc:`nova-compute ` Manages virtual machines. Loads a Service object, and exposes the public methods on ComputeManager through a Remote Procedure Call (RPC). -``nova-conductor`` +:doc:`nova-conductor ` Provides database-access support for compute nodes (thereby reducing security risks). -``nova-scheduler`` +:doc:`nova-scheduler ` Dispatches requests for new virtual machines to the correct node. -``nova-novncproxy`` +:doc:`nova-novncproxy ` Provides a VNC proxy for browsers, allowing VNC consoles to access virtual machines. +:doc:`nova-spicehtml5proxy ` + Provides a SPICE proxy for browsers, allowing SPICE consoles to access + virtual machines. + +:doc:`nova-serialproxy ` + Provides a serial console proxy, allowing users to access a virtual machine's + serial console. + +The architecture is covered in much greater detail in +:doc:`/admin/architecture`. + +.. toctree:: + :maxdepth: 2 + + architecture + .. note:: Some services have drivers that change how the service implements its core functionality. For example, the ``nova-compute`` service supports drivers that let you choose which hypervisor type it can use. + +Deployment Considerations +------------------------- + +There is information you might want to consider before doing your deployment, +especially if it is going to be a larger deployment. For smaller deployments +the defaults from the :doc:`install guide ` will be sufficient. + +* **Compute Driver Features Supported**: While the majority of nova deployments use + libvirt/kvm, you can use nova with other compute drivers. Nova attempts to + provide a unified feature set across these, however, not all features are + implemented on all backends, and not all features are equally well tested. 
+
+  * :doc:`Feature Support by Use Case `: A view of
+    what features each driver supports based on what's important to some large
+    use cases (General Purpose Cloud, NFV Cloud, HPC Cloud).
+
+  * :doc:`Feature Support full list `: A detailed dive through
+    features in each compute driver backend.
+
+* :doc:`Cells v2 configuration `: For large deployments, cells v2
+  cells allow sharding of your compute environment. Upfront planning is key to
+  a successful cells v2 layout.
+
+* :doc:`Availability Zones `: Availability Zones are
+  an end-user visible logical abstraction for partitioning a cloud without
+  knowing the physical infrastructure. They can be used to partition a cloud on
+  arbitrary factors, such as location (country, datacenter, rack), network
+  layout and/or power source.
+
+* :placement-doc:`Placement service <>`: Overview of the placement
+  service, including how it fits in with the rest of nova.
+
+* :doc:`Running nova-api on wsgi `: Considerations for using a real
+  WSGI container instead of the baked-in eventlet web server.
+
 .. toctree::
    :maxdepth: 2

-   manage-volumes
-   flavors
+   cells
+   aggregates
    default-ports
-   admin-password-injection
+   availability-zones
+   configuration/index
+
+
+Basic configuration
+-------------------
+
+Once you have an OpenStack deployment up and running, you will want to manage
+it. The guides below cover everything from creating an initial flavor and
+image to log management and live migration of instances.
+
+* :doc:`Quotas `: Managing project quotas in nova.
+
+* :doc:`Scheduling `: How the scheduler is
+  configured, and how that will impact where compute instances land in your
+  environment. If you are seeing unexpected distribution of compute instances
+  in your hosts, you'll want to dive into this configuration.
+
+* :doc:`Exposing custom metadata to compute instances `: How
+  and when you might want to extend the basic metadata exposed to compute
+  instances (either via metadata server or config drive) for your specific
+  purposes.
+
 .. toctree::
    :maxdepth: 2

+   manage-the-cloud
+   services
+   service-groups
    manage-logs
    root-wrap-reference
+   ssh-configuration
    configuring-migrations
    live-migration-usage
+   secure-live-migration-with-qemu-native-tls
+   manage-volumes
+   flavors
+   admin-password-injection
    remote-console-access
-   service-groups
-   node-down
    scheduling
-   upgrades
+   config-drive
+   image-caching
+   metadata-service
+   quotas
+   networking
+   security-groups
+   security
+   vendordata


 Advanced configuration
@@ -125,34 +211,21 @@ instance for these kind of workloads.

     libvirt-misc

-Additional guides
------------------
+Maintenance
+-----------

-.. TODO(mriedem): This index page has a lot of content which should be
-   organized into groups for things like configuration, operations,
-   troubleshooting, etc.
+Once you are running nova, the following information is extremely useful.
+
+* :doc:`Upgrades `: How nova is designed to be upgraded for minimal
+  service impact, and the order you should do them in.

 ..
toctree:: :maxdepth: 2 - aggregates - arch - availability-zones - cells - config-drive - configuration/index + support-compute evacuate - image-caching - metadata-service migration migrate-instance-with-snapshot - networking - quotas - security-groups - security - services - ssh-configuration - support-compute - secure-live-migration-with-qemu-native-tls - vendordata + upgrades + node-down hw-machine-type diff --git a/doc/source/index.rst b/doc/source/index.rst index 80bd8bbc3536..98da0106f8ef 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -107,7 +107,7 @@ For Operators Architecture Overview --------------------- -* :doc:`Nova architecture `: An overview of how all the parts in +* :doc:`Nova architecture `: An overview of how all the parts in nova fit together. .. # NOTE(amotoki): toctree needs to be placed at the end of the secion to @@ -115,7 +115,7 @@ Architecture Overview .. toctree:: :hidden: - user/architecture + admin/architecture Installation ------------ diff --git a/doc/source/user/architecture.rst b/doc/source/user/architecture.rst deleted file mode 100644 index 2841cc000178..000000000000 --- a/doc/source/user/architecture.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Nova System Architecture -======================== - -Nova comprises multiple server processes, each performing different -functions. The user-facing interface is a REST API, while internally Nova -components communicate via an RPC message passing mechanism. - -The API servers process REST requests, which typically involve database -reads/writes, optionally sending RPC messages to other Nova services, -and generating responses to the REST calls. -RPC messaging is done via the **oslo.messaging** library, -an abstraction on top of message queues. -Most of the major nova components can be run on multiple servers, and have -a manager that is listening for RPC messages. -The one major exception is ``nova-compute``, where a single process runs on the -hypervisor it is managing (except when using the VMware or Ironic drivers). -The manager also, optionally, has periodic tasks. -For more details on our RPC system, please see: :doc:`/reference/rpc` - -Nova also uses a central database that is (logically) shared between all -components. However, to aid upgrade, the DB is accessed through an object -layer that ensures an upgraded control plane can still communicate with -a ``nova-compute`` running the previous release. -To make this possible nova-compute proxies DB requests over RPC to a -central manager called ``nova-conductor``. - -To horizontally expand Nova deployments, we have a deployment sharding -concept called cells. For more information please see: :doc:`/admin/cells` - -Components ----------- - -Below you will find a helpful explanation of the key components -of a typical Nova deployment. - -.. 
image:: /_static/images/architecture.svg - :width: 100% - -* DB: sql database for data storage. -* API: component that receives HTTP requests, converts commands and communicates with other components via the **oslo.messaging** queue or HTTP. -* Scheduler: decides which host gets each instance. -* Compute: manages communication with hypervisor and virtual machines. -* Conductor: handles requests that need coordination (build/resize), acts as a - database proxy, or handles object conversions. -* :placement-doc:`Placement <>`: tracks resource provider inventories and usages. - -While all services are designed to be horizontally scalable, you should have significantly more computes than anything else. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 743519a86ebd..0ecf4344e459 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -2,6 +2,15 @@ User Documentation ================== +The OpenStack Compute service allows you to control an +Infrastructure-as-a-Service (IaaS) cloud computing platform. It gives you +control over instances and networks, and allows you to manage access to the +cloud through users and projects. + +Compute does not include virtualization software. Instead, it defines drivers +that interact with underlying virtualization mechanisms that run on your host +operating system, and exposes functionality over a web-based API. + End user guide -------------- @@ -18,74 +27,3 @@ End user guide rescue block-device-mapping /reference/api-microversion-history - -.. todo:: The rest of this document should probably move to the admin guide. - -Architecture Overview ---------------------- - -* :doc:`Nova architecture `: An overview of how all the parts in - nova fit together. - -* :doc:`Block Device Mapping `: One of the more - complicated parts to understand is the Block Device Mapping parameters used - to connect specific block devices to computes. This deserves its own deep - dive. - -See the :ref:`reference guide ` for details about more -internal subsystems. - -Deployment Considerations -------------------------- - -There is information you might want to consider before doing your deployment, -especially if it is going to be a larger deployment. For smaller deployments -the defaults from the :doc:`install guide ` will be sufficient. - -* **Compute Driver Features Supported**: While the majority of nova deployments use - libvirt/kvm, you can use nova with other compute drivers. Nova attempts to - provide a unified feature set across these, however, not all features are - implemented on all backends, and not all features are equally well tested. - - * :doc:`Feature Support by Use Case `: A view of - what features each driver supports based on what's important to some large - use cases (General Purpose Cloud, NFV Cloud, HPC Cloud). - - * :doc:`Feature Support full list `: A detailed dive through - features in each compute driver backend. - -* :doc:`Cells v2 configuration `: For large deployments, cells v2 - cells allow sharding of your compute environment. Upfront planning is key to - a successful cells v2 layout. - -* :placement-doc:`Placement service <>`: Overview of the placement - service, including how it fits in with the rest of nova. - -* :doc:`Running nova-api on wsgi `: Considerations for using a real - WSGI container instead of the baked-in eventlet web server. - -Maintenance ------------ - -Once you are running nova, the following information is extremely useful. 
- -* :doc:`Admin Guide `: A collection of guides for administrating - nova. - -* :doc:`Quotas `: Managing project quotas in nova. - -* :doc:`Availablity Zones `: Availability Zones are - an end-user visible logical abstraction for partitioning a cloud without - knowing the physical infrastructure. They can be used to partition a cloud on - arbitrary factors, such as location (country, datacenter, rack), network - layout and/or power source. - -* :doc:`Scheduling `: How the scheduler is - configured, and how that will impact where compute instances land in your - environment. If you are seeing unexpected distribution of compute instances - in your hosts, you'll want to dive into this configuration. - -* :doc:`Exposing custom metadata to compute instances `: How - and when you might want to extend the basic metadata exposed to compute - instances (either via metadata server or config drive) for your specific - purposes. diff --git a/doc/test/redirect-tests.txt b/doc/test/redirect-tests.txt index 012ddfe7bc15..90ae3fa6b372 100644 --- a/doc/test/redirect-tests.txt +++ b/doc/test/redirect-tests.txt @@ -1,4 +1,5 @@ /nova/latest/addmethod.openstackapi.html 301 /nova/latest/contributor/api-2.html +/nova/latest/admin/arch.html 301 /nova/latest/admin/architecture.html /nova/latest/admin/flavors2.html 301 /nova/latest/admin/flavors.html /nova/latest/admin/quotas2.html 301 /nova/latest/admin/quotas.html /nova/latest/admin/numa.html 301 /nova/latest/admin/cpu-topologies.html @@ -6,7 +7,7 @@ /nova/latest/api_microversion_dev.html 301 /nova/latest/contributor/microversions.html /nova/latest/api_microversion_history.html 301 /nova/latest/reference/api-microversion-history.html /nova/latest/api_plugins.html 301 /nova/latest/contributor/api.html -/nova/latest/architecture.html 301 /nova/latest/user/architecture.html +/nova/latest/architecture.html 301 /nova/latest/admin/architecture.html /nova/latest/block_device_mapping.html 301 /nova/latest/user/block-device-mapping.html /nova/latest/blueprints.html 301 /nova/latest/contributor/blueprints.html /nova/latest/cells.html 301 /nova/latest/admin/cells.html @@ -64,6 +65,7 @@ /nova/latest/threading.html 301 /nova/latest/reference/threading.html /nova/latest/upgrade.html 301 /nova/latest/admin/upgrades.html /nova/latest/user/aggregates.html 301 /nova/latest/admin/aggregates.html +/nova/latest/user/architecture.html 301 /nova/latest/admin/architecture.html /nova/latest/user/cells.html 301 /nova/latest/admin/cells.html /nova/latest/user/cellsv2_layout.html 301 /nova/latest/admin/cells.html /nova/latest/user/cellsv2-layout.html 301 /nova/latest/admin/cells.html @@ -77,9 +79,10 @@ /nova/latest/vendordata.html 301 /nova/latest/user/metadata.html /nova/latest/vmstates.html 301 /nova/latest/reference/vm-states.html /nova/latest/wsgi.html 301 /nova/latest/user/wsgi.html +/nova/latest/admin/arch.html 301 /nova/latest/admin/architecture.html /nova/latest/admin/adv-config.html 301 /nova/latest/admin/index.html /nova/latest/admin/configuration/schedulers.html 301 /nova/latest/admin/scheduling.html /nova/latest/admin/system-admin.html 301 /nova/latest/admin/index.html /nova/latest/admin/port_with_resource_request.html 301 /nova/latest/admin/ports-with-resource-requests.html -/nova/latest/admin/manage-users.html 301 /nova/latest/admin/arch.html +/nova/latest/admin/manage-users.html 301 /nova/latest/admin/architecture.html /nova/latest/admin/mitigation-for-Intel-MDS-security-flaws.html 301 /nova/latest/admin/cpu-models.html
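The mappings above can be spot-checked against a published copy of the
documentation. Assuming the rendered guides are served from
https://docs.openstack.org/, following one of the old URLs with ``curl``
should show a ``Location`` header that points at the new page, along the
lines of:

.. code-block:: console

   $ curl -sI https://docs.openstack.org/nova/latest/admin/arch.html | grep -i '^location'
   location: https://docs.openstack.org/nova/latest/admin/architecture.html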