diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf
index 5f2e6fc764..4d27ad7084 100644
--- a/doc-tools-check-languages.conf
+++ b/doc-tools-check-languages.conf
@@ -7,8 +7,8 @@ declare -A BOOKS=(
["cs"]="install-guide"
["de"]="install-guide"
["fr"]="install-guide"
- ["id"]="image-guide install-guide networking-guide user-guide"
- ["ja"]="ha-guide image-guide install-guide networking-guide ops-guide user-guide"
+ ["id"]="image-guide install-guide user-guide"
+ ["ja"]="ha-guide image-guide install-guide ops-guide user-guide"
["ko_KR"]="install-guide"
["ru"]="install-guide"
["tr_TR"]="image-guide install-guide arch-design"
@@ -20,8 +20,8 @@ declare -A DRAFTS=(
["cs"]="install-guide"
["de"]="install-guide"
["fr"]="install-guide"
- ["id"]="install-guide networking-guide"
- ["ja"]="install-guide networking-guide"
+ ["id"]="install-guide"
+ ["ja"]="install-guide"
["ko_KR"]="install-guide"
["ru"]="install-guide"
["tr_TR"]="install-guide"
diff --git a/doc/common/app-support.rst b/doc/common/app-support.rst
index bac76c2ee7..ef893bb9db 100644
--- a/doc/common/app-support.rst
+++ b/doc/common/app-support.rst
@@ -52,7 +52,7 @@ The following books explain how to configure and run an OpenStack cloud:
* `Operations Guide `_
-* `Networking Guide `_
+* `Networking Guide `_
* `High Availability Guide `_
diff --git a/doc/networking-guide/setup.cfg b/doc/networking-guide/setup.cfg
deleted file mode 100644
index ce07239218..0000000000
--- a/doc/networking-guide/setup.cfg
+++ /dev/null
@@ -1,27 +0,0 @@
-[metadata]
-name = openstacknetworkingguide
-summary = OpenStack Networking Guide
-author = OpenStack
-author-email = openstack-docs@lists.openstack.org
-home-page = https://docs.openstack.org/
-classifier =
-Environment :: OpenStack
-Intended Audience :: Information Technology
-Intended Audience :: System Administrators
-License :: OSI Approved :: Apache Software License
-Operating System :: POSIX :: Linux
-Topic :: Documentation
-
-[global]
-setup-hooks =
- pbr.hooks.setup_hook
-
-[files]
-
-[build_sphinx]
-warning-is-error = 1
-build-dir = build
-source-dir = source
-
-[wheel]
-universal = 1
diff --git a/doc/networking-guide/setup.py b/doc/networking-guide/setup.py
deleted file mode 100644
index 736375744d..0000000000
--- a/doc/networking-guide/setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
-setuptools.setup(
- setup_requires=['pbr'],
- pbr=True)
diff --git a/doc/networking-guide/source/common b/doc/networking-guide/source/common
deleted file mode 120000
index dc879abe93..0000000000
--- a/doc/networking-guide/source/common
+++ /dev/null
@@ -1 +0,0 @@
-../../common
\ No newline at end of file
diff --git a/doc/networking-guide/source/conf.py b/doc/networking-guide/source/conf.py
deleted file mode 100644
index 772d02deb2..0000000000
--- a/doc/networking-guide/source/conf.py
+++ /dev/null
@@ -1,305 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import os
-# import sys
-
-import openstackdocstheme
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-# sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = ['sphinx.ext.todo',
- 'sphinxmark',
- 'openstackdocstheme']
-
-# Add any paths that contain templates here, relative to this directory.
-# templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-repository_name = "openstack/openstack-manuals"
-bug_project = 'openstack-manuals'
-project = u'Networking Guide'
-bug_tag = u'networking-guide'
-copyright = u'2015-2017, OpenStack contributors'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '15.0'
-# The full version, including alpha/beta/rc tags.
-release = '15.0.0'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-# language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['common/cli*', 'common/nova*', 'common/get-started-*']
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'openstackdocs'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-# html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = [openstackdocstheme.get_html_theme_path()]
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# " v documentation".
-# html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = []
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-# html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# So that we can enable "log-a-bug" links from each output HTML page, this
-# variable must be set to a format that includes year, month, day, hours and
-# minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-html_use_index = False
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = False
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'networking-guide'
-
-# If true, publish source files
-html_copy_source = False
-
-# -- Options for LaTeX output ---------------------------------------------
-pdf_theme_path = openstackdocstheme.get_pdf_theme_path()
-openstack_logo = openstackdocstheme.get_openstack_logo_path()
-
-latex_custom_template = r"""
-\newcommand{\openstacklogo}{%s}
-\usepackage{%s}
-""" % (openstack_logo, pdf_theme_path)
-
-latex_engine = 'xelatex'
-
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- 'papersize': 'a4paper',
-
- # The font size ('10pt', '11pt' or '12pt').
- 'pointsize': '11pt',
-
- #Default figure align
- 'figure_align': 'H',
-
- # Not to generate blank page after chapter
- 'classoptions': ',openany',
-
- # Additional stuff for the LaTeX preamble.
- 'preamble': latex_custom_template,
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- ('index', 'NetworkingGuide.tex', u'Networking Guide',
- u'OpenStack contributors', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'networkingguide', u'Networking Guide',
- [u'OpenStack contributors'], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'NetworkingGuide', u'Networking Guide',
- u'OpenStack contributors', 'NetworkingGuide',
- 'This guide shows OpenStack operators and deployers how to configure'
- 'OpenStack Networking for various common scenarios. It also provides'
- 'basic networking context for those who may not be networking experts.'
- 'commands.', 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
-
-# -- Options for Internationalization output ------------------------------
-locale_dirs = ['locale/']
-
-# -- Options for sphinxmark -----------------------------------------------
-sphinxmark_enable = True
-sphinxmark_div = 'docs-body'
-sphinxmark_image = 'text'
-sphinxmark_text = 'Pike'
diff --git a/doc/networking-guide/source/config-address-scopes.rst b/doc/networking-guide/source/config-address-scopes.rst
deleted file mode 100644
index d976d6e4c6..0000000000
--- a/doc/networking-guide/source/config-address-scopes.rst
+++ /dev/null
@@ -1,538 +0,0 @@
-.. _config-address-scopes:
-
-==============
-Address scopes
-==============
-
-Address scopes build from subnet pools. While subnet pools provide a mechanism
-for controlling the allocation of addresses to subnets, address scopes show
-where addresses can be routed between networks, preventing the use of
-overlapping addresses in any two subnets. Because all addresses allocated in
-the address scope do not overlap, neutron routers do not NAT between your
-projects' network and your external network. As long as the addresses within
-an address scope match, the Networking service performs simple routing
-between networks.
-
-Accessing address scopes
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Anyone with access to the Networking service can create their own address
-scopes. However, network administrators can create shared address scopes,
-allowing other projects to create networks within that address scope.
-
-Access to addresses in a scope are managed through subnet pools.
-Subnet pools can either be created in an address scope, or updated to belong
-to an address scope.
-
-With subnet pools, all addresses in use within the address
-scope are unique from the point of view of the address scope owner. Therefore,
-add more than one subnet pool to an address scope if the
-pools have different owners, allowing for delegation of parts of the
-address scope. Delegation prevents address overlap across the
-whole scope. Otherwise, you receive an error if two pools have the same
-address ranges.
-
-Each router interface is associated with an address scope by looking at
-subnets connected to the network. When a router connects
-to an external network with matching address scopes, network traffic routes
-between without Network address translation (NAT).
-The router marks all traffic connections originating from each interface
-with its corresponding address scope. If traffic leaves an interface in the
-wrong scope, the router blocks the traffic.
-
-Backwards compatibility
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Networks created before the Mitaka release do not
-contain explicitly named address scopes, unless the network contains
-subnets from a subnet pool that belongs to a created or updated
-address scope. The Networking service preserves backwards compatibility with
-pre-Mitaka networks through special address scope properties so that
-these networks can perform advanced routing:
-
-#. Unlimited address overlap is allowed.
-#. Neutron routers, by default, will NAT traffic from internal networks
- to external networks.
-#. Pre-Mitaka address scopes are not visible through the API. You cannot
- list address scopes or show details. Scopes exist
- implicitly as a catch-all for addresses that are not explicitly scoped.
-
-Create shared address scopes as an administrative user
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section shows how to set up shared address scopes to
-allow simple routing for project networks with the same subnet pools.
-
-.. note:: Irrelevant fields have been trimmed from the output of
- these commands for brevity.
-
-#. Create IPv6 and IPv4 address scopes:
-
- .. code-block:: console
-
- $ openstack address scope create --share --ip-version 6 address-scope-ip6
-
- +------------+--------------------------------------+
- | Field | Value |
- +------------+--------------------------------------+
- | headers | |
- | id | 28424dfc-9abd-481b-afa3-1da97a8fead7 |
- | ip_version | 6 |
- | name | address-scope-ip6 |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | shared | True |
- +------------+--------------------------------------+
-
- .. code-block:: console
-
- $ openstack address scope create --share --ip-version 4 address-scope-ip4
-
- +------------+--------------------------------------+
- | Field | Value |
- +------------+--------------------------------------+
- | headers | |
- | id | 3193bd62-11b5-44dc-acf8-53180f21e9f2 |
- | ip_version | 4 |
- | name | address-scope-ip4 |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | shared | True |
- +------------+--------------------------------------+
-
-
-#. Create subnet pools specifying the name (or UUID) of the address
- scope that the subnet pool belongs to. If you have existing
- subnet pools, use the :command:`openstack subnet pool set` command to put
- them in a new address scope:
-
- .. code-block:: console
-
- $ openstack subnet pool create --address-scope address-scope-ip6 \
- --share --pool-prefix 2001:db8:a583::/48 --default-prefix-length 64 \
- subnet-pool-ip6
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | 28424dfc-9abd-481b-afa3-1da97a8fead7 |
- | created_at | 2016-12-13T22:53:30Z |
- | default_prefixlen | 64 |
- | default_quota | None |
- | description | |
- | id | a59ff52b-0367-41ff-9781-6318b927dd0e |
- | ip_version | 6 |
- | is_default | False |
- | max_prefixlen | 128 |
- | min_prefixlen | 64 |
- | name | subnet-pool-ip6 |
- | prefixes | 2001:db8:a583::/48 |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 1 |
- | shared | True |
- | updated_at | 2016-12-13T22:53:30Z |
- +-------------------+--------------------------------------+
-
-
- .. code-block:: console
-
- $ openstack subnet pool create --address-scope address-scope-ip4 \
- --share --pool-prefix 203.0.113.0/24 --default-prefix-length 26 \
- subnet-pool-ip4
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | 3193bd62-11b5-44dc-acf8-53180f21e9f2 |
- | created_at | 2016-12-13T22:55:09Z |
- | default_prefixlen | 26 |
- | default_quota | None |
- | description | |
- | id | d02af70b-d622-426f-8e60-ed9df2a8301f |
- | ip_version | 4 |
- | is_default | False |
- | max_prefixlen | 32 |
- | min_prefixlen | 8 |
- | name | subnet-pool-ip4 |
- | prefixes | 203.0.113.0/24 |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 1 |
- | shared | True |
- | updated_at | 2016-12-13T22:55:09Z |
- +-------------------+--------------------------------------+
-
-
-#. Make sure that subnets on an external network are created
- from the subnet pools created above:
-
- .. code-block:: console
-
- $ openstack subnet show ipv6-public-subnet
- +-------------------+------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------+
- | allocation_pools | 2001:db8:a583::2-2001:db8:a583:0:ffff:ff |
- | | ff:ffff:ffff |
- | cidr | 2001:db8:a583::/64 |
- | created_at | 2016-12-10T21:36:04Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 2001:db8:a583::1 |
- | host_routes | |
- | id | b333bf5a-758c-4b3f-97ec-5f12d9bfceb7 |
- | ip_version | 6 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | ipv6-public-subnet |
- | network_id | 05a8d31e-330b-4d96-a3fa-884b04abfa4c |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 2 |
- | segment_id | None |
- | service_types | |
- | subnetpool_id | a59ff52b-0367-41ff-9781-6318b927dd0e |
- | updated_at | 2016-12-10T21:36:04Z |
- +-------------------+------------------------------------------+
-
-
- .. code-block:: console
-
- $ openstack subnet show public-subnet
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 203.0.113.2-203.0.113.62 |
- | cidr | 203.0.113.0/26 |
- | created_at | 2016-12-10T21:35:52Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | False |
- | gateway_ip | 203.0.113.1 |
- | host_routes | |
- | id | 7fd48240-3acc-4724-bc82-16c62857edec |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | public-subnet |
- | network_id | 05a8d31e-330b-4d96-a3fa-884b04abfa4c |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 2 |
- | segment_id | None |
- | service_types | |
- | subnetpool_id | d02af70b-d622-426f-8e60-ed9df2a8301f |
- | updated_at | 2016-12-10T21:35:52Z |
- +-------------------+--------------------------------------+
-
-Routing with address scopes for non-privileged users
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section shows how non-privileged users can use address scopes to
-route straight to an external network without NAT.
-
-#. Create a couple of networks to host subnets:
-
- .. code-block:: console
-
- $ openstack network create network1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2016-12-13T23:21:01Z |
- | description | |
- | headers | |
- | id | 1bcf3fe9-a0cb-4d88-a067-a4d7f8e635f0 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | network1 |
- | port_security_enabled | True |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 94 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-13T23:21:01Z |
- +---------------------------+--------------------------------------+
-
-
- .. code-block:: console
-
- $ openstack network create network2
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2016-12-13T23:21:45Z |
- | description | |
- | headers | |
- | id | 6c583603-c097-4141-9c5c-288b0e49c59f |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | network2 |
- | port_security_enabled | True |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 81 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-13T23:21:45Z |
- +---------------------------+--------------------------------------+
-
-#. Create a subnet not associated with a subnet pool or
- an address scope:
-
- .. code-block:: console
-
- $ openstack subnet create --network network1 --subnet-range \
- 198.51.100.0/26 subnet-ip4-1
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 198.51.100.2-198.51.100.62 |
- | cidr | 198.51.100.0/26 |
- | created_at | 2016-12-13T23:24:16Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 198.51.100.1 |
- | headers | |
- | host_routes | |
- | id | 66874039-d31b-4a27-85d7-14c89341bbb7 |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-ip4-1 |
- | network_id | 1bcf3fe9-a0cb-4d88-a067-a4d7f8e635f0 |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | None |
- | updated_at | 2016-12-13T23:24:16Z |
- +-------------------+--------------------------------------+
-
-
- .. code-block:: console
-
- $ openstack subnet create --network network1 --ipv6-ra-mode slaac \
- --ipv6-address-mode slaac --ip-version 6 --subnet-range \
- 2001:db8:80d2:c4d3::/64 subnet-ip6-1
- +-------------------+-----------------------------------------+
- | Field | Value |
- +-------------------+-----------------------------------------+
- | allocation_pools | 2001:db8:80d2:c4d3::2-2001:db8:80d2:c4d |
- | | 3:ffff:ffff:ffff:ffff |
- | cidr | 2001:db8:80d2:c4d3::/64 |
- | created_at | 2016-12-13T23:28:28Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 2001:db8:80d2:c4d3::1 |
- | headers | |
- | host_routes | |
- | id | a7551b23-2271-4a88-9c41-c84b048e0722 |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | slaac |
- | name | subnet-ip6-1 |
- | network_id | 1bcf3fe9-a0cb-4d88-a067-a4d7f8e635f0 |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | None |
- | updated_at | 2016-12-13T23:28:28Z |
- +-------------------+-----------------------------------------+
-
-
-#. Create a subnet using a subnet pool associated with an address scope
- from an external network:
-
- .. code-block:: console
-
- $ openstack subnet create --subnet-pool subnet-pool-ip4 \
- --network network2 subnet-ip4-2
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 203.0.113.2-203.0.113.62 |
- | cidr | 203.0.113.0/26 |
- | created_at | 2016-12-13T23:32:12Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 203.0.113.1 |
- | headers | |
- | host_routes | |
- | id | 12be8e8f-5871-4091-9e9e-4e0651b9677e |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet-ip4-2 |
- | network_id | 6c583603-c097-4141-9c5c-288b0e49c59f |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | d02af70b-d622-426f-8e60-ed9df2a8301f |
- | updated_at | 2016-12-13T23:32:12Z |
- +-------------------+--------------------------------------+
-
- .. code-block:: console
-
- $ openstack subnet create --ip-version 6 --ipv6-ra-mode slaac \
- --ipv6-address-mode slaac --subnet-pool subnet-pool-ip6 \
- --network network2 subnet-ip6-2
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 2001:db8:a583::2-2001:db8:a583:0:fff |
- | | f:ffff:ffff:ffff |
- | cidr | 2001:db8:a583::/64 |
- | created_at | 2016-12-13T23:31:17Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 2001:db8:a583::1 |
- | headers | |
- | host_routes | |
- | id | b599c2be-e3cd-449c-ba39-3cfcc744c4be |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | slaac |
- | name | subnet-ip6-2 |
- | network_id | 6c583603-c097-4141-9c5c-288b0e49c59f |
- | project_id | 098429d072d34d3596c88b7dbf7e91b6 |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | a59ff52b-0367-41ff-9781-6318b927dd0e |
- | updated_at | 2016-12-13T23:31:17Z |
- +-------------------+--------------------------------------+
-
- By creating subnets from scoped subnet pools, the network is
- associated with the address scope.
-
- .. code-block:: console
-
- $ openstack network show network2
- +---------------------------+------------------------------+
- | Field | Value |
- +---------------------------+------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | nova |
- | created_at | 2016-12-13T23:21:45Z |
- | description | |
- | id | 6c583603-c097-4141-9c5c- |
- | | 288b0e49c59f |
- | ipv4_address_scope | 3193bd62-11b5-44dc- |
- | | acf8-53180f21e9f2 |
- | ipv6_address_scope | 28424dfc-9abd-481b- |
- | | afa3-1da97a8fead7 |
- | mtu | 1450 |
- | name | network2 |
- | port_security_enabled | True |
- | project_id | 098429d072d34d3596c88b7dbf7e |
- | | 91b6 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 81 |
- | revision_number | 10 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | 12be8e8f-5871-4091-9e9e- |
- | | 4e0651b9677e, b599c2be-e3cd- |
- | | 449c-ba39-3cfcc744c4be |
- | tags | [] |
- | updated_at | 2016-12-13T23:32:12Z |
- +---------------------------+------------------------------+
-
-#. Connect a router to each of the project subnets that have been created, for
- example, using a router called ``router1``:
-
- .. code-block:: console
-
- $ openstack router add subnet router1 subnet-ip4-1
- $ openstack router add subnet router1 subnet-ip4-2
- $ openstack router add subnet router1 subnet-ip6-1
- $ openstack router add subnet router1 subnet-ip6-2
-
-Checking connectivity
----------------------
-
-This example shows how to check the connectivity between networks
-with address scopes.
-
-#. Launch two instances, ``instance1`` on ``network1`` and
- ``instance2`` on ``network2``. Associate a floating IP address to both
- instances.
-
-#. Adjust security groups to allow pings and SSH (both IPv4 and IPv6):
-
- .. code-block:: console
-
- $ openstack server list
- +--------------+-----------+---------------------------------------------------------------------------+------------+
- | ID | Name | Networks | Image Name |
- +--------------+-----------+---------------------------------------------------------------------------+------------+
- | 97e49c8e-... | instance1 | network1=2001:db8:80d2:c4d3:f816:3eff:fe52:b69f, 198.51.100.3, 203.0.113.3| cirros |
- | ceba9638-... | instance2 | network2=203.0.113.3, 2001:db8:a583:0:f816:3eff:fe42:1eeb, 203.0.113.4 | centos |
- +--------------+-----------+---------------------------------------------------------------------------+------------+
-
-Regardless of address scopes, the floating IPs can be pinged from the
-external network:
-
-.. code-block:: console
-
- $ ping -c 1 203.0.113.3
- 1 packets transmitted, 1 received, 0% packet loss, time 0ms
- $ ping -c 1 203.0.113.4
- 1 packets transmitted, 1 received, 0% packet loss, time 0ms
-
-You can now ping ``instance2`` directly because ``instance2`` shares the
-same address scope as the external network:
-
-.. note:: BGP routing can be used to automatically set up a static
- route for your instances.
-
-.. code-block:: console
-
- # ip route add 203.0.113.0/26 via 203.0.113.2
- $ ping -c 1 203.0.113.3
- 1 packets transmitted, 1 received, 0% packet loss, time 0ms
-
-.. code-block:: console
-
- # ip route add 2001:db8:a583::/64 via 2001:db8::1
- $ ping6 -c 1 2001:db8:a583:0:f816:3eff:fe42:1eeb
- 1 packets transmitted, 1 received, 0% packet loss, time 0ms
-
-You cannot ping ``instance1`` directly because the address scopes do not
-match:
-
-.. code-block:: console
-
- # ip route add 198.51.100.0/26 via 203.0.113.2
- $ ping -c 1 198.51.100.3
- 1 packets transmitted, 0 received, 100% packet loss, time 0ms
-
-.. code-block:: console
-
- # ip route add 2001:db8:80d2:c4d3::/64 via 2001:db8::1
- $ ping6 -c 1 2001:db8:80d2:c4d3:f816:3eff:fe52:b69f
- 1 packets transmitted, 0 received, 100% packet loss, time 0ms
-
-If the address scopes match between
-networks then pings and other traffic route directly through. If the
-scopes do not match between networks, the router either drops the
-traffic or applies NAT to cross scope boundaries.
diff --git a/doc/networking-guide/source/config-auto-allocation.rst b/doc/networking-guide/source/config-auto-allocation.rst
deleted file mode 100644
index 3326df2d2c..0000000000
--- a/doc/networking-guide/source/config-auto-allocation.rst
+++ /dev/null
@@ -1,243 +0,0 @@
-.. _config-auto-allocation:
-
-==========================================
-Automatic allocation of network topologies
-==========================================
-
-The auto-allocation feature introduced in Mitaka simplifies the procedure of
-setting up an external connectivity for end-users, and is also known as **Get
-Me A Network**.
-
-Previously, a user had to configure a range of networking resources to boot
-a server and get access to the Internet. For example, the following steps
-are required:
-
-* Create a network
-* Create a subnet
-* Create a router
-* Uplink the router on an external network
-* Downlink the router on the previously created subnet
-
-These steps need to be performed on each logical segment that a VM needs to
-be connected to, and may require networking knowledge the user might not
-have.
-
-This feature is designed to automate the basic networking provisioning for
-projects. The steps to provision a basic network are run during instance
-boot, making the networking setup hands-free.
-
-To make this possible, provide a default external network and default
-subnetpools (one for IPv4, or one for IPv6, or one of each) so that the
-Networking service can choose what to do in lieu of input. Once these are in
-place, users can boot their VMs without specifying any networking details.
-The Compute service will then use this feature automatically to wire user
-VMs.
-
-Enabling the deployment for auto-allocation
--------------------------------------------
-
-To use this feature, the neutron service must have the following extensions
-enabled:
-
-* ``auto-allocated-topology``
-* ``subnet_allocation``
-* ``external-net``
-* ``router``
-
-Before the end-user can use the auto-allocation feature, the operator must
-create the resources that will be used for the auto-allocated network
-topology creation. To perform this task, proceed with the following steps:
-
-#. Set up a default external network
-
- Setting up an external network is described in
- `OpenStack Administrator Guide
- `_.
- Assuming the external network to be used for the auto-allocation feature
- is named ``public``, make it the ``default`` external network
- with the following command:
-
- .. code-block:: console
-
- $ openstack network set public --default
-
- .. note::
-
- The flag ``--default`` (and ``--no-default`` flag) is only effective
- with external networks and has no effects on regular (or internal)
- networks.
-
-#. Create default subnetpools
-
- The auto-allocation feature requires at least one default
- subnetpool. One for IPv4, or one for IPv6, or one of each.
-
- .. code-block:: console
-
- $ openstack subnet pool create --share --default \
- --pool-prefix 192.0.2.0/24 --default-prefix-length 26 \
- shared-default
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | None |
- | created_at | 2017-01-12T15:10:34Z |
- | default_prefixlen | 26 |
- | default_quota | None |
- | description | |
- | headers | |
- | id | b41b7b9c-de57-4c19-b1c5-731985bceb7f |
- | ip_version | 4 |
- | is_default | True |
- | max_prefixlen | 32 |
- | min_prefixlen | 8 |
- | name | shared-default |
- | prefixes | 192.0.2.0/24 |
- | project_id | 86acdbd1d72745fd8e8320edd7543400 |
- | revision_number | 1 |
- | shared | True |
- | updated_at | 2017-01-12T15:10:34Z |
- +-------------------+--------------------------------------+
-
- $ openstack subnet pool create --share --default \
- --pool-prefix 2001:db8:8000::/48 --default-prefix-length 64 \
- default-v6
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | None |
- | created_at | 2017-01-12T15:14:35Z |
- | default_prefixlen | 64 |
- | default_quota | None |
- | description | |
- | headers | |
- | id | 6f387016-17f0-4564-96ad-e34775b6ea14 |
- | ip_version | 6 |
- | is_default | True |
- | max_prefixlen | 128 |
- | min_prefixlen | 64 |
- | name | default-v6 |
- | prefixes | 2001:db8:8000::/48 |
- | project_id | 86acdbd1d72745fd8e8320edd7543400 |
- | revision_number | 1 |
- | shared | True |
- | updated_at | 2017-01-12T15:14:35Z |
- +-------------------+--------------------------------------+
-
-Get Me A Network
-----------------
-
-In a deployment where the operator has set up the resources as described above,
-they can get their auto-allocated network topology as follows:
-
-.. code-block:: console
-
- $ openstack network auto allocated topology create --or-show
- +------------+--------------------------------------+
- | Field | Value |
- +------------+--------------------------------------+
- | id | a380c780-d6cd-4510-a4c0-1a6ec9b85a29 |
- | name | None |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- +------------+--------------------------------------+
-
-.. note::
-
- When the ``--or-show`` option is used the command returns the topology
- information if it already exists.
-
-Operators (and users with admin role) can get the auto-allocated topology for a
-project by specifying the project ID:
-
-.. code-block:: console
-
- $ openstack network auto allocated topology create --project \
- cfd1889ac7d64ad891d4f20aef9f8d7c --or-show
- +------------+--------------------------------------+
- | Field | Value |
- +------------+--------------------------------------+
- | id | a380c780-d6cd-4510-a4c0-1a6ec9b85a29 |
- | name | None |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- +------------+--------------------------------------+
-
-The ID returned by this command is a network which can be used for booting
-a VM.
-
-.. code-block:: console
-
- $ openstack server create --flavor m1.small --image \
- cirros-0.3.5-x86_64-uec --nic \
- net-id=8b835bfb-cae2-4acc-b53f-c16bb5f9a7d0 vm1
-
-The auto-allocated topology for a user never changes. In practice, when a user
-boots a server omitting the ``--nic`` option, and there is more than one
-network available, the Compute service will invoke the API behind
-``auto allocated topology create``, fetch the network UUID, and pass it on
-during the boot process.
-
-Validating the requirements for auto-allocation
------------------------------------------------
-
-To validate that the required resources are correctly set up for
-auto-allocation, without actually provisioning anything, use
-the ``--check-resources`` option:
-
-.. code-block:: console
-
- $ openstack network auto allocated topology create --check-resources
- Deployment error: No default router:external network.
-
- $ openstack network set public --default
-
- $ openstack network auto allocated topology create --check-resources
- Deployment error: No default subnetpools defined.
-
- $ openstack subnet pool set shared-default --default
-
- $ openstack network auto allocated topology create --check-resources
- +---------+-------+
- | Field | Value |
- +---------+-------+
- | dry-run | pass |
- +---------+-------+
-
-The validation option behaves identically for all users. However, it
-is considered primarily an admin or service utility since it is the
-operator who must set up the requirements.
-
-Project resources created by auto-allocation
---------------------------------------------
-
-The auto-allocation feature creates one network topology in every project
-where it is used. The auto-allocated network topology for a project contains
-the following resources:
-
-+--------------------+------------------------------+
-|Resource |Name |
-+====================+==============================+
-|network |``auto_allocated_network`` |
-+--------------------+------------------------------+
-|subnet (IPv4) |``auto_allocated_subnet_v4`` |
-+--------------------+------------------------------+
-|subnet (IPv6) |``auto_allocated_subnet_v6`` |
-+--------------------+------------------------------+
-|router |``auto_allocated_router`` |
-+--------------------+------------------------------+
-
-Compatibility notes
--------------------
-
-Nova uses the ``auto allocated topology`` feature with API micro
-version 2.37 or later. This is because, unlike the neutron feature
-which was implemented in the Mitaka release, the integration for
-nova was completed during the Newton release cycle. Note that
-the CLI option ``--nic`` can be omitted regardless of the microversion
-used as long as there is no more than one network available to the
-project, in which case nova fails with a 400 error because it
-does not know which network to use. Furthermore, nova does not start
-using the feature, regardless of whether or not a user requests
-micro version 2.37 or later, unless all of the ``nova-compute``
-services are running Newton-level code.
diff --git a/doc/networking-guide/source/config-az.rst b/doc/networking-guide/source/config-az.rst
deleted file mode 100644
index f140d16a22..0000000000
--- a/doc/networking-guide/source/config-az.rst
+++ /dev/null
@@ -1,378 +0,0 @@
-.. _config-az:
-
-==================
-Availability zones
-==================
-
-An availability zone groups network nodes that run services like DHCP, L3, FW,
-and others. It is defined as an agent's attribute on the network node. This
-allows users to associate an availability zone with their resources so that the
-resources get high availability.
-
-
-Use case
---------
-
-An availability zone is used to make network resources highly available. The
-operators group the nodes that are attached to different power sources under
-separate availability zones and configure scheduling for resources with high
-availability so that they are scheduled on different availability zones.
-
-
-Required extensions
--------------------
-
-The core plug-in must support the ``availability_zone`` extension. The core
-plug-in also must support the ``network_availability_zone`` extension to
-schedule a network according to availability zones. The ``Ml2Plugin`` supports
-it. The router service plug-in must support the ``router_availability_zone``
-extension to schedule a router according to the availability zones. The
-``L3RouterPlugin`` supports it.
-
-.. code-block:: console
-
- $ openstack extension list --network -c Alias -c Name
- +---------------------------+---------------------------+
- | Name | Alias |
- +---------------------------+---------------------------+
- ...
- | Network Availability Zone | network_availability_zone |
- ...
- | Availability Zone | availability_zone |
- ...
- | Router Availability Zone | router_availability_zone |
- ...
- +---------------------------+---------------------------+
-
-
-Availability zone of agents
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The ``availability_zone`` attribute can be defined in ``dhcp-agent`` and
-``l3-agent``. To define an availability zone for each agent, set the
-value into ``[AGENT]`` section of ``/etc/neutron/dhcp_agent.ini`` or
-``/etc/neutron/l3_agent.ini``:
-
-.. code-block:: ini
-
- [AGENT]
- availability_zone = zone-1
-
-To confirm the agent's availability zone:
-
-.. code-block:: console
-
- $ openstack network agent show 116cc128-4398-49af-a4ed-3e95494cd5fc
- +---------------------+---------------------------------------------------+
- | Field | Value |
- +---------------------+---------------------------------------------------+
- | admin_state_up | UP |
- | agent_type | DHCP agent |
- | alive | True |
- | availability_zone | zone-1 |
- | binary | neutron-dhcp-agent |
- | configurations | dhcp_driver='neutron.agent.linux.dhcp.Dnsmasq', |
- | | dhcp_lease_duration='86400', |
- | | log_agent_heartbeats='False', networks='2', |
- | | notifies_port_ready='True', ports='6', subnets='4 |
- | created_at | 2016-12-14 00:25:54 |
- | description | None |
- | heartbeat_timestamp | 2016-12-14 06:20:24 |
- | host | ankur-desktop |
- | id | 116cc128-4398-49af-a4ed-3e95494cd5fc |
- | started_at | 2016-12-14 00:25:54 |
- | topic | dhcp_agent |
- +---------------------+---------------------------------------------------+
-
- $ openstack network agent show 9632309a-2aa4-4304-8603-c4de02c4a55f
- +---------------------+-------------------------------------------------+
- | Field | Value |
- +---------------------+-------------------------------------------------+
- | admin_state_up | UP |
- | agent_type | L3 agent |
- | alive | True |
- | availability_zone | zone-1 |
- | binary | neutron-l3-agent |
- | configurations | agent_mode='legacy', ex_gw_ports='2', |
- | | external_network_bridge='', floating_ips='0', |
- | | gateway_external_network_id='', |
- | | handle_internal_only_routers='True', |
- | | interface_driver='openvswitch', interfaces='4', |
- | | log_agent_heartbeats='False', routers='2' |
- | created_at | 2016-12-14 00:25:58 |
- | description | None |
- | heartbeat_timestamp | 2016-12-14 06:20:28 |
- | host | ankur-desktop |
- | id | 9632309a-2aa4-4304-8603-c4de02c4a55f |
- | started_at | 2016-12-14 00:25:58 |
- | topic | l3_agent |
- +---------------------+-------------------------------------------------+
-
-
-Availability zone related attributes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following attributes are added into network and router:
-
-.. list-table::
- :header-rows: 1
- :widths: 25 10 10 10 50
-
- * - Attribute name
- - Access
- - Required
- - Input type
- - Description
-
- * - availability_zone_hints
- - RW(POST only)
- - No
- - list of string
- - availability zone candidates for the resource
-
- * - availability_zones
- - RO
- - N/A
- - list of string
- - availability zones for the resource
-
-Use ``availability_zone_hints`` to specify the zone in which the resource is
-hosted:
-
-.. code-block:: console
-
- $ openstack network create --availability-zone-hint zone-1 \
- --availability-zone-hint zone-2 net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | zone-1 |
- | | zone-2 |
- | availability_zones | |
- | created_at | 2016-12-14T06:23:36Z |
- | description | |
- | headers | |
- | id | ad88e059-e7fa-4cf7-8857-6731a2a3a554 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | net1 |
- | port_security_enabled | True |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 77 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-14T06:23:37Z |
- +---------------------------+--------------------------------------+
-
-
-
-.. code-block:: console
-
- $ openstack router create --ha --availability-zone-hint zone-1 \
- --availability-zone-hint zone-2 router1
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | zone-1 |
- | | zone-2 |
- | availability_zones | |
- | created_at | 2016-12-14T06:25:40Z |
- | description | |
- | distributed | False |
- | external_gateway_info | null |
- | flavor_id | None |
- | ha | False |
- | headers | |
- | id | ced10262-6cfe-47c1-8847-cd64276a868c |
- | name | router1 |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | revision_number | 3 |
- | routes | |
- | status | ACTIVE |
- | updated_at | 2016-12-14T06:25:40Z |
- +-------------------------+--------------------------------------+
-
-
-
-Availability zone is selected from ``default_availability_zones`` in
-``/etc/neutron/neutron.conf`` if a resource is created without
-``availability_zone_hints``:
-
-.. code-block:: ini
-
- default_availability_zones = zone-1,zone-2
-
-To confirm the availability zone defined by the system:
-
-.. code-block:: console
-
- $ openstack availability zone list
- +-----------+-------------+
- | Zone Name | Zone Status |
- +-----------+-------------+
- | zone-1 | available |
- | zone-2 | available |
- | zone-1 | available |
- | zone-2 | available |
- +-----------+-------------+
-
-Look at the ``availability_zones`` attribute of each resource to confirm in
-which zone the resource is hosted:
-
-.. code-block:: console
-
- $ openstack network show net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | zone-1 |
- | | zone-2 |
- | availability_zones | zone-1 |
- | | zone-2 |
- | created_at | 2016-12-14T06:23:36Z |
- | description | |
- | headers | |
- | id | ad88e059-e7fa-4cf7-8857-6731a2a3a554 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | net1 |
- | port_security_enabled | True |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 77 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-14T06:23:37Z |
- +---------------------------+--------------------------------------+
-
-.. code-block:: console
-
- $ openstack router show router1
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | zone-1 |
- | | zone-2 |
- | availability_zones | zone-1 |
- | | zone-2 |
- | created_at | 2016-12-14T06:25:40Z |
- | description | |
- | distributed | False |
- | external_gateway_info | null |
- | flavor_id | None |
- | ha | False |
- | headers | |
- | id | ced10262-6cfe-47c1-8847-cd64276a868c |
- | name | router1 |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | revision_number | 3 |
- | routes | |
- | status | ACTIVE |
- | updated_at | 2016-12-14T06:25:40Z |
- +-------------------------+--------------------------------------+
-
-.. note::
-
- The ``availability_zones`` attribute does not have a value until the
- resource is scheduled. Once the Networking service schedules the resource
- to zones according to ``availability_zone_hints``, ``availability_zones``
- shows in which zone the resource is hosted practically. The
- ``availability_zones`` may not match ``availability_zone_hints``. For
- example, even if you specify a zone with ``availability_zone_hints``, all
- agents of the zone may be dead before the resource is scheduled. In
- general, they should match, unless there are failures or there is no
- capacity left in the zone requested.
-
-
-Availability zone aware scheduler
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Network scheduler
------------------
-
-Set ``AZAwareWeightScheduler`` to ``network_scheduler_driver`` in
-``/etc/neutron/neutron.conf`` so that the Networking service schedules a
-network according to the availability zone:
-
-.. code-block:: ini
-
- network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler
- dhcp_load_type = networks
-
-The Networking service schedules a network to one of the agents within the
-selected zone as with ``WeightScheduler``. In this case, scheduler refers to
-``dhcp_load_type`` as well.
-
-
-Router scheduler
-----------------
-
-Set ``AZLeastRoutersScheduler`` to ``router_scheduler_driver`` in file
-``/etc/neutron/neutron.conf`` so that the Networking service schedules a router
-according to the availability zone:
-
-.. code-block:: ini
-
- router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler
-
-The Networking service schedules a router to one of the agents within the
-selected zone as with ``LeastRouterScheduler``.
-
-
-Achieving high availability with availability zone
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Although, the Networking service provides high availability for routers and
-high availability and fault tolerance for networks' DHCP services, availability
-zones provide an extra layer of protection by segmenting a Networking service
-deployment in isolated failure domains. By deploying HA nodes across different
-availability zones, it is guaranteed that network services remain available in
-face of zone-wide failures that affect the deployment.
-
-This section explains how to get high availability with the availability zone
-for L3 and DHCP. You should naturally set above configuration options for the
-availability zone.
-
-L3 high availability
---------------------
-
-Set the following configuration options in file ``/etc/neutron/neutron.conf``
-so that you get L3 high availability.
-
-.. code-block:: ini
-
- l3_ha = True
- max_l3_agents_per_router = 3
-
-HA routers are created on availability zones you selected when creating the
-router.
-
-DHCP high availability
-----------------------
-
-Set the following configuration options in file ``/etc/neutron/neutron.conf``
-so that you get DHCP high availability.
-
-.. code-block:: ini
-
- dhcp_agents_per_network = 2
-
-DHCP services are created on availability zones you selected when creating the
-network.
diff --git a/doc/networking-guide/source/config-bgp-dynamic-routing.rst b/doc/networking-guide/source/config-bgp-dynamic-routing.rst
deleted file mode 100644
index cd8dd03698..0000000000
--- a/doc/networking-guide/source/config-bgp-dynamic-routing.rst
+++ /dev/null
@@ -1,880 +0,0 @@
-.. _config-bgp-dynamic-routing:
-
-===================
-BGP dynamic routing
-===================
-
-BGP dynamic routing enables advertisement of self-service (private) network
-prefixes to physical network devices that support BGP such as routers, thus
-removing the conventional dependency on static routes. The feature relies
-on :ref:`address scopes ` and requires knowledge of
-their operation for proper deployment.
-
-BGP dynamic routing consists of a service plug-in and an agent. The service
-plug-in implements the Networking service extension and the agent manages BGP
-peering sessions. A cloud administrator creates and configures a BGP speaker
-using the CLI or API and manually schedules it to one or more hosts running
-the agent. Agents can reside on hosts with or without other Networking
-service agents. Prefix advertisement depends on the binding of external
-networks to a BGP speaker and the address scope of external and internal
-IP address ranges or subnets.
-
-.. image:: figures/bgp-dynamic-routing-overview.png
- :alt: BGP dynamic routing overview
-
-.. note::
-
- Although self-service networks generally use private IP address ranges
- (RFC1918) for IPv4 subnets, BGP dynamic routing can advertise any IPv4
- address ranges.
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-The example configuration involves the following components:
-
-* One BGP agent.
-
-* One address scope containing IP address range 203.0.113.0/24 for
- provider networks, and IP address ranges 192.0.2.0/25 and 192.0.2.128/25
- for self-service networks.
-
-* One provider network using IP address range 203.0.113.0/24.
-
-* Three self-service networks.
-
- * Self-service networks 1 and 2 use IP address ranges inside of
- the address scope.
-
- * Self-service network 3 uses a unique IP address range 198.51.100.0/24 to
- demonstrate that the BGP speaker does not advertise prefixes outside
- of address scopes.
-
-* Three routers. Each router connects one self-service network to the
- provider network.
-
- * Router 1 contains IP addresses 203.0.113.11 and 192.0.2.1
-
- * Router 2 contains IP addresses 203.0.113.12 and 192.0.2.129
-
- * Router 3 contains IP addresses 203.0.113.13 and 198.51.100.1
-
-.. note::
-
- The example configuration assumes sufficient knowledge about the
- Networking service, routing, and BGP. For basic deployment of the
- Networking service, consult one of the
- :ref:`deploy`. For more information on BGP, see
- `RFC 4271 `_.
-
-Controller node
----------------
-
-* In the ``neutron.conf`` file, enable the conventional layer-3 and BGP
- dynamic routing service plug-ins:
-
- .. code-block:: ini
-
- [DEFAULT]
- service_plugins = neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin,neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
-
-Agent nodes
------------
-
-* In the ``bgp_dragent.ini`` file:
-
- * Configure the driver.
-
- .. code-block:: ini
-
- [BGP]
- bgp_speaker_driver = neutron_dynamic_routing.services.bgp.agent.driver.ryu.driver.RyuBgpDriver
-
- .. note::
-
- The agent currently only supports the Ryu BGP driver.
-
- * Configure the router ID.
-
- .. code-block:: ini
-
- [BGP]
- bgp_router_id = ROUTER_ID
-
- Replace ``ROUTER_ID`` with a suitable unique 32-bit number, typically an
- IPv4 address on the host running the agent. For example, 192.0.2.2.
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of each BGP dynamic routing agent.
-
- .. code-block:: console
-
- $ neutron agent-list --agent-type="BGP dynamic routing agent"
- +--------------------------------------+---------------------------+------------+-------------------+-------+----------------+---------------------------+
- | id | agent_type | host | availability_zone | alive | admin_state_up | binary |
- +--------------------------------------+---------------------------+------------+-------------------+-------+----------------+---------------------------+
- | 37729181-2224-48d8-89ef-16eca8e2f77e | BGP dynamic routing agent | controller | | :-) | True | neutron-bgp-dragent |
- +--------------------------------------+---------------------------+------------+-------------------+-------+----------------+---------------------------+
-
-Create the address scope and subnet pools
------------------------------------------
-
-#. Create an address scope. The provider (external) and self-service networks
- must belong to the same address scope for the agent to advertise those
- self-service network prefixes.
-
- .. code-block:: console
-
- $ openstack address scope create --share --ip-version 4 bgp
-
- +------------+--------------------------------------+
- | Field | Value |
- +------------+--------------------------------------+
- | headers | |
- | id | f71c958f-dbe8-49a2-8fb9-19c5f52a37f1 |
- | ip_version | 4 |
- | name | bgp |
- | project_id | 86acdbd1d72745fd8e8320edd7543400 |
- | shared | True |
- +------------+--------------------------------------+
-
-#. Create subnet pools. The provider and self-service networks use different
- pools.
-
- * Create the provider network pool.
-
- .. code-block:: console
-
- $ openstack subnet pool create --pool-prefix 203.0.113.0/24 \
- --address-scope bgp provider
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | f71c958f-dbe8-49a2-8fb9-19c5f52a37f1 |
- | created_at | 2017-01-12T14:58:57Z |
- | default_prefixlen | 8 |
- | default_quota | None |
- | description | |
- | headers | |
- | id | 63532225-b9a0-445a-9935-20a15f9f68d1 |
- | ip_version | 4 |
- | is_default | False |
- | max_prefixlen | 32 |
- | min_prefixlen | 8 |
- | name | provider |
- | prefixes | 203.0.113.0/24 |
- | project_id | 86acdbd1d72745fd8e8320edd7543400 |
- | revision_number | 1 |
- | shared | False |
- | updated_at | 2017-01-12T14:58:57Z |
- +-------------------+--------------------------------------+
-
- * Create the self-service network pool.
-
- .. code-block:: console
-
- $ openstack subnet pool create --pool-prefix 192.0.2.0/25 \
- --pool-prefix 192.0.2.128/25 --address-scope bgp \
- --share selfservice
-
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | f71c958f-dbe8-49a2-8fb9-19c5f52a37f1 |
- | created_at | 2017-01-12T15:02:31Z |
- | default_prefixlen | 8 |
- | default_quota | None |
- | description | |
- | headers | |
- | id | 8d8270b1-b194-4b7e-914c-9c741dcbd49b |
- | ip_version | 4 |
- | is_default | False |
- | max_prefixlen | 32 |
- | min_prefixlen | 8 |
- | name | selfservice |
- | prefixes | 192.0.2.0/25, 192.0.2.128/25 |
- | project_id | 86acdbd1d72745fd8e8320edd7543400 |
- | revision_number | 1 |
- | shared | True |
- | updated_at | 2017-01-12T15:02:31Z |
- +-------------------+--------------------------------------+
-
-Create the provider and self-service networks
----------------------------------------------
-
-#. Create the provider network.
-
- .. code-block:: console
-
- $ openstack network create provider --external --provider-physical-network \
- provider --provider-network-type flat
- Created a new network:
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2016-12-21T08:47:41Z |
- | description | |
- | headers | |
- | id | 190ca651-2ee3-4a4b-891f-dedda47974fe |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | False |
- | mtu | 1450 |
- | name | provider |
- | port_security_enabled | True |
- | project_id | c961a8f6d3654657885226378ade8220 |
- | provider:network_type | flat |
- | provider:physical_network | provider |
- | provider:segmentation_id | 66 |
- | revision_number | 3 |
- | router:external | External |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-21T08:47:41Z |
- +---------------------------+--------------------------------------+
-
-#. Create a subnet on the provider network using an IP address range from
- the provider subnet pool.
-
- .. code-block:: console
-
- $ neutron subnet-create --name provider --subnetpool provider \
- --prefixlen 24 --allocation-pool start=203.0.113.11,end=203.0.113.254 \
- --gateway 203.0.113.1 provider
- Created a new subnet:
- +-------------------+---------------------------------------------------+
- | Field | Value |
- +-------------------+---------------------------------------------------+
- | allocation_pools | {"start": "203.0.113.11", "end": "203.0.113.254"} |
- | cidr | 203.0.113.0/24 |
- | created_at | 2016-03-17T23:17:16 |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 203.0.113.1 |
- | host_routes | |
- | id | 8ed65d41-2b2a-4f3a-9f92-45adb266e01a |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | provider |
- | network_id | 68ec148c-181f-4656-8334-8f4eb148689d |
- | subnetpool_id | 3771c0e7-7096-46d3-a3bd-699c58e70259 |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | updated_at | 2016-03-17T23:17:16 |
- +-------------------+---------------------------------------------------+
-
- .. note::
-
- The IP address allocation pool starting at ``.11`` improves clarity of
- the diagrams. You can safely omit it.
-
-#. Create the self-service networks.
-
- .. code-block:: console
-
- $ openstack network create selfservice1
- Created a new network:
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2016-12-21T08:49:38Z |
- | description | |
- | headers | |
- | id | 9d842606-ef3d-4160-9ed9-e03fa63aed96 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | selfservice1 |
- | port_security_enabled | True |
- | project_id | c961a8f6d3654657885226378ade8220 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 106 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-21T08:49:38Z |
- +---------------------------+--------------------------------------+
-
- $ openstack network create selfservice2
- Created a new network:
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2016-12-21T08:50:05Z |
- | description | |
- | headers | |
- | id | f85639e1-d23f-438e-b2b1-f40570d86b1c |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | selfservice2 |
- | port_security_enabled | True |
- | project_id | c961a8f6d3654657885226378ade8220 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 21 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-21T08:50:05Z |
- +---------------------------+--------------------------------------+
-
- $ openstack network create selfservice3
- Created a new network:
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2016-12-21T08:50:35Z |
- | description | |
- | headers | |
- | id | eeccdb82-5cf4-4999-8ab3-e7dc99e7d43b |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | selfservice3 |
- | port_security_enabled | True |
- | project_id | c961a8f6d3654657885226378ade8220 |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 86 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2016-12-21T08:50:35Z |
- +---------------------------+--------------------------------------+
-
-#. Create a subnet on the first two self-service networks using an IP address
- range from the self-service subnet pool.
-
- .. code-block:: console
-
- $ neutron subnet-create --name selfservice1 --subnetpool selfservice \
- --prefixlen 25 selfservice1
- Created a new subnet:
- +-------------------+----------------------------------------------------+
- | Field | Value |
- +-------------------+----------------------------------------------------+
- | allocation_pools | {"start": "192.0.2.2", "end": "192.0.2.127"} |
- | cidr | 192.0.2.0/25 |
- | created_at | 2016-03-17T23:20:20 |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 198.51.100.1 |
- | host_routes | |
- | id | 8edd3dc2-df40-4d71-816e-a4586d61c809 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | selfservice1 |
- | network_id | be79de1e-5f56-11e6-9dfb-233e41cec48c |
- | subnetpool_id | c7e9737a-cfd3-45b5-a861-d1cee1135a92 |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | updated_at | 2016-03-17T23:20:20 |
- +-------------------+----------------------------------------------------+
-
- $ neutron subnet-create --name selfservice2 --subnetpool selfservice \
- --prefixlen 25 selfservice2
- Created a new subnet:
- +-------------------+------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------+
- | allocation_pools | {"start": "192.0.2.130", "end": "192.0.2.254"} |
- | cidr | 192.0.2.128/25 |
- | created_at | 2016-03-17T23:20:20 |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 192.0.2.129 |
- | host_routes | |
- | id | 8edd3dc2-df40-4d71-816e-a4586d61c809 |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | selfservice2 |
- | network_id | c1fd9846-5f56-11e6-a8ac-0f998d9cc0a2 |
- | subnetpool_id | c7e9737a-cfd3-45b5-a861-d1cee1135a92 |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | updated_at | 2016-03-17T23:20:20 |
- +-------------------+------------------------------------------------+
-
-#. Create a subnet on the last self-service network using an IP address
- range outside of the address scope.
-
- .. code-block:: console
-
- $ neutron subnet-create --name subnet3 selfservice3 198.51.100.0/24
- Created a new subnet:
- +-------------------+----------------------------------------------------+
- | Field | Value |
- +-------------------+----------------------------------------------------+
- | allocation_pools | {"start": "198.51.100.2", "end": "198.51.100.254"} |
- | cidr | 198.51.100.0/24 |
- | created_at | 2016-03-17T23:20:20 |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 198.51.100.1 |
- | host_routes | |
- | id | cd9f9156-5f59-11e6-aeec-172ec7ee939a |
- | ip_version | 4 |
- | ipv6_address_mode | |
- | ipv6_ra_mode | |
- | name | selfservice3 |
- | network_id | c283dc1c-5f56-11e6-bfb6-efc30e1eb73b |
- | subnetpool_id | |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | updated_at | 2016-03-17T23:20:20 |
- +-------------------+----------------------------------------------------+
-
-Create and configure the routers
---------------------------------
-
-#. Create the routers.
-
- .. code-block:: console
-
- $ openstack router create router1
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-10T13:15:19Z |
- | description | |
- | distributed | False |
- | external_gateway_info | null |
- | flavor_id | None |
- | ha | False |
- | headers | |
- | id | 3f6f4ef8-63be-11e6-bbb3-2fbcef363ab8 |
- | name | router1 |
- | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | revision_number | 1 |
- | routes | |
- | status | ACTIVE |
- | updated_at | 2017-01-10T13:15:19Z |
- +-------------------------+--------------------------------------+
-
- $ openstack router create router2
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-10T13:15:19Z |
- | description | |
- | distributed | False |
- | external_gateway_info | null |
- | flavor_id | None |
- | ha | False |
- | headers | |
- | id | 3fd21a60-63be-11e6-9c95-5714c208c499 |
- | name | router2 |
- | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | revision_number | 1 |
- | routes | |
- | status | ACTIVE |
- | updated_at | 2017-01-10T13:15:19Z |
- +-------------------------+--------------------------------------+
-
- $ openstack router create router3
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-10T13:15:19Z |
- | description | |
- | distributed | False |
- | external_gateway_info | null |
- | flavor_id | None |
- | ha | False |
- | headers | |
- | id | 40069a4c-63be-11e6-9ecc-e37c1eaa7e84 |
- | name | router3 |
- | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- | revision_number | 1 |
- | routes | |
- | status | ACTIVE |
- | updated_at | 2017-01-10T13:15:19Z |
- +-------------------------+--------------------------------------+
-
-#. For each router, add one self-service subnet as an interface on the router.
-
- .. code-block:: console
-
- $ neutron router-interface-add router1 selfservice1
- Added interface 90e3880a-5f5c-11e6-914c-9f3e20c8c151 to router router1.
-
- $ neutron router-interface-add router2 selfservice2
- Added interface 91628362-5f5c-11e6-826a-7322fb03a821 to router router2.
-
- $ neutron router-interface-add router3 selfservice3
- Added interface 91d51044-5f5c-11e6-bf55-ffd180541cc2 to router router3.
-
-#. Add the provider network as a gateway on each router.
-
- .. code-block:: console
-
- $ neutron router-gateway-set router1 provider
- Set gateway for router router1
-
- $ neutron router-gateway-set router2 provider
- Set gateway for router router2
-
- $ neutron router-gateway-set router3 provider
- Set gateway for router router3
-
-Create and configure the BGP speaker
-------------------------------------
-
-The BGP speaker advertises the next-hop IP address for eligible self-service
-networks and floating IP addresses for instances using those networks.
-
-#. Create the BGP speaker.
-
- .. code-block:: console
-
- $ neutron bgp-speaker-create --ip-version 4 \
- --local-as LOCAL_AS bgpspeaker
- Created a new bgp_speaker:
- +-----------------------------------+--------------------------------------+
- | Field | Value |
- +-----------------------------------+--------------------------------------+
- | advertise_floating_ip_host_routes | True |
- | advertise_tenant_networks | True |
- | id | 5f227f14-4f46-4eca-9524-fc5a1eabc358 |
- | ip_version | 4 |
- | local_as | 1234 |
- | name | bgpspeaker |
- | networks | |
- | peers | |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- +-----------------------------------+--------------------------------------+
-
- Replace ``LOCAL_AS`` with an appropriate local autonomous system number.
- The example configuration uses AS 1234.
-
-#. A BGP speaker requires association with a provider network to determine
- eligible prefixes. The association builds a list of all virtual routers
- with gateways on provider and self-service networks in the same address
- scope so the BGP speaker can advertise self-service network prefixes with
- the corresponding router as the next-hop IP address. Associate the BGP
- speaker with the provider network.
-
- .. code-block:: console
-
- $ neutron bgp-speaker-network-add bgpspeaker provider
- Added network provider to BGP speaker bgpspeaker.
-
-#. Verify association of the provider network with the BGP speaker.
-
- .. code-block:: console
-
- $ neutron bgp-speaker-show bgpspeaker
- +-----------------------------------+--------------------------------------+
- | Field | Value |
- +-----------------------------------+--------------------------------------+
- | advertise_floating_ip_host_routes | True |
- | advertise_tenant_networks | True |
- | id | 5f227f14-4f46-4eca-9524-fc5a1eabc358 |
- | ip_version | 4 |
- | local_as | 1234 |
- | name | bgpspeaker |
- | networks | 68ec148c-181f-4656-8334-8f4eb148689d |
- | peers | |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- +-----------------------------------+--------------------------------------+
-
-#. Verify the prefixes and next-hop IP addresses that the BGP speaker
- advertises.
-
- .. code-block:: console
-
- $ neutron bgp-speaker-advertiseroute-list bgpspeaker
- +-----------------+--------------+
- | destination | next_hop |
- +-----------------+--------------+
- | 192.0.2.0/25 | 203.0.113.11 |
- | 192.0.2.128/25 | 203.0.113.12 |
- +-----------------+--------------+
-
-#. Create a BGP peer.
-
- .. code-block:: console
-
- $ neutron bgp-peer-create --peer-ip 192.0.2.1 \
- --remote-as REMOTE_AS bgppeer
- Created a new bgp_peer:
- +-----------+--------------------------------------+
- | Field | Value |
- +-----------+--------------------------------------+
- | auth_type | none |
- | id | 35c89ca0-ac5a-4298-a815-0b073c2362e9 |
- | name | bgppeer |
- | peer_ip | 192.0.2.1 |
- | remote_as | 4321 |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- +-----------+--------------------------------------+
-
- Replace ``REMOTE_AS`` with an appropriate remote autonomous system number.
- The example configuration uses AS 4321 which triggers EBGP peering.
-
- .. note::
-
- The host containing the BGP agent must have layer-3 connectivity to
- the provider router.
-
-#. Add a BGP peer to the BGP speaker.
-
- .. code-block:: console
-
- $ neutron bgp-speaker-peer-add bgpspeaker bgppeer
- Added BGP peer bgppeer to BGP speaker bgpspeaker.
-
-#. Verify addition of the BGP peer to the BGP speaker.
-
- .. code-block:: console
-
- $ neutron bgp-speaker-show bgpspeaker
- +-----------------------------------+--------------------------------------+
- | Field | Value |
- +-----------------------------------+--------------------------------------+
- | advertise_floating_ip_host_routes | True |
- | advertise_tenant_networks | True |
- | id | 5f227f14-4f46-4eca-9524-fc5a1eabc358 |
- | ip_version | 4 |
- | local_as | 1234 |
- | name | bgpspeaker |
- | networks | 68ec148c-181f-4656-8334-8f4eb148689d |
- | peers | 35c89ca0-ac5a-4298-a815-0b073c2362e9 |
- | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d |
- +-----------------------------------+--------------------------------------+
-
- .. note::
-
- After creating a peering session, you cannot change the local or remote
- autonomous system numbers.
-
-Schedule the BGP speaker to an agent
-------------------------------------
-
-#. Unlike most agents, BGP speakers require manual scheduling to an agent.
- BGP speakers only form peering sessions and begin prefix advertisement
- after scheduling to an agent. Schedule the BGP speaker to agent
- ``37729181-2224-48d8-89ef-16eca8e2f77e``.
-
- .. code-block:: console
-
- $ neutron bgp-dragent-speaker-add 37729181-2224-48d8-89ef-16eca8e2f77e bgpspeaker
- Associated BGP speaker bgpspeaker to the Dynamic Routing agent.
-
-#. Verify scheduling of the BGP speaker to the agent.
-
- .. code-block:: console
-
- $ neutron bgp-dragent-list-hosting-speaker bgpspeaker
- +--------------------------------------+------------+----------------+-------+
- | id | host | admin_state_up | alive |
- +--------------------------------------+------------+----------------+-------+
- | 37729181-2224-48d8-89ef-16eca8e2f77e | controller | True | :-) |
- +--------------------------------------+------------+----------------+-------+
-
- $ neutron bgp-speaker-list-on-dragent 37729181-2224-48d8-89ef-16eca8e2f77e
- +--------------------------------------+------------+----------+------------+
- | id | name | local_as | ip_version |
- +--------------------------------------+------------+----------+------------+
- | 5f227f14-4f46-4eca-9524-fc5a1eabc358 | bgpspeaker | 1234 | 4 |
- +--------------------------------------+------------+----------+------------+
-
-Prefix advertisement
-~~~~~~~~~~~~~~~~~~~~
-
-BGP dynamic routing advertises prefixes for self-service networks and host
-routes for floating IP addresses.
-
-Advertisement of a self-service network requires satisfying the following
-conditions:
-
-* The external and self-service network reside in the same address scope.
-
-* The router contains an interface on the self-service subnet and a gateway
- on the external network.
-
-* The BGP speaker associates with the external network that provides a
- gateway on the router.
-
-* The BGP speaker has the ``advertise_tenant_networks`` attribute set to
- ``True``.
-
-.. image:: figures/bgp-dynamic-routing-example1.png
- :alt: Example of prefix advertisements with self-service networks
-
-Advertisement of a floating IP address requires satisfying the following
-conditions:
-
-* The router with the floating IP address binding contains a gateway on
- an external network with the BGP speaker association.
-
-* The BGP speaker has the ``advertise_floating_ip_host_routes`` attribute
- set to ``True``.
-
-.. image:: figures/bgp-dynamic-routing-example2.png
- :alt: Example of prefix advertisements with floating IP addresses
-
-Operation with Distributed Virtual Routers (DVR)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In deployments using DVR, the BGP speaker advertises floating IP
-addresses and self-service networks differently. For floating IP
-addresses, the BGP speaker advertises the floating IP agent gateway
-on the corresponding compute node as the next-hop IP address. For
-self-service networks using SNAT, the BGP speaker advertises the
-DVR SNAT node as the next-hop IP address.
-
-For example, consider the following components:
-
-#. A provider network using IP address range 203.0.113.0/24, and supporting
- floating IP addresses 203.0.113.101, 203.0.113.102, and 203.0.113.103.
-
-#. A self-service network using IP address range 198.51.100.0/24.
-
-#. The SNAT gateway resides on 203.0.113.11.
-
-#. The floating IP agent gateways (one per compute node) reside on
- 203.0.113.12, 203.0.113.13, and 203.0.113.14.
-
-#. Three instances, one per compute node, each with a floating IP
- address.
-
-.. code-block:: console
-
- $ neutron bgp-speaker-advertiseroute-list bgpspeaker
- +------------------+--------------+
- | destination | next_hop |
- +------------------+--------------+
- | 198.51.100.0/24 | 203.0.113.11 |
- | 203.0.113.101/32 | 203.0.113.12 |
- | 203.0.113.102/32 | 203.0.113.13 |
- | 203.0.113.103/32 | 203.0.113.14 |
- +------------------+--------------+
-
-.. note::
-
- DVR lacks support for routing directly to a fixed IP address via the
- floating IP agent gateway port and thus prevents the BGP speaker from
- advertising fixed IP addresses.
-
-You can also identify floating IP agent gateways in your environment to
-assist with verifying operation of the BGP speaker.
-
-.. code-block:: console
-
- $ neutron port-list --device_owner="network:floatingip_agent_gateway"
- +--------------------------------------+------+-------------------+--------------------------------------------------------------------------------------------------------+
- | id | name | mac_address | fixed_ips |
- +--------------------------------------+------+-------------------+--------------------------------------------------------------------------------------------------------+
- | 87cf2970-4970-462e-939e-00e808295dfa | | fa:16:3e:7c:68:e3 | {"subnet_id": "8ed65d41-2b2a-4f3a-9f92-45adb266e01a", "ip_address": "203.0.113.12"} |
- | 8d218440-0d2e-49d0-8a7b-3266a6146dc1 | | fa:16:3e:9d:78:cf | {"subnet_id": "8ed65d41-2b2a-4f3a-9f92-45adb266e01a", "ip_address": "203.0.113.13"} |
- | 87cf2970-4970-462e-939e-00e802281dfa | | fa:16:3e:6b:18:e0 | {"subnet_id": "8ed65d41-2b2a-4f3a-9f92-45adb266e01a", "ip_address": "203.0.113.14"} |
- +--------------------------------------+------+-------------------+--------------------------------------------------------------------------------------------------------+
-
-IPv6
-~~~~
-
-BGP dynamic routing supports peering via IPv6 and advertising IPv6 prefixes.
-
-* To enable peering via IPv6, create a BGP peer and use an IPv6 address for
- ``peer_ip``.
-
-* To enable advertising IPv6 prefixes, create an address scope with
- ``ip_version=6`` and a BGP speaker with ``ip_version=6``.
-
-.. note::
-
- DVR with IPv6 functions similarly to DVR with IPv4.
-
-High availability
-~~~~~~~~~~~~~~~~~
-
-BGP dynamic routing supports scheduling a BGP speaker to multiple agents
-which effectively multiplies prefix advertisements to the same peer. If
-an agent fails, the peer continues to receive advertisements from one or
-more operational agents.
-
-#. Show available dynamic routing agents.
-
- .. code-block:: console
-
- $ neutron agent-list --agent-type="BGP dynamic routing agent"
- +--------------------------------------+---------------------------+----------+-------------------+-------+----------------+---------------------------+
- | id | agent_type | host | availability_zone | alive | admin_state_up | binary |
- +--------------------------------------+---------------------------+----------+-------------------+-------+----------------+---------------------------+
- | 37729181-2224-48d8-89ef-16eca8e2f77e | BGP dynamic routing agent | bgp-ha1 | | :-) | True | neutron-bgp-dragent |
- | 1a2d33bb-9321-30a2-76ab-22eff3d2f56a | BGP dynamic routing agent | bgp-ha2 | | :-) | True | neutron-bgp-dragent |
- +--------------------------------------+---------------------------+----------+-------------------+-------+----------------+---------------------------+
-
-#. Schedule BGP speaker to multiple agents.
-
- .. code-block:: console
-
- $ neutron bgp-dragent-speaker-add 37729181-2224-48d8-89ef-16eca8e2f77e bgpspeaker
- Associated BGP speaker bgpspeaker to the Dynamic Routing agent.
-
- $ neutron bgp-dragent-speaker-add 1a2d33bb-9321-30a2-76ab-22eff3d2f56a bgpspeaker
- Associated BGP speaker bgpspeaker to the Dynamic Routing agent.
-
- $ neutron bgp-dragent-list-hosting-speaker bgpspeaker
- +--------------------------------------+---------+----------------+-------+
- | id | host | admin_state_up | alive |
- +--------------------------------------+---------+----------------+-------+
- | 37729181-2224-48d8-89ef-16eca8e2f77e | bgp-ha1 | True | :-) |
- | 1a2d33bb-9321-30a2-76ab-22eff3d2f56a | bgp-ha2 | True | :-) |
- +--------------------------------------+---------+----------------+-------+
-
- $ neutron bgp-speaker-list-on-dragent 37729181-2224-48d8-89ef-16eca8e2f77e
- +--------------------------------------+------------+----------+------------+
- | id | name | local_as | ip_version |
- +--------------------------------------+------------+----------+------------+
- | 5f227f14-4f46-4eca-9524-fc5a1eabc358 | bgpspeaker | 1234 | 4 |
- +--------------------------------------+------------+----------+------------+
-
- $ neutron bgp-speaker-list-on-dragent 1a2d33bb-9321-30a2-76ab-22eff3d2f56a
- +--------------------------------------+------------+----------+------------+
- | id | name | local_as | ip_version |
- +--------------------------------------+------------+----------+------------+
- | 5f227f14-4f46-4eca-9524-fc5a1eabc358 | bgpspeaker | 1234 | 4 |
- +--------------------------------------+------------+----------+------------+
diff --git a/doc/networking-guide/source/config-dhcp-ha.rst b/doc/networking-guide/source/config-dhcp-ha.rst
deleted file mode 100644
index e50093132f..0000000000
--- a/doc/networking-guide/source/config-dhcp-ha.rst
+++ /dev/null
@@ -1,504 +0,0 @@
-.. _config-dhcp-ha:
-
-==========================
-High-availability for DHCP
-==========================
-
-This section describes how to use the agent management (alias agent) and
-scheduler (alias agent_scheduler) extensions for DHCP agents
-scalability and HA.
-
-.. note::
-
- Use the :command:`openstack extension list` command to check if these
- extensions are enabled. Check ``agent`` and ``agent_scheduler``
- are included in the output.
-
- .. code-block:: console
-
- $ openstack extension list --network -c Name -c Alias
- +-------------------------------------------------------------+---------------------------+
- | Name | Alias |
- +-------------------------------------------------------------+---------------------------+
- | Default Subnetpools | default-subnetpools |
- | Network IP Availability | network-ip-availability |
- | Network Availability Zone | network_availability_zone |
- | Auto Allocated Topology Services | auto-allocated-topology |
- | Neutron L3 Configurable external gateway mode | ext-gw-mode |
- | Port Binding | binding |
- | Neutron Metering | metering |
- | agent | agent |
- | Subnet Allocation | subnet_allocation |
- | L3 Agent Scheduler | l3_agent_scheduler |
- | Tag support | tag |
- | Neutron external network | external-net |
- | Neutron Service Flavors | flavors |
- | Network MTU | net-mtu |
- | Availability Zone | availability_zone |
- | Quota management support | quotas |
- | HA Router extension | l3-ha |
- | Provider Network | provider |
- | Multi Provider Network | multi-provider |
- | Address scope | address-scope |
- | Neutron Extra Route | extraroute |
- | Subnet service types | subnet-service-types |
- | Resource timestamps | standard-attr-timestamp |
- | Neutron Service Type Management | service-type |
- | Router Flavor Extension | l3-flavors |
- | Tag support for resources: subnet, subnetpool, port, router | tag-ext |
- | Neutron Extra DHCP opts | extra_dhcp_opt |
- | Resource revision numbers | standard-attr-revisions |
- | Pagination support | pagination |
- | Sorting support | sorting |
- | security-group | security-group |
- | DHCP Agent Scheduler | dhcp_agent_scheduler |
- | Router Availability Zone | router_availability_zone |
- | RBAC Policies | rbac-policies |
- | standard-attr-description | standard-attr-description |
- | Neutron L3 Router | router |
- | Allowed Address Pairs | allowed-address-pairs |
- | project_id field enabled | project-id |
- | Distributed Virtual Router | dvr |
- +-------------------------------------------------------------+---------------------------+
-
-Demo setup
-~~~~~~~~~~
-
-.. figure:: figures/demo_multiple_dhcp_agents.png
-
-There will be three hosts in the setup.
-
-.. list-table::
- :widths: 25 50
- :header-rows: 1
-
- * - Host
- - Description
- * - OpenStack controller host - controlnode
- - Runs the Networking, Identity, and Compute services that are required
- to deploy VMs. The node must have at least one network interface that
- is connected to the Management Network. Note that ``nova-network`` should
- not be running because it is replaced by Neutron.
- * - HostA
- - Runs ``nova-compute``, the Neutron L2 agent and DHCP agent
- * - HostB
- - Same as HostA
-
-Configuration
-~~~~~~~~~~~~~
-
-**controlnode: neutron server**
-
-#. Neutron configuration file ``/etc/neutron/neutron.conf``:
-
- .. code-block:: ini
-
- [DEFAULT]
- core_plugin = linuxbridge
- rabbit_host = controlnode
- allow_overlapping_ips = True
- host = controlnode
- agent_down_time = 5
- dhcp_agents_per_network = 1
-
- .. note::
-
- In the above configuration, we use ``dhcp_agents_per_network = 1``
- for this demonstration. In usual deployments, we suggest setting
- ``dhcp_agents_per_network`` to more than one to match the number of
- DHCP agents in your deployment.
- See :ref:`conf-dhcp-agents-per-network`.
-
-#. Update the plug-in configuration file
- ``/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini``:
-
- .. code-block:: ini
-
- [vlans]
- tenant_network_type = vlan
- network_vlan_ranges = physnet1:1000:2999
- [database]
- connection = mysql://root:root@127.0.0.1:3306/neutron_linux_bridge
- retry_interval = 2
- [linux_bridge]
- physical_interface_mappings = physnet1:eth0
-
-**HostA and HostB: L2 agent**
-
-#. Neutron configuration file ``/etc/neutron/neutron.conf``:
-
- .. code-block:: ini
-
- [DEFAULT]
- rabbit_host = controlnode
- rabbit_password = openstack
- # host = HostB on hostb
- host = HostA
-
-#. Update the plug-in configuration file
- ``/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini``:
-
- .. code-block:: ini
-
- [vlans]
- tenant_network_type = vlan
- network_vlan_ranges = physnet1:1000:2999
- [database]
- connection = mysql://root:root@127.0.0.1:3306/neutron_linux_bridge
- retry_interval = 2
- [linux_bridge]
- physical_interface_mappings = physnet1:eth0
-
-#. Update the nova configuration file ``/etc/nova/nova.conf``:
-
- .. code-block:: ini
-
- [DEFAULT]
- use_neutron=True
- firewall_driver=nova.virt.firewall.NoopFirewallDriver
-
- [neutron]
- admin_username=neutron
- admin_password=servicepassword
- admin_auth_url=http://controlnode:35357/v2.0/
- auth_strategy=keystone
- admin_tenant_name=servicetenant
- url=http://203.0.113.10:9696/
-
-**HostA and HostB: DHCP agent**
-
-- Update the DHCP configuration file ``/etc/neutron/dhcp_agent.ini``:
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-Prerequisites for demonstration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Admin role is required to use the agent management and scheduler extensions.
-Ensure you run the following commands under a project with an admin role.
-
-To experiment, you need VMs and a neutron network:
-
-.. code-block:: console
-
- $ openstack server list
- +--------------------------------------+-----------+--------+----------------+------------+
- | ID | Name | Status | Networks | Image Name |
- +--------------------------------------+-----------+--------+----------------+------------+
- | c394fcd0-0baa-43ae-a793-201815c3e8ce | myserver1 | ACTIVE | net1=192.0.2.3 | cirros |
- | 2d604e05-9a6c-4ddb-9082-8a1fbdcc797d | myserver2 | ACTIVE | net1=192.0.2.4 | ubuntu |
- | c7c0481c-3db8-4d7a-a948-60ce8211d585 | myserver3 | ACTIVE | net1=192.0.2.5 | centos |
- +--------------------------------------+-----------+--------+----------------+------------+
-
- $ openstack network list
- +--------------------------------------+------+--------------------------------------+
- | ID | Name | Subnets |
- +--------------------------------------+------+--------------------------------------+
- | ad88e059-e7fa-4cf7-8857-6731a2a3a554 | net1 | 8086db87-3a7a-4cad-88c9-7bab9bc69258 |
- +--------------------------------------+------+--------------------------------------+
-
-Managing agents in neutron deployment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. List all agents:
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
- | 22467163-01ea-4231-ba45-3bd316f425e6 | Linux bridge agent | HostA | None | True | UP | neutron-linuxbridge-agent |
- | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | DHCP agent | HostA | None | True | UP | neutron-dhcp-agent |
- | 3066d20c-9f8f-440c-ae7c-a40ffb4256b6 | Linux bridge agent | HostB | nova | True | UP | neutron-linuxbridge-agent |
- | 55569f4e-6f31-41a6-be9d-526efce1f7fe | DHCP agent | HostB | nova | True | UP | neutron-dhcp-agent |
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
-
- Every agent that supports these extensions will register itself with the
- neutron server when it starts up.
-
- The output shows information for four agents. The ``alive`` field shows
- ``True`` if the agent reported its state within the period defined by the
- ``agent_down_time`` option in the ``neutron.conf`` file. Otherwise the
- ``alive`` is ``False``.
-
-#. List DHCP agents that host a specified network:
-
- .. code-block:: console
-
- $ openstack network agent list --network net1
- +--------------------------------------+---------------+----------------+-------+
- | ID | Host | Admin State Up | Alive |
- +--------------------------------------+---------------+----------------+-------+
- | 22467163-01ea-4231-ba45-3bd316f425e6 | HostA | UP | True |
- +--------------------------------------+---------------+----------------+-------+
-
-#. List the networks hosted by a given DHCP agent:
-
- This command is to show which networks a given dhcp agent is managing.
-
- .. code-block:: console
-
- $ openstack network list --agent 22467163-01ea-4231-ba45-3bd316f425e6
- +--------------------------------+------------------------+---------------------------------+
- | ID | Name | Subnets |
- +--------------------------------+------------------------+---------------------------------+
- | ad88e059-e7fa- | net1 | 8086db87-3a7a-4cad- |
- | 4cf7-8857-6731a2a3a554 | | 88c9-7bab9bc69258 |
- +--------------------------------+------------------------+---------------------------------+
-
-#. Show agent details.
-
- The :command:`openstack network agent show` command shows details for a
- specified agent:
-
- .. code-block:: console
-
- $ openstack network agent show 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b
- +---------------------+--------------------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------------------+
- | admin_state_up | UP |
- | agent_type | DHCP agent |
- | alive | True |
- | availability_zone | nova |
- | binary | neutron-dhcp-agent |
- | configurations | dhcp_driver='neutron.agent.linux.dhcp.Dnsmasq', |
- | | dhcp_lease_duration='86400', |
- | | log_agent_heartbeats='False', networks='1', |
- | | notifies_port_ready='True', ports='3', |
- | | subnets='1' |
- | created_at | 2016-12-14 00:25:54 |
- | description | None |
- | last_heartbeat_at | 2016-12-14 06:53:24 |
- | host | HostA |
- | id | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b |
- | started_at | 2016-12-14 00:25:54 |
- | topic | dhcp_agent |
- +---------------------+--------------------------------------------------+
-
- In this output, ``last_heartbeat_at`` is the time on the neutron
- server. You do not need to synchronize all agents to this time for this
- extension to run correctly. ``configurations`` describes the static
- configuration for the agent or run time data. This agent is a DHCP agent
- and it hosts one network, one subnet, and three ports.
-
- Different types of agents show different details. The following output
- shows information for a Linux bridge agent:
-
- .. code-block:: console
-
- $ openstack network agent show 22467163-01ea-4231-ba45-3bd316f425e6
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | admin_state_up | UP |
- | agent_type | Linux bridge agent |
- | alive | True |
- | availability_zone | nova |
- | binary | neutron-linuxbridge-agent |
- | configurations | { |
- | | "physnet1": "eth0", |
- | | "devices": "4" |
- | | } |
- | created_at | 2016-12-14 00:26:54 |
- | description | None |
- | last_heartbeat_at | 2016-12-14 06:53:24 |
- | host | HostA |
- | id | 22467163-01ea-4231-ba45-3bd316f425e6 |
- | started_at | 2016-12-14T06:48:39.000000 |
- | topic | N/A |
- +---------------------+--------------------------------------+
-
- The output shows ``bridge-mapping`` and the number of virtual network
- devices on this L2 agent.
-
-Managing assignment of networks to DHCP agent
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A single network can be assigned to more than one DHCP agents and
-one DHCP agent can host more than one network.
-You can add a network to a DHCP agent and remove one from it.
-
-#. Default scheduling.
-
- When you create a network with one port, the network will be scheduled to
- an active DHCP agent. If many active DHCP agents are running, select one
- randomly. You can design more sophisticated scheduling algorithms in the
- same way as nova-schedule later on.
-
- .. code-block:: console
-
- $ openstack network create net2
- $ openstack subnet create --network net2 --subnet-range 198.51.100.0/24 subnet2
- $ openstack port create port2 --network net2
- $ openstack network agent list --network net2
- +--------------------------------------+---------------+----------------+-------+
- | ID | Host | Admin State Up | Alive |
- +--------------------------------------+---------------+----------------+-------+
- | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True |
- +--------------------------------------+---------------+----------------+-------+
-
- It is allocated to DHCP agent on HostA. If you want to validate the
- behavior through the :command:`dnsmasq` command, you must create a subnet for
- the network because the DHCP agent starts the dnsmasq service only if
- there is a DHCP.
-
-#. Assign a network to a given DHCP agent.
-
- To add another DHCP agent to host the network, run this command:
-
- .. code-block:: console
-
- $ openstack network agent add network --dhcp \
- 55569f4e-6f31-41a6-be9d-526efce1f7fe net2
- $ openstack network agent list --network net2
- +--------------------------------------+-------+----------------+--------+
- | ID | Host | Admin State Up | Alive |
- +--------------------------------------+-------+----------------+--------+
- | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True |
- | 55569f4e-6f31-41a6-be9d-526efce1f7fe | HostB | UP | True |
- +--------------------------------------+-------+----------------+--------+
-
- Both DHCP agents host the ``net2`` network.
-
-#. Remove a network from a specified DHCP agent.
-
- This command is the sibling command for the previous one. Remove
- ``net2`` from the DHCP agent for HostA:
-
- .. code-block:: console
-
- $ openstack network agent remove network --dhcp \
- 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b net2
- $ openstack network agent list --network net2
- +--------------------------------------+-------+----------------+-------+
- | ID | Host | Admin State Up | Alive |
- +--------------------------------------+-------+----------------+-------+
- | 55569f4e-6f31-41a6-be9d-526efce1f7fe | HostB | UP | True |
- +--------------------------------------+-------+----------------+-------+
-
- You can see that only the DHCP agent for HostB is hosting the ``net2``
- network.
-
-HA of DHCP agents
-~~~~~~~~~~~~~~~~~
-
-Boot a VM on ``net2``. Let both DHCP agents host ``net2``. Fail the agents
-in turn to see if the VM can still get the desired IP.
-
-#. Boot a VM on ``net2``:
-
- .. code-block:: console
-
- $ openstack network list
- +--------------------------------------+------+--------------------------------------+
- | ID | Name | Subnets |
- +--------------------------------------+------+--------------------------------------+
- | ad88e059-e7fa-4cf7-8857-6731a2a3a554 | net1 | 8086db87-3a7a-4cad-88c9-7bab9bc69258 |
- | 9b96b14f-71b8-4918-90aa-c5d705606b1a | net2 | 6979b71a-0ae8-448c-aa87-65f68eedcaaa |
- +--------------------------------------+------+--------------------------------------+
- $ openstack server create --image tty --flavor 1 myserver4 \
- --nic net-id=9b96b14f-71b8-4918-90aa-c5d705606b1a
- ...
- $ openstack server list
- +--------------------------------------+-----------+--------+-------------------+------------+
- | ID | Name | Status | Networks | Image Name |
- +--------------------------------------+-----------+--------+-------------------+------------+
- | c394fcd0-0baa-43ae-a793-201815c3e8ce | myserver1 | ACTIVE | net1=192.0.2.3 | cirros |
- | 2d604e05-9a6c-4ddb-9082-8a1fbdcc797d | myserver2 | ACTIVE | net1=192.0.2.4 | ubuntu |
- | c7c0481c-3db8-4d7a-a948-60ce8211d585 | myserver3 | ACTIVE | net1=192.0.2.5 | centos |
- | f62f4731-5591-46b1-9d74-f0c901de567f | myserver4 | ACTIVE | net2=198.51.100.2 | cirros1 |
- +--------------------------------------+-----------+--------+-------------------+------------+
-
-#. Make sure both DHCP agents hosting ``net2``:
-
- Use the previous commands to assign the network to agents.
-
- .. code-block:: console
-
- $ openstack network agent list --network net2
- +--------------------------------------+-------+----------------+-------+
- | ID | Host | Admin State Up | Alive |
- +--------------------------------------+-------+----------------+-------+
- | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True |
- | 55569f4e-6f31-41a6-be9d-526efce1f7fe | HostB | UP | True |
- +--------------------------------------+-------+----------------+-------+
-
-To test the HA of DHCP agent:
-
-#. Log in to the ``myserver4`` VM, and run ``udhcpc``, ``dhclient`` or
- other DHCP client.
-
-#. Stop the DHCP agent on HostA. Besides stopping the
- ``neutron-dhcp-agent`` binary, you must stop the ``dnsmasq`` processes.
-
-#. Run a DHCP client in VM to see if it can get the wanted IP.
-
-#. Stop the DHCP agent on HostB too.
-
-#. Run ``udhcpc`` in the VM; it cannot get the wanted IP.
-
-#. Start DHCP agent on HostB. The VM gets the wanted IP again.
-
-Disabling and removing an agent
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-An administrator might want to disable an agent if a system hardware or
-software upgrade is planned. Some agents that support scheduling also
-support disabling and enabling agents, such as L3 and DHCP agents. After
-the agent is disabled, the scheduler does not schedule new resources to
-the agent.
-
-After the agent is disabled, you can safely remove the agent.
-Even after disabling the agent, resources on the agent are kept assigned.
-Ensure you remove the resources on the agent before you delete the agent.
-
-Disable the DHCP agent on HostA before you stop it:
-
-.. code-block:: console
-
- $ openstack network agent set 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b --disable
- $ openstack network agent list
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
- | 22467163-01ea-4231-ba45-3bd316f425e6 | Linux bridge agent | HostA | None | True | UP | neutron-linuxbridge-agent |
- | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | DHCP agent | HostA | None | True | DOWN | neutron-dhcp-agent |
- | 3066d20c-9f8f-440c-ae7c-a40ffb4256b6 | Linux bridge agent | HostB | nova | True | UP | neutron-linuxbridge-agent |
- | 55569f4e-6f31-41a6-be9d-526efce1f7fe | DHCP agent | HostB | nova | True | UP | neutron-dhcp-agent |
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
-
-After you stop the DHCP agent on HostA, you can delete it by the following
-command:
-
-.. code-block:: console
-
- $ openstack network agent delete 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b
- $ openstack network agent list
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
- | 22467163-01ea-4231-ba45-3bd316f425e6 | Linux bridge agent | HostA | None | True | UP | neutron-linuxbridge-agent |
- | 3066d20c-9f8f-440c-ae7c-a40ffb4256b6 | Linux bridge agent | HostB | nova | True | UP | neutron-linuxbridge-agent |
- | 55569f4e-6f31-41a6-be9d-526efce1f7fe | DHCP agent | HostB | nova | True | UP | neutron-dhcp-agent |
- +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
-
-After deletion, if you restart the DHCP agent, it appears on the agent
-list again.
-
-.. _conf-dhcp-agents-per-network:
-
-Enabling DHCP high availability by default
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can control the default number of DHCP agents assigned to a network
-by setting the following configuration option
-in the file ``/etc/neutron/neutron.conf``.
-
-.. code-block:: ini
-
- dhcp_agents_per_network = 3
diff --git a/doc/networking-guide/source/config-dns-int.rst b/doc/networking-guide/source/config-dns-int.rst
deleted file mode 100644
index 8b0cc2e9cd..0000000000
--- a/doc/networking-guide/source/config-dns-int.rst
+++ /dev/null
@@ -1,841 +0,0 @@
-.. _config-dns-int:
-
-===============
-DNS integration
-===============
-
-This page serves as a guide for how to use the DNS integration functionality of
-the Networking service. The functionality described covers DNS from two points
-of view:
-
-* The internal DNS functionality offered by the Networking service and its
- interaction with the Compute service.
-* Integration of the Compute service and the Networking service with an
- external DNSaaS (DNS-as-a-Service).
-
-Users can control the behavior of the Networking service in regards to DNS
-using two attributes associated with ports, networks, and floating IPs. The
-following table shows the attributes available for each one of these resources:
-
-.. list-table::
- :header-rows: 1
- :widths: 30 30 30
-
- * - Resource
- - dns_name
- - dns_domain
- * - Ports
- - Yes
- - No
- * - Networks
- - No
- - Yes
- * - Floating IPs
- - Yes
- - Yes
-
-.. _config-dns-int-dns-resolution:
-
-The Networking service internal DNS resolution
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Networking service enables users to control the name assigned to ports by
-the internal DNS. To enable this functionality, do the following:
-
-1. Edit the ``/etc/neutron/neutron.conf`` file and assign a value different to
- ``openstacklocal`` (its default value) to the ``dns_domain`` parameter in
- the ``[default]`` section. As an example:
-
- .. code-block:: ini
-
- dns_domain = example.org.
-
-2. Add ``dns`` to ``extension_drivers`` in the ``[ml2]`` section of
- ``/etc/neutron/plugins/ml2/ml2_conf.ini``. The following is an example:
-
- .. code-block:: console
-
- [ml2]
- extension_drivers = port_security,dns
-
-After re-starting the ``neutron-server``, users will be able to assign a
-``dns_name`` attribute to their ports.
-
-.. note::
- The enablement of this functionality is prerequisite for the enablement of
- the Networking service integration with an external DNS service, which is
- described in detail in :ref:`config-dns-int-ext-serv`.
-
-The following illustrates the creation of a port with ``my-port``
-in its ``dns_name`` attribute.
-
-.. note::
- The name assigned to the port by the Networking service internal DNS is now
- visible in the response in the ``dns_assignment`` attribute.
-
-.. code-block:: console
-
- $ neutron port-create my-net --dns-name my-port
- Created a new port:
- +-----------------------+-------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+-------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | allowed_address_pairs | |
- | binding:vnic_type | normal |
- | device_id | |
- | device_owner | |
- | dns_assignment | {"hostname": "my-port", "ip_address": "192.0.2.67", "fqdn": "my-port.example.org."} |
- | dns_name | my-port |
- | fixed_ips | {"subnet_id":"6141b474-56cd-430f-b731-71660bb79b79", "ip_address": "192.0.2.67"} |
- | id | fb3c10f4-017e-420c-9be1-8f8c557ae21f |
- | mac_address | fa:16:3e:aa:9b:e1 |
- | name | |
- | network_id | bf2802a0-99a0-4e8c-91e4-107d03f158ea |
- | port_security_enabled | True |
- | security_groups | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 |
- | status | DOWN |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-----------------------+-------------------------------------------------------------------------------------+
-
-When this functionality is enabled, it is leveraged by the Compute service when
-creating instances. When allocating ports for an instance during boot, the
-Compute service populates the ``dns_name`` attributes of these ports with
-the ``hostname`` attribute of the instance, which is a DNS sanitized version of
-its display name. As a consequence, at the end of the boot process, the
-allocated ports will be known in the dnsmasq associated to their networks by
-their instance ``hostname``.
-
-The following is an example of an instance creation, showing how its
-``hostname`` populates the ``dns_name`` attribute of the allocated port:
-
-.. code-block:: console
-
- $ openstack server create --image cirros --flavor 42 \
- --nic net-id=37aaff3a-6047-45ac-bf4f-a825e56fd2b3 my_vm
- +--------------------------------------+----------------------------------------------------------------+
- | Field | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | dB45Zvo8Jpfe |
- | config_drive | |
- | created | 2016-02-05T21:35:04Z |
- | flavor | m1.nano (42) |
- | hostId | |
- | id | 66c13cb4-3002-4ab3-8400-7efc2659c363 |
- | image | cirros-0.3.5-x86_64-uec(b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | my_vm |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- | updated | 2016-02-05T21:35:04Z |
- | user_id | 8bb6e578cba24e7db9d3810633124525 |
- +--------------------------------------+----------------------------------------------------------------+
-
- $ neutron port-list --device_id 66c13cb4-3002-4ab3-8400-7efc2659c363
- +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------------+
- | id | name | mac_address | fixed_ips |
- +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------------+
- | b3ecc464-1263-44a7-8c38-2d8a52751773 | | fa:16:3e:a8:ce:b8 | {"subnet_id": "277eca5d-9869-474b-960e-6da5951d09f7", "ip_address": "203.0.113.8"} |
- | | | | {"subnet_id": "eab47748-3f0a-4775-a09f-b0c24bb64bc4", "ip_address":"2001:db8:10::8"} |
- +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------------+
-
- $ neutron port-show b3ecc464-1263-44a7-8c38-2d8a52751773
- +-----------------------+---------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+---------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | allowed_address_pairs | |
- | binding:vnic_type | normal |
- | device_id | 66c13cb4-3002-4ab3-8400-7efc2659c363 |
- | device_owner | compute:None |
- | dns_assignment | {"hostname": "my-vm", "ip_address": "203.0.113.8", "fqdn": "my-vm.example.org."} |
- | | {"hostname": "my-vm", "ip_address": "2001:db8:10::8", "fqdn": "my-vm.example.org."} |
- | dns_name | my-vm |
- | extra_dhcp_opts | |
- | fixed_ips | {"subnet_id": "277eca5d-9869-474b-960e-6da5951d09f7", "ip_address": "203.0.113.8"} |
- | | {"subnet_id": "eab47748-3f0a-4775-a09f-b0c24bb64bc4", "ip_address": "2001:db8:10::8"} |
- | id | b3ecc464-1263-44a7-8c38-2d8a52751773 |
- | mac_address | fa:16:3e:a8:ce:b8 |
- | name | |
- | network_id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 |
- | port_security_enabled | True |
- | security_groups | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 |
- | status | ACTIVE |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-----------------------+---------------------------------------------------------------------------------------+
-
-In the above example notice that:
-
-* The name given to the instance by the user, ``my_vm``, is sanitized by the
- Compute service and becomes ``my-vm`` as the port's ``dns_name``.
-* The port's ``dns_assignment`` attribute shows that its FQDN is
- ``my-vm.example.org.`` in the Networking service internal DNS, which is
- the result of concatenating the port's ``dns_name`` with the value configured
- in the ``dns_domain`` parameter in ``neutron.conf``, as explained previously.
-* The ``dns_assignment`` attribute also shows that the port's ``hostname`` in
- the Networking service internal DNS is ``my-vm``.
-* Instead of having the Compute service create the port for the instance, the
- user might have created it and assigned a value to its ``dns_name``
- attribute. In this case, the value assigned to the ``dns_name`` attribute
- must be equal to the value that Compute service will assign to the instance's
- ``hostname``, in this example ``my-vm``. Otherwise, the instance boot will
- fail.
-
-Integration with an external DNS service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Users can also integrate the Networking and Compute services with an external
-DNS. To accomplish this, the users have to:
-
-#. Enable the functionality described in
- :ref:`config-dns-int-dns-resolution`.
-#. Configure an external DNS driver. The Networking service provides a driver
- reference implementation based on the OpenStack DNS service. It is expected
- that third party vendors will provide other implementations in the future.
- For detailed configuration instructions, see
- :ref:`config-dns-int-ext-serv`.
-
-Once the ``neutron-server`` has been configured and restarted, users will have
-functionality that covers three use cases, described in the following sections.
-In each of the use cases described below:
-
-* The examples assume the OpenStack DNS service as the external DNS.
-* A, AAAA and PTR records will be created in the DNS service.
-* Before executing any of the use cases, the user must create in the DNS
- service under his project a DNS zone where the A and AAAA records will be
- created. For the description of the use cases below, it is assumed the zone
- ``example.org.`` was created previously.
-* The PTR records will be created in zones owned by a project with admin
- privileges. See :ref:`config-dns-int-ext-serv` for more details.
-
-.. _config-dns-use-case-1:
-
-Use case 1: Ports are published directly in the external DNS service
---------------------------------------------------------------------
-
-In this case, the user is creating ports or booting instances on a network
-that is accessible externally. The steps to publish the port in the external
-DNS service are the following:
-
-#. Assign a valid domain name to the network's ``dns_domain`` attribute. This
- name must end with a period (``.``).
-#. Boot an instance specifying the externally accessible network.
- Alternatively, create a port on the externally accessible network specifying
- a valid value to its ``dns_name`` attribute. If the port is going to be used
- for an instance boot, the value assigned to ``dns_name`` must be equal to
- the ``hostname`` that the Compute service will assign to the instance.
- Otherwise, the boot will fail.
-
-Once these steps are executed, the port's DNS data will be published in the
-external DNS service. This is an example:
-
-.. code-block:: console
-
- $ neutron net-list
- +--------------------------------------+----------+----------------------------------------------------------+
- | id | name | subnets |
- +--------------------------------------+----------+----------------------------------------------------------+
- | 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a | public | a67cfdf7-9d5d-406f-8a19-3f38e4fc3e74 |
- | | | cbd8c6dc-ca81-457e-9c5d-f8ece7ef67f8 |
- | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 | external | 277eca5d-9869-474b-960e-6da5951d09f7 203.0.113.0/24 |
- | | | eab47748-3f0a-4775-a09f-b0c24bb64bc4 2001:db8:10::/64 |
- | bf2802a0-99a0-4e8c-91e4-107d03f158ea | my-net | 6141b474-56cd-430f-b731-71660bb79b79 192.0.2.64/26 |
- | 38c5e950-b450-4c30-83d4-ee181c28aad3 | private | 43414c53-62ae-49bc-aa6c-c9dd7705818a fda4:653e:71b0::/64 |
- | | | 5b9282a1-0be1-4ade-b478-7868ad2a16ff 192.0.2.0/26 |
- +--------------------------------------+----------+----------------------------------------------------------+
-
- $ neutron net-update 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 --dns_domain example.org.
- Updated network: 37aaff3a-6047-45ac-bf4f-a825e56fd2b3
-
- $ neutron net-show 37aaff3a-6047-45ac-bf4f-a825e56fd2b3
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | |
- | availability_zones | nova |
- | dns_domain | example.org. |
- | id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 |
- | mtu | 1450 |
- | name | external |
- | port_security_enabled | True |
- | provider:network_type | vlan |
- | provider:physical_network | |
- | provider:segmentation_id | 2016 |
- | router:external | False |
- | shared | True |
- | status | ACTIVE |
- | subnets | eab47748-3f0a-4775-a09f-b0c24bb64bc4 |
- | | 277eca5d-9869-474b-960e-6da5951d09f7 |
- | tenant_id | 04fc2f83966245dba907efb783f8eab9 |
- +---------------------------+--------------------------------------+
-
- $ designate record-list example.org.
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
- | 10a36008-6ecf-47c3-b321-05652a929b04 | SOA | example.org. | ns1.devstack.org. malavall.us.ibm.com. 1454729414 3600 600 86400 3600 |
- | 56ca0b88-e343-4c98-8faa-19746e169baf | NS | example.org. | ns1.devstack.org. |
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
-
- $ neutron port-create 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 --dns_name my-vm
- Created a new port:
- +-----------------------+---------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+---------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | allowed_address_pairs | |
- | binding:vnic_type | normal |
- | device_id | |
- | device_owner | |
- | dns_assignment | {"hostname": "my-vm", "ip_address": "203.0.113.9", "fqdn": "my-vm.example.org."} |
- | | {"hostname": "my-vm", "ip_address": "2001:db8:10::9", "fqdn": "my-vm.example.org."} |
- | dns_name | my-vm |
- | fixed_ips | {"subnet_id": "277eca5d-9869-474b-960e-6da5951d09f7", "ip_address": "203.0.113.9"} |
- | | {"subnet_id": "eab47748-3f0a-4775-a09f-b0c24bb64bc4", "ip_address": "2001:db8:10::9"} |
- | id | 04be331b-dc5e-410a-9103-9c8983aeb186 |
- | mac_address | fa:16:3e:0f:4b:e4 |
- | name | |
- | network_id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 |
- | port_security_enabled | True |
- | security_groups | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 |
- | status | DOWN |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-----------------------+---------------------------------------------------------------------------------------+
-
- $ designate record-list example.org.
- +--------------------------------------+------+--------------------+-----------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+--------------------+-----------------------------------------------------------------------+
- | 10a36008-6ecf-47c3-b321-05652a929b04 | SOA | example.org. | ns1.devstack.org. malavall.us.ibm.com. 1455563035 3600 600 86400 3600 |
- | 56ca0b88-e343-4c98-8faa-19746e169baf | NS | example.org. | ns1.devstack.org. |
- | 3593591b-181f-4beb-9ab7-67fad7413b37 | A | my-vm.example.org. | 203.0.113.9 |
- | 5649c68f-7a88-48f5-9f87-ccb1f6ae67ca | AAAA | my-vm.example.org. | 2001:db8:10::9 |
- +--------------------------------------+------+--------------------+-----------------------------------------------------------------------+
-
- $ openstack server create --image cirros --flavor 42 \
- --nic port-id=04be331b-dc5e-410a-9103-9c8983aeb186 my_vm
- +--------------------------------------+----------------------------------------------------------------+
- | Field | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | TDc9EpBT3B9W |
- | config_drive | |
- | created | 2016-02-15T19:10:43Z |
- | flavor | m1.nano (42) |
- | hostId | |
- | id | 62c19691-d1c7-4d7b-a88e-9cc4d95d4f41 |
- | image | cirros-0.3.5-x86_64-uec (b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | my_vm |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- | updated | 2016-02-15T19:10:43Z |
- | user_id | 8bb6e578cba24e7db9d3810633124525 |
- +--------------------------------------+----------------------------------------------------------------+
-
- $ openstack server list
- +--------------------------------------+-------+--------+------------+-------------+--------------------------------------+------------+
- | ID | Name | Status | Task State | Power State | Networks | Image Name |
- +--------------------------------------+-------+--------+------------+-------------+--------------------------------------+------------+
- | 62c19691-d1c7-4d7b-a88e-9cc4d95d4f41 | my_vm | ACTIVE | - | Running | external=203.0.113.9, 2001:db8:10::9 | cirros |
- +--------------------------------------+-------+--------+------------+-------------+--------------------------------------+------------+
-
-In this example the port is created manually by the user and then used to boot
-an instance. Notice that:
-
-* The port's data was visible in the DNS service as soon as it was created.
-* See :ref:`config-dns-performance-considerations` for an explanation of
- the potential performance impact associated with this use case.
-
-Following are the PTR records created for this example. Note that for
-IPv4, the value of ipv4_ptr_zone_prefix_size is 24. In the case of IPv6, the
-value of ipv6_ptr_zone_prefix_size is 116. For more details, see
-:ref:`config-dns-int-ext-serv`:
-
-.. code-block:: console
-
- $ designate record-list 113.0.203.in-addr.arpa.
- +--------------------------------------+------+---------------------------+---------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+---------------------------+---------------------------------------------------------------------+
- | ab7ada72-7e64-4bed-913e-04718a80fafc | NS | 113.0.203.in-addr.arpa. | ns1.devstack.org. |
- | 28346a94-790c-4ae1-9f7b-069d98d9efbd | SOA | 113.0.203.in-addr.arpa. | ns1.devstack.org. admin.example.org. 1455563035 3600 600 86400 3600 |
- | cfcaf537-844a-4c1b-9b5f-464ff07dca33 | PTR | 9.113.0.203.in-addr.arpa. | my-vm.example.org. |
- +--------------------------------------+------+---------------------------+---------------------------------------------------------------------+
-
- $ designate record-list 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
- +--------------------------------------+------+---------------------------------------------------------------------------+---------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+---------------------------------------------------------------------------+---------------------------------------------------------------------+
- | d8923354-13eb-4bd9-914a-0a2ae5f95989 | SOA | 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa. | ns1.devstack.org. admin.example.org. 1455563036 3600 600 86400 3600 |
- | 72e60acd-098d-41ea-9771-5b6546c9c06f | NS | 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa. | ns1.devstack.org. |
- | 877e0215-2ddf-4d01-a7da-47f1092dfd56 | PTR | 9.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa. | my-vm.example.org. |
- +--------------------------------------+------+---------------------------------------------------------------------------+---------------------------------------------------------------------+
-
-See :ref:`config-dns-int-ext-serv` for detailed instructions on how
-to create the externally accessible network.
-
-Use case 2: Floating IPs are published with associated port DNS attributes
---------------------------------------------------------------------------
-
-In this use case, the address of a floating IP is published in the external
-DNS service in conjunction with the ``dns_name`` of its associated port and the
-``dns_domain`` of the port's network. The steps to execute in this use case are
-the following:
-
-#. Assign a valid domain name to the network's ``dns_domain`` attribute. This
- name must end with a period (``.``).
-#. Boot an instance or alternatively, create a port specifying a valid value to
- its ``dns_name`` attribute. If the port is going to be used for an instance
- boot, the value assigned to ``dns_name`` must be equal to the ``hostname``
- that the Compute service will assign to the instance. Otherwise, the boot
- will fail.
-#. Create a floating IP and associate it to the port.
-
-Following is an example of these steps:
-
-.. code-block:: console
-
- $ neutron net-update 38c5e950-b450-4c30-83d4-ee181c28aad3 --dns_domain example.org.
- Updated network: 38c5e950-b450-4c30-83d4-ee181c28aad3
-
- $ neutron net-show 38c5e950-b450-4c30-83d4-ee181c28aad3
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | |
- | availability_zones | nova |
- | dns_domain | example.org. |
- | id | 38c5e950-b450-4c30-83d4-ee181c28aad3 |
- | mtu | 1450 |
- | name | private |
- | port_security_enabled | True |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | 43414c53-62ae-49bc-aa6c-c9dd7705818a |
- | | 5b9282a1-0be1-4ade-b478-7868ad2a16ff |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-------------------------+--------------------------------------+
-
- $ openstack server create --image cirros --flavor 42 \
- --nic net-id=38c5e950-b450-4c30-83d4-ee181c28aad3 my_vm
- +--------------------------------------+----------------------------------------------------------------+
- | Field | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | oTLQLR3Kezmt |
- | config_drive | |
- | created | 2016-02-15T19:27:34Z |
- | flavor | m1.nano (42) |
- | hostId | |
- | id | 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 |
- | image | cirros-0.3.5-x86_64-uec (b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | my_vm |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- | updated | 2016-02-15T19:27:34Z |
- | user_id | 8bb6e578cba24e7db9d3810633124525 |
- +--------------------------------------+----------------------------------------------------------------+
-
- $ openstack server list
- +--------------------------------------+-------+--------+------------+-------------+----------------------------------------------------------+------------+
- | ID | Name | Status | Task State | Power State | Networks | Image Name |
- +--------------------------------------+-------+--------+------------+-------------+----------------------------------------------------------+------------+
- | 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 | my_vm | ACTIVE | - | Running | private=fda4:653e:71b0:0:f816:3eff:fe16:b5f2, 192.0.2.15 | cirros |
- +--------------------------------------+-------+--------+------------+-------------+----------------------------------------------------------+------------+
-
- $ neutron port-list --device_id 43f328bb-b2d1-4cf1-a36f-3b2593397cb1
- +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+
- | id | name | mac_address | fixed_ips |
- +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+
- | da0b1f75-c895-460f-9fc1-4d6ec84cf85f | | fa:16:3e:16:b5:f2 | {"subnet_id": "5b9282a1-0be1-4ade-b478-7868ad2a16ff", "ip_address": "192.0.2.15"} |
- | | | | {"subnet_id": "43414c53-62ae-49bc-aa6c-c9dd7705818a", "ip_address": "fda4:653e:71b0:0:f816:3eff:fe16:b5f2"} |
- +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+
-
- $ neutron port-show da0b1f75-c895-460f-9fc1-4d6ec84cf85f
- +-----------------------+-------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+-------------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | allowed_address_pairs | |
- | binding:vnic_type | normal |
- | device_id | 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 |
- | device_owner | compute:None |
- | dns_assignment | {"hostname": "my-vm", "ip_address": "192.0.2.15", "fqdn": "my-vm.example.org."} |
- | | {"hostname": "my-vm", "ip_address": "fda4:653e:71b0:0:f816:3eff:fe16:b5f2", "fqdn": "my-vm.example.org."} |
- | dns_name | my-vm |
- | extra_dhcp_opts | |
- | fixed_ips | {"subnet_id": "5b9282a1-0be1-4ade-b478-7868ad2a16ff", "ip_address": "192.0.2.15"} |
- | | {"subnet_id": "43414c53-62ae-49bc-aa6c-c9dd7705818a", "ip_address": "fda4:653e:71b0:0:f816:3eff:fe16:b5f2"} |
- | id | da0b1f75-c895-460f-9fc1-4d6ec84cf85f |
- | mac_address | fa:16:3e:16:b5:f2 |
- | name | |
- | network_id | 38c5e950-b450-4c30-83d4-ee181c28aad3 |
- | port_security_enabled | True |
- | security_groups | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 |
- | status | ACTIVE |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-----------------------+-------------------------------------------------------------------------------------------------------------+
-
- $ designate record-list example.org.
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
- | 10a36008-6ecf-47c3-b321-05652a929b04 | SOA | example.org. | ns1.devstack.org. malavall.us.ibm.com. 1455563783 3600 600 86400 3600 |
- | 56ca0b88-e343-4c98-8faa-19746e169baf | NS | example.org. | ns1.devstack.org. |
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
-
- $ neutron floatingip-create 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a \
- --port_id da0b1f75-c895-460f-9fc1-4d6ec84cf85f
- Created a new floatingip:
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | dns_domain | |
- | dns_name | |
- | fixed_ip_address | 192.0.2.15 |
- | floating_ip_address | 198.51.100.4 |
- | floating_network_id | 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a |
- | id | e78f6eb1-a35f-4a90-941d-87c888d5fcc7 |
- | port_id | da0b1f75-c895-460f-9fc1-4d6ec84cf85f |
- | router_id | 970ebe83-c4a3-4642-810e-43ab7b0c2b5f |
- | status | DOWN |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +---------------------+--------------------------------------+
-
- $ designate record-list example.org.
- +--------------------------------------+------+--------------------+-----------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+--------------------+-----------------------------------------------------------------------+
- | 10a36008-6ecf-47c3-b321-05652a929b04 | SOA | example.org. | ns1.devstack.org. malavall.us.ibm.com. 1455564861 3600 600 86400 3600 |
- | 56ca0b88-e343-4c98-8faa-19746e169baf | NS | example.org. | ns1.devstack.org. |
- | 5ff53fd0-3746-48da-b9c9-77ed3004ec67 | A | my-vm.example.org. | 198.51.100.4 |
- +--------------------------------------+------+--------------------+-----------------------------------------------------------------------+
-
-In this example, notice that the data is published in the DNS service when the
-floating IP is associated to the port.
-
-Following are the PTR records created for this example. Note that for
-IPv4, the value of ``ipv4_ptr_zone_prefix_size`` is 24. For more details, see
-:ref:`config-dns-int-ext-serv`:
-
-.. code-block:: console
-
- $ designate record-list 100.51.198.in-addr.arpa.
- +--------------------------------------+------+----------------------------+---------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+----------------------------+---------------------------------------------------------------------+
- | 2dd0b894-25fa-4563-9d32-9f13bd67f329 | NS | 100.51.198.in-addr.arpa. | ns1.devstack.org. |
- | 47b920f1-5eff-4dfa-9616-7cb5b7cb7ca6 | SOA | 100.51.198.in-addr.arpa. | ns1.devstack.org. admin.example.org. 1455564862 3600 600 86400 3600 |
- | fb1edf42-abba-410c-8397-831f45fd0cd7 | PTR | 4.100.51.198.in-addr.arpa. | my-vm.example.org. |
- +--------------------------------------+------+----------------------------+---------------------------------------------------------------------+
-
-
-Use case 3: Floating IPs are published in the external DNS service
-------------------------------------------------------------------
-
-In this use case, the user assigns ``dns_name`` and ``dns_domain`` attributes
-to a floating IP when it is created. The floating IP data becomes visible in
-the external DNS service as soon as it is created. The floating IP can be
-associated with a port on creation or later on. The following example shows a
-user booting an instance and then creating a floating IP associated to the port
-allocated for the instance:
-
-.. code-block:: console
-
- $ neutron net-show 38c5e950-b450-4c30-83d4-ee181c28aad3
- +-------------------------+--------------------------------------+
- | Field | Value |
- +-------------------------+--------------------------------------+
- | admin_state_up | True |
- | availability_zone_hints | |
- | availability_zones | nova |
- | dns_domain | example.org. |
- | id | 38c5e950-b450-4c30-83d4-ee181c28aad3 |
- | mtu | 1450 |
- | name | private |
- | port_security_enabled | True |
- | router:external | False |
- | shared | False |
- | status | ACTIVE |
- | subnets | 43414c53-62ae-49bc-aa6c-c9dd7705818a |
- | | 5b9282a1-0be1-4ade-b478-7868ad2a16ff |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-------------------------+--------------------------------------+
-
- $ openstack server create --image cirros --flavor 42 \
- --nic net-id=38c5e950-b450-4c30-83d4-ee181c28aad3 my_vm
- +--------------------------------------+----------------------------------------------------------------+
- | Field | Value |
- +--------------------------------------+----------------------------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | - |
- | OS-SRV-USG:terminated_at | - |
- | accessIPv4 | |
- | accessIPv6 | |
- | adminPass | HLXGznYqXM4J |
- | config_drive | |
- | created | 2016-02-15T19:42:44Z |
- | flavor | m1.nano (42) |
- | hostId | |
- | id | 71fb4ac8-eed8-4644-8113-0641962bb125 |
- | image | cirros-0.3.5-x86_64-uec (b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) |
- | key_name | - |
- | locked | False |
- | metadata | {} |
- | name | my_vm |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | security_groups | default |
- | status | BUILD |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- | updated | 2016-02-15T19:42:44Z |
- | user_id | 8bb6e578cba24e7db9d3810633124525 |
- +--------------------------------------+----------------------------------------------------------------+
-
- $ openstack server list
- +--------------------------------------+-------+--------+------------+-------------+----------------------------------------------------------+------------+
- | ID | Name | Status | Task State | Power State | Networks | Image Name |
- +--------------------------------------+-------+--------+------------+-------------+----------------------------------------------------------+------------+
- | 71fb4ac8-eed8-4644-8113-0641962bb125 | my_vm | ACTIVE | - | Running | private=fda4:653e:71b0:0:f816:3eff:fe24:8614, 192.0.2.16 | cirros |
- +--------------------------------------+-------+--------+------------+-------------+----------------------------------------------------------+------------+
-
- $ neutron port-list --device_id 71fb4ac8-eed8-4644-8113-0641962bb125
- +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+
- | id | name | mac_address | fixed_ips |
- +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+
- | 1e7033fb-8e9d-458b-89ed-8312cafcfdcb | | fa:16:3e:24:86:14 | {"subnet_id": "5b9282a1-0be1-4ade-b478-7868ad2a16ff", "ip_address": "192.0.2.16"} |
- | | | | {"subnet_id": "43414c53-62ae-49bc-aa6c-c9dd7705818a", "ip_address": "fda4:653e:71b0:0:f816:3eff:fe24:8614"} |
- +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+
-
- $ neutron port-show 1e7033fb-8e9d-458b-89ed-8312cafcfdcb
- +-----------------------+-------------------------------------------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+-------------------------------------------------------------------------------------------------------------+
- | admin_state_up | True |
- | allowed_address_pairs | |
- | binding:vnic_type | normal |
- | device_id | 71fb4ac8-eed8-4644-8113-0641962bb125 |
- | device_owner | compute:None |
- | dns_assignment | {"hostname": "my-vm", "ip_address": "192.0.2.16", "fqdn": "my-vm.example.org."} |
- | | {"hostname": "my-vm", "ip_address": "fda4:653e:71b0:0:f816:3eff:fe24:8614", "fqdn": "my-vm.example.org."} |
- | dns_name | my-vm |
- | extra_dhcp_opts | |
- | fixed_ips | {"subnet_id": "5b9282a1-0be1-4ade-b478-7868ad2a16ff", "ip_address": "192.0.2.16"} |
- | | {"subnet_id": "43414c53-62ae-49bc-aa6c-c9dd7705818a", "ip_address": "fda4:653e:71b0:0:f816:3eff:fe24:8614"} |
- | id | 1e7033fb-8e9d-458b-89ed-8312cafcfdcb |
- | mac_address | fa:16:3e:24:86:14 |
- | name | |
- | network_id | 38c5e950-b450-4c30-83d4-ee181c28aad3 |
- | port_security_enabled | True |
- | security_groups | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 |
- | status | ACTIVE |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +-----------------------+-------------------------------------------------------------------------------------------------------------+
-
- $ designate record-list example.org.
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
- | 10a36008-6ecf-47c3-b321-05652a929b04 | SOA | example.org. | ns1.devstack.org. malavall.us.ibm.com. 1455565110 3600 600 86400 3600 |
- | 56ca0b88-e343-4c98-8faa-19746e169baf | NS | example.org. | ns1.devstack.org. |
- +--------------------------------------+------+--------------+-----------------------------------------------------------------------+
-
- $ neutron floatingip-create 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a \
- --dns_domain example.org. --dns_name my-floatingip
- Created a new floatingip:
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | dns_domain | example.org. |
- | dns_name | my-floatingip |
- | fixed_ip_address | |
- | floating_ip_address | 198.51.100.5 |
- | floating_network_id | 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a |
- | id | 9f23a9c6-eceb-42eb-9f45-beb58c473728 |
- | port_id | |
- | router_id | |
- | status | DOWN |
- | tenant_id | d5660cb1e6934612a01b4fb2fb630725 |
- +---------------------+--------------------------------------+
-
- $ designate record-list example.org.
- +--------------------------------------+------+----------------------------+-----------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+----------------------------+-----------------------------------------------------------------------+
- | 10a36008-6ecf-47c3-b321-05652a929b04 | SOA | example.org. | ns1.devstack.org. malavall.us.ibm.com. 1455566486 3600 600 86400 3600 |
- | 56ca0b88-e343-4c98-8faa-19746e169baf | NS | example.org. | ns1.devstack.org. |
- | 8884c56f-3ef5-446e-ae4d-8053cc8bc2b4 | A | my-floatingip.example.org. | 198.51.100.53 |
- +--------------------------------------+------+----------------------------+-----------------------------------------------------------------------+
-
-Note that in this use case:
-
-* The ``dns_name`` and ``dns_domain`` attributes of a floating IP must be
- specified together on creation. They cannot be assigned to the floating IP
- separately.
-* The ``dns_name`` and ``dns_domain`` of a floating IP have precedence, for
- purposes of being published in the external DNS service, over the
- ``dns_name`` of its associated port and the ``dns_domain`` of the port's
- network, whether they are specified or not. Only the ``dns_name`` and the
- ``dns_domain`` of the floating IP are published in the external DNS service.
-
-Following are the PTR records created for this example. Note that for
-IPv4, the value of ipv4_ptr_zone_prefix_size is 24. For more details, see
-:ref:`config-dns-int-ext-serv`:
-
-.. code-block:: console
-
- $ designate record-list 100.51.198.in-addr.arpa.
- +--------------------------------------+------+----------------------------+---------------------------------------------------------------------+
- | id | type | name | data |
- +--------------------------------------+------+----------------------------+---------------------------------------------------------------------+
- | 2dd0b894-25fa-4563-9d32-9f13bd67f329 | NS | 100.51.198.in-addr.arpa. | ns1.devstack.org. |
- | 47b920f1-5eff-4dfa-9616-7cb5b7cb7ca6 | SOA | 100.51.198.in-addr.arpa. | ns1.devstack.org. admin.example.org. 1455566487 3600 600 86400 3600 |
- | 589a0171-e77a-4ab6-ba6e-23114f2b9366 | PTR | 5.100.51.198.in-addr.arpa. | my-floatingip.example.org. |
- +--------------------------------------+------+----------------------------+---------------------------------------------------------------------+
-
-.. _config-dns-performance-considerations:
-
-Performance considerations
---------------------------
-
-Only for :ref:`config-dns-use-case-1`, if the port binding extension is
-enabled in the Networking service, the Compute service will execute one
-additional port update operation when allocating the port for the instance
-during the boot process. This may have a noticeable adverse effect in the
-performance of the boot process that must be evaluated before adoption of this
-use case.
-
-.. _config-dns-int-ext-serv:
-
-Configuring OpenStack Networking for integration with an external DNS service
------------------------------------------------------------------------------
-
-The first step to configure the integration with an external DNS service is to
-enable the functionality described in :ref:`config-dns-int-dns-resolution`.
-Once this is done, the user has to take the following steps and restart
-``neutron-server``.
-
-#. Edit the ``[default]`` section of ``/etc/neutron/neutron.conf`` and specify
- the external DNS service driver to be used in parameter
- ``external_dns_driver``. The valid options are defined in namespace
- ``neutron.services.external_dns_drivers``. The following example shows how
- to set up the driver for the OpenStack DNS service:
-
- .. code-block:: console
-
- external_dns_driver = designate
-
-#. If the OpenStack DNS service is the target external DNS, the ``[designate]``
- section of ``/etc/neutron/neutron.conf`` must define the following
- parameters:
-
- * ``url``: the OpenStack DNS service public endpoint URL.
- * ``allow_reverse_dns_lookup``: a boolean value specifying whether to enable
- or not the creation of reverse lookup (PTR) records.
- * ``admin_auth_url``: the Identity service admin authorization endpoint url.
- This endpoint will be used by the Networking service to authenticate as an
- admin user to create and update reverse lookup (PTR) zones.
- * ``admin_username``: the admin user to be used by the Networking service to
- create and update reverse lookup (PTR) zones.
- * ``admin_password``: the password of the admin user to be used by
- Networking service to create and update reverse lookup (PTR) zones.
- * ``admin_tenant_name``: the project of the admin user to be used by the
- Networking service to create and update reverse lookup (PTR) zones.
- * ``ipv4_ptr_zone_prefix_size``: the size in bits of the prefix for the IPv4
- reverse lookup (PTR) zones.
- * ``ipv6_ptr_zone_prefix_size``: the size in bits of the prefix for the IPv6
- reverse lookup (PTR) zones.
- * ``insecure``: Disable SSL certificate validation. By default, certificates
- are validated.
- * ``cafile``: Path to a valid Certificate Authority (CA) certificate.
- * ``auth_uri``: the unversioned public endpoint of the Identity service.
- * ``project_domain_id``: the domain ID of the admin user's project.
- * ``user_domain_id``: the domain ID of the admin user to be used by the
- Networking service.
- * ``project_name``: the project of the admin user to be used by the
- Networking service.
- * ``username``: the admin user to be used by the Networking service to
- create and update reverse lookup (PTR) zones.
- * ``password``: the password of the admin user to be used by
- Networking service.
-
- The following is an example:
-
- .. code-block:: console
-
- [designate]
- url = http://192.0.2.240:9001/v2
- auth_uri = http://192.0.2.240:5000
- admin_auth_url = http://192.0.2.240:35357
- admin_username = neutron
- admin_password = PASSWORD
- admin_tenant_name = service
- project_domain_id = default
- user_domain_id = default
- project_name = service
- username = neutron
- password = PASSWORD
- allow_reverse_dns_lookup = True
- ipv4_ptr_zone_prefix_size = 24
- ipv6_ptr_zone_prefix_size = 116
- cafile = /etc/ssl/certs/my_ca_cert
-
-Configuration of the externally accessible network for use case 1
------------------------------------------------------------------
-
-In :ref:`config-dns-use-case-1`, the externally accessible network must
-meet the following requirements:
-
-* The network cannot have attribute ``router:external`` set to ``True``.
-* The network type can be FLAT, VLAN, GRE, VXLAN or GENEVE.
-* For network types VLAN, GRE, VXLAN or GENEVE, the segmentation ID must be
- outside the ranges assigned to project networks.
diff --git a/doc/networking-guide/source/config-dns-res.rst b/doc/networking-guide/source/config-dns-res.rst
deleted file mode 100644
index 9756174998..0000000000
--- a/doc/networking-guide/source/config-dns-res.rst
+++ /dev/null
@@ -1,97 +0,0 @@
-.. _config-dns-res:
-
-=============================
-Name resolution for instances
-=============================
-
-The Networking service offers several methods to configure name
-resolution (DNS) for instances. Most deployments should implement
-case 1 or 2. Case 3 requires security considerations to prevent
-leaking internal DNS information to instances.
-
-Case 1: Each virtual network uses unique DNS resolver(s)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this case, the DHCP agent offers one or more unique DNS resolvers
-to instances via DHCP on each virtual network. You can configure a DNS
-resolver when creating or updating a subnet. To configure more than
-one DNS resolver, use a comma between each value.
-
-* Configure a DNS resolver when creating a subnet.
-
- .. code-block:: console
-
- $ neutron subnet-create --dns-nameserver DNS_RESOLVER
-
- Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable
- from the virtual network. For example:
-
- .. code-block:: console
-
- $ neutron subnet-create --dns-nameserver 203.0.113.8,198.51.100.53
-
- .. note::
-
- This command requires other options outside the scope of this
- content.
-
-* Configure a DNS resolver on an existing subnet.
-
- .. code-block:: console
-
- $ neutron subnet-update --dns-nameserver DNS_RESOLVER SUBNET_ID_OR_NAME
-
- Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable
- from the virtual network and ``SUBNET_ID_OR_NAME`` with the UUID or name
- of the subnet. For example, using the ``selfservice`` subnet:
-
- .. code-block:: console
-
- $ neutron subnet-update --dns-nameserver 203.0.113.8,198.51.100.53 selfservice
-
-Case 2: All virtual networks use same DNS resolver(s)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this case, the DHCP agent offers the same DNS resolver(s) to
-instances via DHCP on all virtual networks.
-
-* In the ``dhcp_agent.ini`` file, configure one or more DNS resolvers. To
- configure more than one DNS resolver, use a comma between each value.
-
- .. code-block:: ini
-
- [DEFAULT]
- dnsmasq_dns_servers = DNS_RESOLVER
-
- Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable
- from all virtual networks. For example:
-
- .. code-block:: ini
-
- [DEFAULT]
- dnsmasq_dns_servers = 203.0.113.8, 198.51.100.53
-
- .. note::
-
- You must configure this option for all eligible DHCP agents and
- restart them to activate the values.
-
-Case 3: All virtual networks use DNS resolver(s) on the host
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this case, the DHCP agent offers the DNS resolver(s) in the
-``resolv.conf`` file on the host running the DHCP agent via DHCP to
-instances on all virtual networks.
-
-* In the ``dhcp_agent.ini`` file, enable advertisement of the DNS resolver(s)
- on the host.
-
- .. code-block:: ini
-
- [DEFAULT]
- dnsmasq_local_resolv = True
-
- .. note::
-
- You must configure this option for all eligible DHCP agents and
- restart them to activate the values.
diff --git a/doc/networking-guide/source/config-dvr-ha-snat.rst b/doc/networking-guide/source/config-dvr-ha-snat.rst
deleted file mode 100644
index e08ba4fd26..0000000000
--- a/doc/networking-guide/source/config-dvr-ha-snat.rst
+++ /dev/null
@@ -1,195 +0,0 @@
-.. _config-dvr-snat-ha-ovs:
-
-=====================================
-Distributed Virtual Routing with VRRP
-=====================================
-
-:ref:`deploy-ovs-ha-dvr` supports augmentation
-using Virtual Router Redundancy Protocol (VRRP). Using this configuration,
-virtual routers support both the ``--distributed`` and ``--ha`` options.
-
-Similar to legacy HA routers, DVR/SNAT HA routers provide a quick fail over of
-the SNAT service to a backup DVR/SNAT router on an l3-agent running on a
-different node.
-
-SNAT high availability is implemented in a manner similar to the
-:ref:`deploy-lb-ha-vrrp` and :ref:`deploy-ovs-ha-vrrp` examples where
-``keepalived`` uses VRRP to provide quick failover of SNAT services.
-
-During normal operation, the master router periodically transmits *heartbeat*
-packets over a hidden project network that connects all HA routers for a
-particular project.
-
-If the DVR/SNAT backup router stops receiving these packets, it assumes failure
-of the master DVR/SNAT router and promotes itself to master router by
-configuring IP addresses on the interfaces in the ``snat`` namespace. In
-environments with more than one backup router, the rules of VRRP are followed
-to select a new master router.
-
-.. warning::
-
- There is a known bug with ``keepalived`` v1.2.15 and earlier which can
- cause packet loss when ``max_l3_agents_per_router`` is set to 3 or more.
- Therefore, we recommend that you upgrade to ``keepalived`` v1.2.16
- or greater when using this feature.
-
-.. note::
-
- Experimental feature or incomplete documentation.
-
-
-Configuration example
-~~~~~~~~~~~~~~~~~~~~~
-
-The basic deployment model consists of one controller node, two or more network
-nodes, and multiple computes nodes.
-
-Controller node configuration
------------------------------
-
-#. Add the following to ``/etc/neutron/neutron.conf``:
-
- .. code-block:: ini
-
- [DEFAULT]
- core_plugin = ml2
- service_plugins = router
- allow_overlapping_ips = True
- router_distributed = True
- l3_ha = True
- l3_ha_net_cidr = 169.254.192.0/18
- max_l3_agents_per_router = 3
-
- When the ``router_distributed = True`` flag is configured, routers created
- by all users are distributed. Without it, only privileged users can create
- distributed routers by using ``--distributed True``.
-
- Similarly, when the ``l3_ha = True`` flag is configured, routers created
- by all users default to HA.
-
- It follows that with these two flags set to ``True`` in the configuration
- file, routers created by all users will default to distributed HA routers
- (DVR HA).
-
- The same can explicitly be accomplished by a user with administrative
- credentials setting the flags in the :command:`neutron router-create`
- command:
-
-
- .. code-block:: console
-
- $ neutron router-create name-of-router --distributed=True --ha=True
-
- .. note::
-
- The *max_l3_agents_per_router* determine the number of backup
- DVR/SNAT routers which will be instantiated.
-
-#. Add the following to ``/etc/neutron/plugins/ml2/ml2_conf.ini``:
-
- .. code-block:: ini
-
- [ml2]
- type_drivers = flat,vxlan
- tenant_network_types = vxlan
- mechanism_drivers = openvswitch,l2population
- extension_drivers = port_security
-
- [ml2_type_flat]
- flat_networks = external
-
- [ml2_type_vxlan]
- vni_ranges = MIN_VXLAN_ID:MAX_VXLAN_ID
-
- Replace ``MIN_VXLAN_ID`` and ``MAX_VXLAN_ID`` with VXLAN ID minimum and
- maximum values suitable for your environment.
-
- .. note::
-
- The first value in the ``tenant_network_types`` option becomes the
- default project network type when a regular user creates a network.
-
-Network nodes
--------------
-
-#. Configure the Open vSwitch agent. Add the following to
- ``/etc/neutron/plugins/ml2/ml2_conf.ini``:
-
- .. code-block:: ini
-
- [ovs]
- local_ip = TUNNEL_INTERFACE_IP_ADDRESS
- bridge_mappings = external:br-ex
-
- [agent]
- enable_distributed_routing = True
- tunnel_types = vxlan
- l2_population = True
-
- Replace ``TUNNEL_INTERFACE_IP_ADDRESS`` with the IP address of the interface
- that handles VXLAN project networks.
-
-#. Configure the L3 agent. Add the following to ``/etc/neutron/l3_agent.ini``:
-
- .. code-block:: ini
-
- [DEFAULT]
- ha_vrrp_auth_password = password
- interface_driver = openvswitch
- external_network_bridge =
- agent_mode = dvr_snat
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-Compute nodes
--------------
-
-#. Configure the Open vSwitch agent. Add the following to
- ``/etc/neutron/plugins/ml2/ml2_conf.ini``:
-
- .. code-block:: ini
-
- [ovs]
- local_ip = TUNNEL_INTERFACE_IP_ADDRESS
- bridge_mappings = external:br-ex
-
- [agent]
- enable_distributed_routing = True
- tunnel_types = vxlan
- l2_population = True
-
- [securitygroup]
- firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-#. Configure the L3 agent. Add the following to ``/etc/neutron/l3_agent.ini``:
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = openvswitch
- external_network_bridge =
- agent_mode = dvr
-
- Replace ``TUNNEL_INTERFACE_IP_ADDRESS`` with the IP address of the interface
- that handles VXLAN project networks.
-
-Keepalived VRRP health check
-----------------------------
-
-.. include:: shared/keepalived-vrrp-healthcheck.txt
-
-Known limitations
-~~~~~~~~~~~~~~~~~
-
-* Migrating a router from distributed only, HA only, or legacy to distributed
- HA is not supported at this time. The router must be created as distributed
- HA.
- The reverse direction is also not supported. You cannot reconfigure a
- distributed HA router to be only distributed, only HA, or legacy.
-
-* There are certain scenarios where l2pop and distributed HA routers do not
- interact in an expected manner. These situations are the same that affect HA
- only routers and l2pop.
diff --git a/doc/networking-guide/source/config-ipam.rst b/doc/networking-guide/source/config-ipam.rst
deleted file mode 100644
index de7b79c411..0000000000
--- a/doc/networking-guide/source/config-ipam.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-.. _config-ipam:
-
-==================
-IPAM configuration
-==================
-
-.. note::
-
- Experimental feature or incomplete documentation.
-
-Starting with the Liberty release, OpenStack Networking includes a pluggable
-interface for the IP Address Management (IPAM) function. This interface creates
-a driver framework for the allocation and de-allocation of subnets and IP
-addresses, enabling the integration of alternate IPAM implementations or
-third-party IP Address Management systems.
-
-The basics
-~~~~~~~~~~
-
-In Liberty and Mitaka, the IPAM implementation within OpenStack Networking
-provided a pluggable and non-pluggable flavor. As of Newton, the non-pluggable
-flavor is no longer available. Instead, it is completely replaced with a
-reference driver implementation of the pluggable framework. All data will
-be automatically migrated during the upgrade process, unless you have
-previously configured a pluggable IPAM driver. In that case, no migration
-is necessary.
-
-To configure a driver other than the reference driver, specify it
-in the ``neutron.conf`` file. Do this after the migration is
-complete. There is no need to specify any value if you wish to use the
-reference driver.
-
-.. code-block:: ini
-
- ipam_driver = ipam-driver-name
-
-There is no need to specify any value if you wish to use the reference
-driver, though specifying ``internal`` will explicitly choose the reference
-driver. The documentation for any alternate drivers will include the value to
-use when specifying that driver.
-
-Known limitations
-~~~~~~~~~~~~~~~~~
-
-* The driver interface is designed to allow separate drivers for each
- subnet pool. However, the current implementation allows only a single
- IPAM driver system-wide.
-* Third-party drivers must provide their own migration mechanisms to convert
- existing OpenStack installations to their IPAM.
diff --git a/doc/networking-guide/source/config-ipv6.rst b/doc/networking-guide/source/config-ipv6.rst
deleted file mode 100644
index 480f5d7f21..0000000000
--- a/doc/networking-guide/source/config-ipv6.rst
+++ /dev/null
@@ -1,750 +0,0 @@
-.. _config-ipv6:
-
-====
-IPv6
-====
-
-This section describes the following items:
-
-* How to enable dual-stack (IPv4 and IPv6 enabled) instances.
-* How those instances receive an IPv6 address.
-* How those instances communicate across a router to other subnets or
- the internet.
-* How those instances interact with other OpenStack services.
-
-Enabling a dual-stack network in OpenStack Networking simply requires
-creating a subnet with the ``ip_version`` field set to ``6``, then the
-IPv6 attributes (``ipv6_ra_mode`` and ``ipv6_address_mode``) set. The
-``ipv6_ra_mode`` and ``ipv6_address_mode`` will be described in detail in
-the next section. Finally, the subnets ``cidr`` needs to be provided.
-
-This section does not include the following items:
-
-* Single stack IPv6 project networking
-* OpenStack control communication between servers and services over an IPv6
- network.
-* Connection to the OpenStack APIs via an IPv6 transport network
-* IPv6 multicast
-* IPv6 support in conjunction with any out of tree routers, switches, services
- or agents whether in physical or virtual form factors.
-
-Neutron subnets and the IPv6 API attributes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As of Juno, the OpenStack Networking service (neutron) provides two
-new attributes to the subnet object, which allows users of the API to
-configure IPv6 subnets.
-
-There are two IPv6 attributes:
-
-* ``ipv6_ra_mode``
-* ``ipv6_address_mode``
-
-These attributes can be set to the following values:
-
-* ``slaac``
-* ``dhcpv6-stateful``
-* ``dhcpv6-stateless``
-
-The attributes can also be left unset.
-
-
-IPv6 addressing
----------------
-
-The ``ipv6_address_mode`` attribute is used to control how addressing is
-handled by OpenStack. There are a number of different ways that guest
-instances can obtain an IPv6 address, and this attribute exposes these
-choices to users of the Networking API.
-
-
-Router advertisements
----------------------
-
-The ``ipv6_ra_mode`` attribute is used to control router
-advertisements for a subnet.
-
-The IPv6 Protocol uses Internet Control Message Protocol packets
-(ICMPv6) as a way to distribute information about networking. ICMPv6
-packets with the type flag set to 134 are called "Router
-Advertisement" packets, which contain information about the router
-and the route that can be used by guest instances to send network
-traffic.
-
-The ``ipv6_ra_mode`` is used to specify if the Networking service should
-generate Router Advertisement packets for a subnet.
-
-ipv6_ra_mode and ipv6_address_mode combinations
------------------------------------------------
-
-.. list-table::
- :header-rows: 1
- :widths: 10 10 10 10 60
-
- * - ipv6 ra mode
- - ipv6 address mode
- - radvd A,M,O
- - External Router A,M,O
- - Description
- * - *N/S*
- - *N/S*
- - Off
- - Not Defined
- - Backwards compatibility with pre-Juno IPv6 behavior.
- * - *N/S*
- - slaac
- - Off
- - 1,0,0
- - Guest instance obtains IPv6 address from non-OpenStack router using SLAAC.
- * - *N/S*
- - dhcpv6-stateful
- - Off
- - 0,1,1
- - Not currently implemented in the reference implementation.
- * - *N/S*
- - dhcpv6-stateless
- - Off
- - 1,0,1
- - Not currently implemented in the reference implementation.
- * - slaac
- - *N/S*
- - 1,0,0
- - Off
- - Not currently implemented in the reference implementation.
- * - dhcpv6-stateful
- - *N/S*
- - 0,1,1
- - Off
- - Not currently implemented in the reference implementation.
- * - dhcpv6-stateless
- - *N/S*
- - 1,0,1
- - Off
- - Not currently implemented in the reference implementation.
- * - slaac
- - slaac
- - 1,0,0
- - Off
- - Guest instance obtains IPv6 address from OpenStack managed radvd using SLAAC.
- * - dhcpv6-stateful
- - dhcpv6-stateful
- - 0,1,1
- - Off
- - Guest instance obtains IPv6 address from dnsmasq using DHCPv6
- stateful and optional info from dnsmasq using DHCPv6.
- * - dhcpv6-stateless
- - dhcpv6-stateless
- - 1,0,1
- - Off
- - Guest instance obtains IPv6 address from OpenStack managed
- radvd using SLAAC and optional info from dnsmasq using
- DHCPv6.
- * - slaac
- - dhcpv6-stateful
- -
- -
- - *Invalid combination.*
- * - slaac
- - dhcpv6-stateless
- -
- -
- - *Invalid combination.*
- * - dhcpv6-stateful
- - slaac
- -
- -
- - *Invalid combination.*
- * - dhcpv6-stateful
- - dhcpv6-stateless
- -
- -
- - *Invalid combination.*
- * - dhcpv6-stateless
- - slaac
- -
- -
- - *Invalid combination.*
- * - dhcpv6-stateless
- - dhcpv6-stateful
- -
- -
- - *Invalid combination.*
-
-Project network considerations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Dataplane
----------
-
-Both the Linux bridge and the Open vSwitch dataplane modules support
-forwarding IPv6
-packets amongst the guests and router ports. Similar to IPv4, there is no
-special configuration or setup required to enable the dataplane to properly
-forward packets from the source to the destination using IPv6. Note that these
-dataplanes will forward Link-local Address (LLA) packets between hosts on the
-same network just fine without any participation or setup by OpenStack
-components after the ports are all connected and MAC addresses learned.
-
-Addresses for subnets
----------------------
-
-There are three methods currently implemented for a subnet to get its
-``cidr`` in OpenStack:
-
-#. Direct assignment during subnet creation via command line or Horizon
-#. Referencing a subnet pool during subnet creation
-#. Using a Prefix Delegation (PD) client to request a prefix for a
- subnet from a PD server
-
-In the future, additional techniques could be used to allocate subnets
-to projects, for example, use of an external IPAM module.
-
-Address modes for ports
------------------------
-
-.. note::
-
- An external DHCPv6 server in theory could override the full
- address OpenStack assigns based on the EUI-64 address, but that
- would not be wise as it would not be consistent through the system.
-
-IPv6 supports three different addressing schemes for address configuration and
-for providing optional network information.
-
-Stateless Address Auto Configuration (SLAAC)
- Address configuration using Router Advertisement (RA).
-
-DHCPv6-stateless
- Address configuration using RA and optional information
- using DHCPv6.
-
-DHCPv6-stateful
- Address configuration and optional information using DHCPv6.
-
-OpenStack can be setup such that OpenStack Networking directly
-provides RA, DHCP
-relay and DHCPv6 address and optional information for their networks
-or this can be delegated to external routers and services based on the
-drivers that are in use. There are two neutron subnet attributes -
-``ipv6_ra_mode`` and ``ipv6_address_mode`` – that determine how IPv6
-addressing and network information is provided to project instances:
-
-* ``ipv6_ra_mode``: Determines who sends RA.
-* ``ipv6_address_mode``: Determines how instances obtain IPv6 address,
- default gateway, or optional information.
-
-For the above two attributes to be effective, ``enable_dhcp`` of the
-subnet object must be set to True.
-
-Using SLAAC for addressing
---------------------------
-
-When using SLAAC, the currently supported combinations for ``ipv6_ra_mode`` and
-``ipv6_address_mode`` are as follows.
-
-.. list-table::
- :header-rows: 1
- :widths: 10 10 50
-
- * - ipv6_ra_mode
- - ipv6_address_mode
- - Result
- * - Not specified.
- - SLAAC
- - Addresses are assigned using EUI-64, and an external router
- will be used for routing.
- * - SLAAC
- - SLAAC
- - Address are assigned using EUI-64, and OpenStack Networking
- provides routing.
-
-Setting ``ipv6_ra_mode`` to ``slaac`` will result in OpenStack Networking
-routers being configured to send RA packets, when they are created.
-This results in the following values set for the address configuration
-flags in the RA messages:
-
-* Auto Configuration Flag = 1
-* Managed Configuration Flag = 0
-* Other Configuration Flag = 0
-
-New or existing neutron networks that contain a SLAAC enabled IPv6 subnet will
-result in all neutron ports attached to the network receiving IPv6 addresses.
-This is because when RA broadcast messages are sent out on a neutron
-network, they are received by all IPv6 capable ports on the network,
-and each port will then configure an IPv6 address based on the
-information contained in the RA packet. In some cases, an IPv6 SLAAC
-address will be added to a port, in addition to other IPv4 and IPv6 addresses
-that the port already has been assigned.
-
-DHCPv6
-------
-
-For DHCPv6, the currently supported combinations are as
-follows:
-
-.. list-table::
- :header-rows: 1
- :widths: 10 10 50
-
- * - ipv6_ra_mode
- - ipv6_address_mode
- - Result
- * - DHCPv6-stateless
- - DHCPv6-stateless
- - Addresses are assigned through RAs (see SLAAC above) and optional
- information is delivered through DHCPv6.
- * - DHCPv6-stateful
- - DHCPv6-stateful
- - Addresses and optional information are assigned using DHCPv6.
-
-Setting DHCPv6-stateless for ``ipv6_ra_mode`` configures the neutron
-router with radvd agent to send RAs. The list below captures the
-values set for the address configuration flags in the RA packet in
-this scenario. Similarly, setting DHCPv6-stateless for
-``ipv6_address_mode`` configures neutron DHCP implementation to provide
-the additional network information.
-
-* Auto Configuration Flag = 1
-* Managed Configuration Flag = 0
-* Other Configuration Flag = 1
-
-Setting DHCPv6-stateful for ``ipv6_ra_mode`` configures the neutron
-router with radvd agent to send RAs. The list below captures the
-values set for the address configuration flags in the RA packet in
-this scenario. Similarly, setting DHCPv6-stateful for
-``ipv6_address_mode`` configures neutron DHCP implementation to provide
-addresses and additional network information through DHCPv6.
-
-* Auto Configuration Flag = 0
-* Managed Configuration Flag = 1
-* Other Configuration Flag = 1
-
-Router support
-~~~~~~~~~~~~~~
-
-The behavior of the neutron router for IPv6 is different than for IPv4 in
-a few ways.
-
-Internal router ports, that act as default gateway ports for a network, will
-share a common port for all IPv6 subnets associated with the network. This
-implies that there will be an IPv6 internal router interface with multiple
-IPv6 addresses from each of the IPv6 subnets associated with the network and a
-separate IPv4 internal router interface for the IPv4 subnet. On the other
-hand, external router ports are allowed to have a dual-stack configuration
-with both an IPv4 and an IPv6 address assigned to them.
-
-Neutron project networks that are assigned Global Unicast Address (GUA)
-prefixes and addresses don’t require NAT on the neutron router external gateway
-port to access the outside world. As a consequence of the lack of NAT the
-external router port doesn’t require a GUA to send and receive to the external
-networks. This implies a GUA IPv6 subnet prefix is not necessarily needed for
-the neutron external network. By default, a IPv6 LLA associated with the
-external gateway port can be used for routing purposes. To handle this
-scenario, the implementation of router-gateway-set API in neutron has been
-modified so that an IPv6 subnet is not required for the external network that
-is associated with the neutron router. The LLA address of the upstream router
-can be learned in two ways.
-
-#. In the absence of an upstream RA support, ``ipv6_gateway`` flag can be set
- with the external router gateway LLA in the neutron L3 agent configuration
- file. This also requires that no subnet is associated with that port.
-#. The upstream router can send an RA and the neutron router will
- automatically learn the next-hop LLA, provided again that no subnet is
- assigned and the ``ipv6_gateway`` flag is not set.
-
-Effectively the ``ipv6_gateway`` flag takes precedence over an RA that
-is received from the upstream router. If it is desired to use a GUA
-next hop that is accomplished by allocating a subnet to the external
-router port and assigning the upstream routers GUA address as the
-gateway for the subnet.
-
-.. note::
-
- It should be possible for projects to communicate with each other
- on an isolated network (a network without a router port) using LLA
- with little to no participation on the part of OpenStack. The authors
- of this section have not proven that to be true for all scenarios.
-
-.. note::
-
- When using the neutron L3 agent in a configuration where it is
- auto-configuring an IPv6 address via SLAAC, and the agent is
- learning its default IPv6 route from the ICMPv6 Router Advertisement,
- it may be necessary to set the
- ``net.ipv6.conf..accept_ra`` sysctl to the
- value ``2`` in order for routing to function correctly.
- For a more detailed description, please see the `bug `__.
-
-
-Neutron's Distributed Router feature and IPv6
----------------------------------------------
-
-IPv6 does work when the Distributed Virtual Router functionality is enabled,
-but all ingress/egress traffic is via the centralized router (hence, not
-distributed). More work is required to fully enable this functionality.
-
-
-Advanced services
-~~~~~~~~~~~~~~~~~
-
-VPNaaS
-------
-
-VPNaaS supports IPv6, but support in Kilo and prior releases will have
-some bugs that may limit how it can be used. More thorough and
-complete testing and bug fixing is being done as part of the Liberty
-release. IPv6-based VPN-as-a-Service is configured similar to the IPv4
-configuration. Either or both the ``peer_address`` and the
-``peer_cidr`` can specified as an IPv6 address. The choice of
-addressing modes and router modes described above should not impact
-support.
-
-
-LBaaS
------
-
-TODO
-
-FWaaS
------
-
-FWaaS allows creation of IPv6 based rules.
-
-NAT & Floating IPs
-------------------
-
-At the current time OpenStack Networking does not provide any facility
-to support any flavor of NAT with IPv6. Unlike IPv4 there is no
-current embedded support for floating IPs with IPv6. It is assumed
-that the IPv6 addressing amongst the projects is using GUAs with no
-overlap across the projects.
-
-Security considerations
-~~~~~~~~~~~~~~~~~~~~~~~
-
-.. todo:: Initially this is probably just stating the security group rules
- relative to IPv6 that are applied. Need some help for these
-
-Configuring interfaces of the guest
------------------------------------
-
-OpenStack currently doesn't support the privacy extensions defined by RFC 4941.
-The interface identifier and DUID used must be directly derived from the MAC
-as described in RFC 2373. The compute hosts must not be setup to utilize the
-privacy extensions when generating their interface identifier.
-
-There is no provisions for an IPv6-based metadata service similar to what is
-provided for IPv4. In the case of dual stacked guests though it is always
-possible to use the IPv4 metadata service instead.
-
-Unlike IPv4 the MTU of a given network can be conveyed in the RA messages sent
-by the router as well as in the DHCP messages.
-
-OpenStack control & management network considerations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As of the Kilo release, considerable effort has gone in to ensuring
-the project network can handle dual stack IPv6 and IPv4 transport
-across the variety of configurations described above. OpenStack control
-network can be run in a dual stack configuration and OpenStack API
-endpoints can be accessed via an IPv6 network. At this time, Open vSwitch
-(OVS) tunnel types - STT, VXLAN, GRE, support both IPv4 and IPv6 endpoints.
-
-
-Prefix delegation
-~~~~~~~~~~~~~~~~~
-
-From the Liberty release onwards, OpenStack Networking supports IPv6 prefix
-delegation. This section describes the configuration and workflow steps
-necessary to use IPv6 prefix delegation to provide automatic allocation of
-subnet CIDRs. This allows you as the OpenStack administrator to rely on an
-external (to the OpenStack Networking service) DHCPv6 server to manage your
-project network prefixes.
-
-.. note::
-
- Prefix delegation became available in the Liberty release, it is
- not available in the Kilo release. HA and DVR routers
- are not currently supported by this feature.
-
-Configuring OpenStack Networking for prefix delegation
-------------------------------------------------------
-
-To enable prefix delegation, edit the ``/etc/neutron/neutron.conf`` file.
-
-.. code-block:: console
-
- ipv6_pd_enabled = True
-
-.. note::
-
- If you are not using the default dibbler-based driver for prefix
- delegation, then you also need to set the driver in
- ``/etc/neutron/neutron.conf``:
-
- .. code-block:: console
-
- pd_dhcp_driver =
-
- Drivers other than the default one may require extra configuration,
- please refer to :ref:`extra-driver-conf`
-
-This tells OpenStack Networking to use the prefix delegation mechanism for
-subnet allocation when the user does not provide a CIDR or subnet pool id when
-creating a subnet.
-
-Requirements
-------------
-
-To use this feature, you need a prefix delegation capable DHCPv6 server that is
-reachable from your OpenStack Networking node(s). This could be software
-running on the OpenStack Networking node(s) or elsewhere, or a physical router.
-For the purposes of this guide we are using the open-source DHCPv6 server,
-Dibbler. Dibbler is available in many Linux package managers, or from source at
-`tomaszmrugalski/dibbler `_.
-
-When using the reference implementation of the OpenStack Networking prefix
-delegation driver, Dibbler must also be installed on your OpenStack Networking
-node(s) to serve as a DHCPv6 client. Version 1.0.1 or higher is required.
-
-This guide assumes that you are running a Dibbler server on the network node
-where the external network bridge exists. If you already have a prefix
-delegation capable DHCPv6 server in place, then you can skip the following
-section.
-
-Configuring the Dibbler server
-------------------------------
-
-After installing Dibbler, edit the ``/etc/dibbler/server.conf`` file:
-
-.. code-block:: none
-
- script "/var/lib/dibbler/pd-server.sh"
-
- iface "br-ex" {
- pd-class {
- pd-pool 2001:db8:2222::/48
- pd-length 64
- }
- }
-
-The options used in the configuration file above are:
-
-- ``script``
- Points to a script to be run when a prefix is delegated or
- released. This is only needed if you want instances on your
- subnets to have external network access. More on this below.
-- ``iface``
- The name of the network interface on which to listen for
- prefix delegation messages.
-- ``pd-pool``
- The larger prefix from which you want your delegated
- prefixes to come. The example given is sufficient if you do
- not need external network access, otherwise a unique
- globally routable prefix is necessary.
-- ``pd-length``
- The length that delegated prefixes will be. This must be
- 64 to work with the current OpenStack Networking reference implementation.
-
-To provide external network access to your instances, your Dibbler server also
-needs to create new routes for each delegated prefix. This is done using the
-script file named in the config file above. Edit the
-``/var/lib/dibbler/pd-server.sh`` file:
-
-.. code-block:: bash
-
- if [ "$PREFIX1" != "" ]; then
- if [ "$1" == "add" ]; then
- sudo ip -6 route add ${PREFIX1}/64 via $REMOTE_ADDR dev $IFACE
- fi
- if [ "$1" == "delete" ]; then
- sudo ip -6 route del ${PREFIX1}/64 via $REMOTE_ADDR dev $IFACE
- fi
- fi
-
-The variables used in the script file above are:
-
-- ``$PREFIX1``
- The prefix being added/deleted by the Dibbler server.
-- ``$1``
- The operation being performed.
-- ``$REMOTE_ADDR``
- The IP address of the requesting Dibbler client.
-- ``$IFACE``
- The network interface upon which the request was received.
-
-The above is all you need in this scenario, but more information on
-installing, configuring, and running Dibbler is available in the Dibbler user
-guide, at `Dibbler – a portable DHCPv6
-`_.
-
-To start your Dibbler server, run:
-
-.. code-block:: console
-
- # dibbler-server run
-
-Or to run in headless mode:
-
-.. code-block:: console
-
- # dibbler-server start
-
-When using DevStack, it is important to start your server after the
-``stack.sh`` script has finished to ensure that the required network
-interfaces have been created.
-
-User workflow
--------------
-
-First, create a network and IPv6 subnet:
-
-.. code-block:: console
-
- $ openstack network create ipv6-pd
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-25T19:26:01Z |
- | description | |
- | headers | |
- | id | 4b782725-6abe-4a2d-b061-763def1bb029 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | ipv6-pd |
- | port_security_enabled | True |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 46 |
- | revision_number | 3 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- | updated_at | 2017-01-25T19:26:01Z |
- +---------------------------+--------------------------------------+
-
- $ openstack subnet create --ip-version 6 --ipv6-ra-mode slaac \
- --ipv6-address-mode slaac --use-default-subnet-pool \
- --network ipv6-pd ipv6-pd-1
- +------------------------+--------------------------------------+
- | Field | Value |
- +------------------------+--------------------------------------+
- | allocation_pools | ::2-::ffff:ffff:ffff:ffff |
- | cidr | ::/64 |
- | created_at | 2017-01-25T19:31:53Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | ::1 |
- | headers | |
- | host_routes | |
- | id | 1319510d-c92c-4532-bf5d-8bcf3da761a1 |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | slaac |
- | name | ipv6-pd-1 |
- | network_id | 4b782725-6abe-4a2d-b061-763def1bb029 |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | prefix_delegation |
- | updated_at | 2017-01-25T19:31:53Z |
- | use_default_subnetpool | True |
- +------------------------+--------------------------------------+
-
-The subnet is initially created with a temporary CIDR before one can be
-assigned by prefix delegation. Any number of subnets with this temporary CIDR
-can exist without raising an overlap error. The subnetpool_id is automatically
-set to ``prefix_delegation``.
-
-To trigger the prefix delegation process, create a router interface between
-this subnet and a router with an active interface on the external network:
-
-.. code-block:: console
-
- $ openstack router add subnet router1 ipv6-pd-1
-
-The prefix delegation mechanism then sends a request via the external network
-to your prefix delegation server, which replies with the delegated prefix. The
-subnet is then updated with the new prefix, including issuing new IP addresses
-to all ports:
-
-.. code-block:: console
-
- $ openstack subnet show ipv6-pd-1
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 2001:db8:2222:6977::2-2001:db8:2222: |
- | | 6977:ffff:ffff:ffff:ffff |
- | cidr | 2001:db8:2222:6977::/64 |
- | created_at | 2017-01-25T19:31:53Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 2001:db8:2222:6977::1 |
- | host_routes | |
- | id | 1319510d-c92c-4532-bf5d-8bcf3da761a1 |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | slaac |
- | name | ipv6-pd-1 |
- | network_id | 4b782725-6abe-4a2d-b061-763def1bb029 |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | revision_number | 4 |
- | service_types | |
- | subnetpool_id | prefix_delegation |
- | updated_at | 2017-01-25T19:35:26Z |
- +-------------------+--------------------------------------+
-
-
-If the prefix delegation server is configured to delegate globally routable
-prefixes and setup routes, then any instance with a port on this subnet should
-now have external network access.
-
-Deleting the router interface causes the subnet to be reverted to the temporary
-CIDR, and all ports have their IPs updated. Prefix leases are released and
-renewed automatically as necessary.
-
-References
-----------
-
-The following link provides a great step by step tutorial on setting up IPv6
-with OpenStack: `Tenant IPV6 deployment in OpenStack Kilo release
-`_.
-
-.. _extra-driver-conf:
-
-Extra configuration
--------------------
-
-Neutron dhcpv6_pd_agent
-^^^^^^^^^^^^^^^^^^^^^^^
-
-To enable the driver for the dhcpv6_pd_agent, set pd_dhcp_driver to this in
-``/etc/neutron/neutron.conf``:
-
-.. code-block:: console
-
- pd_dhcp_driver = neutron_pd_agent
-
-To allow the neutron-pd-agent to communicate with prefix delegation servers,
-you must set which network interface to use for external communication. In
-DevStack the default for this is ``br-ex``:
-
-.. code-block:: console
-
- pd_interface = br-ex
-
-Once you have stacked run the command below to start the neutron-pd-agent:
-
-.. code-block:: console
-
- neutron-pd-agent --config-file /etc/neutron/neutron.conf
diff --git a/doc/networking-guide/source/config-lbaas.rst b/doc/networking-guide/source/config-lbaas.rst
deleted file mode 100644
index 65aca6f36e..0000000000
--- a/doc/networking-guide/source/config-lbaas.rst
+++ /dev/null
@@ -1,503 +0,0 @@
-.. _config-lbaas:
-
-==================================
-Load Balancer as a Service (LBaaS)
-==================================
-
-The Networking service offers a load balancer feature called "LBaaS v2"
-through the ``neutron-lbaas`` service plug-in.
-
-LBaaS v2 adds the concept of listeners to the LBaaS v1 load balancers.
-LBaaS v2 allows you to configure multiple listener ports on a single load
-balancer IP address.
-
-There are two reference implementations of LBaaS v2.
-The one is an agent based implementation with HAProxy.
-The agents handle the HAProxy configuration and manage the HAProxy daemon.
-Another LBaaS v2 implementation, `Octavia
-`_, has a separate API and
-separate worker processes that build load balancers within virtual machines on
-hypervisors that are managed by the Compute service. You do not need an agent
-for Octavia.
-
-.. note::
-
- LBaaS v1 was removed in the Newton release. These links provide more
- details about how LBaaS v1 works and how to configure it:
-
- * `Load-Balancer-as-a-Service (LBaaS) overview `__
- * `Basic Load-Balancer-as-a-Service operations `__
-
-.. warning::
-
- Currently, no migration path exists between v1 and v2 load balancers. If you
- choose to switch from v1 to v2, you must recreate all load balancers, pools,
- and health monitors.
-
-.. TODO(amotoki): Data mirgation from v1 to v2 is provided in Newton release,
- but its usage is not documented enough. It should be added here.
-
-LBaaS v2 Concepts
-~~~~~~~~~~~~~~~~~
-
-LBaaS v2 has several new concepts to understand:
-
-.. image:: figures/lbaasv2-diagram.png
- :alt: LBaaS v2 layout
-
-Load balancer
- The load balancer occupies a neutron network port and has an IP address
- assigned from a subnet.
-
-Listener
- Load balancers can listen for requests on multiple ports. Each one of those
- ports is specified by a listener.
-
-Pool
- A pool holds a list of members that serve content through the load balancer.
-
-Member
- Members are servers that serve traffic behind a load balancer. Each member
- is specified by the IP address and port that it uses to serve traffic.
-
-Health monitor
- Members may go offline from time to time and health monitors divert traffic
- away from members that are not responding properly. Health monitors are
- associated with pools.
-
-LBaaS v2 has multiple implementations via different service plug-ins. The two
-most common implementations use either an agent or the Octavia services. Both
-implementations use the `LBaaS v2 API `_.
-
-Configurations
-~~~~~~~~~~~~~~
-
-Configuring LBaaS v2 with an agent
-----------------------------------
-
-#. Add the LBaaS v2 service plug-in to the ``service_plugins`` configuration
- directive in ``/etc/neutron/neutron.conf``. The plug-in list is
- comma-separated:
-
- .. code-block:: console
-
- service_plugins = [existing service plugins],neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
-
-#. Add the LBaaS v2 service provider to the ``service_provider`` configuration
- directive within the ``[service_providers]`` section in
- ``/etc/neutron/neutron_lbaas.conf``:
-
- .. code-block:: console
-
- service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-
- If you have existing service providers for other networking service
- plug-ins, such as VPNaaS or FWaaS, add the ``service_provider`` line shown
- above in the ``[service_providers]`` section as a separate line. These
- configuration directives are repeatable and are not comma-separated.
-
-#. Select the driver that manages virtual interfaces in
- ``/etc/neutron/lbaas_agent.ini``:
-
- .. code-block:: console
-
- [DEFAULT]
- device_driver = neutron_lbaas.drivers.haproxy.namespace_driver.HaproxyNSDriver
- interface_driver = INTERFACE_DRIVER
- [haproxy]
- user_group = haproxy
-
- Replace ``INTERFACE_DRIVER`` with the interface driver that the layer-2
- agent in your environment uses. For example, ``openvswitch`` for Open
- vSwitch or ``linuxbridge`` for Linux bridge.
-
-#. Run the ``neutron-lbaas`` database migration:
-
- .. code-block:: console
-
- neutron-db-manage --subproject neutron-lbaas upgrade head
-
-#. If you have deployed LBaaS v1, **stop the LBaaS v1 agent now**. The v1 and
- v2 agents **cannot** run simultaneously.
-
-#. Start the LBaaS v2 agent:
-
- .. code-block:: console
-
- neutron-lbaasv2-agent \
- --config-file /etc/neutron/neutron.conf \
- --config-file /etc/neutron/lbaas_agent.ini
-
-#. Restart the Network service to activate the new configuration. You are now
- ready to create load balancers with the LBaaS v2 agent.
-
-Configuring LBaaS v2 with Octavia
----------------------------------
-
-Octavia provides additional capabilities for load balancers, including using a
-compute driver to build instances that operate as load balancers.
-The `Hands on Lab - Install and Configure OpenStack Octavia
-`_
-session at the OpenStack Summit in Tokyo provides an overview of Octavia.
-
-The DevStack documentation offers a `simple method to deploy Octavia
-`_
-and test the service with redundant load balancer instances. If you already
-have Octavia installed and configured within your environment, you can
-configure the Network service to use Octavia:
-
-#. Add the LBaaS v2 service plug-in to the ``service_plugins`` configuration
- directive in ``/etc/neutron/neutron.conf``. The plug-in list is
- comma-separated:
-
- .. code-block:: console
-
- service_plugins = [existing service plugins],neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
-
-#. Add the Octavia service provider to the ``service_provider`` configuration
- directive within the ``[service_providers]`` section in
- ``/etc/neutron/neutron_lbaas.conf``:
-
- .. code-block:: console
-
- service_provider = LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default
-
- Ensure that the LBaaS v1 and v2 service providers are removed from the
- ``[service_providers]`` section. They are not used with Octavia. **Verify
- that all LBaaS agents are stopped.**
-
-#. Restart the Network service to activate the new configuration. You are now
- ready to create and manage load balancers with Octavia.
-
-Add LBaaS panels to Dashboard
------------------------------
-
-The Dashboard panels for managing LBaaS v2 are available starting with the
-Mitaka release.
-
-#. Clone the `neutron-lbaas-dashboard repository
- `__
- and check out the release
- branch that matches the installed version of Dashboard:
-
- .. code-block:: console
-
- $ git clone https://git.openstack.org/openstack/neutron-lbaas-dashboard
- $ cd neutron-lbaas-dashboard
- $ git checkout OPENSTACK_RELEASE
-
-#. Install the Dashboard panel plug-in:
-
- .. code-block:: console
-
- $ python setup.py install
-
-#. Copy the ``_1481_project_ng_loadbalancersv2_panel.py`` file from the
- ``neutron-lbaas-dashboard/enabled`` directory into the Dashboard
- ``openstack_dashboard/local/enabled`` directory.
-
- This step ensures that Dashboard can find the plug-in when it enumerates
- all of its available panels.
-
-#. Enable the plug-in in Dashboard by editing the ``local_settings.py`` file
- and setting ``enable_lb`` to ``True`` in the ``OPENSTACK_NEUTRON_NETWORK``
- dictionary.
-
-#. If Dashboard is configured to compress static files for better performance
- (usually set through ``COMPRESS_OFFLINE`` in ``local_settings.py``),
- optimize the static files again:
-
- .. code-block:: console
-
- $ ./manage.py collectstatic
- $ ./manage.py compress
-
-#. Restart Apache to activate the new panel:
-
- .. code-block:: console
-
- $ sudo service apache2 restart
-
-To find the panel, click on :guilabel:`Project` in Dashboard, then click the
-:guilabel:`Network` drop-down menu and select :guilabel:`Load Balancers`.
-
-LBaaS v2 operations
-~~~~~~~~~~~~~~~~~~~
-
-The same neutron commands are used for LBaaS v2 with an agent or with Octavia.
-
-Building an LBaaS v2 load balancer
-----------------------------------
-
-#. Start by creating a load balancer on a network. In this example, the
- ``private`` network is an isolated network with two web server instances:
-
- .. code-block:: console
-
- $ neutron lbaas-loadbalancer-create --name test-lb private-subnet
-
-#. You can view the load balancer status and IP address with the
- :command:`neutron lbaas-loadbalancer-show` command:
-
- .. code-block:: console
-
- $ neutron lbaas-loadbalancer-show test-lb
- +---------------------+------------------------------------------------+
- | Field | Value |
- +---------------------+------------------------------------------------+
- | admin_state_up | True |
- | description | |
- | id | 7780f9dd-e5dd-43a9-af81-0d2d1bd9c386 |
- | listeners | {"id": "23442d6a-4d82-40ee-8d08-243750dbc191"} |
- | | {"id": "7e0d084d-6d67-47e6-9f77-0115e6cf9ba8"} |
- | name | test-lb |
- | operating_status | ONLINE |
- | provider | octavia |
- | provisioning_status | ACTIVE |
- | tenant_id | fbfce4cb346c4f9097a977c54904cafd |
- | vip_address | 192.0.2.22 |
- | vip_port_id | 9f8f8a75-a731-4a34-b622-864907e1d556 |
- | vip_subnet_id | f1e7827d-1bfe-40b6-b8f0-2d9fd946f59b |
- +---------------------+------------------------------------------------+
-
-#. Update the security group to allow traffic to reach the new load balancer.
- Create a new security group along with ingress rules to allow traffic into
- the new load balancer. The neutron port for the load balancer is shown as
- ``vip_port_id`` above.
-
- Create a security group and rules to allow TCP port 80, TCP port 443, and
- all ICMP traffic:
-
- .. code-block:: console
-
- $ neutron security-group-create lbaas
- $ neutron security-group-rule-create \
- --direction ingress \
- --protocol tcp \
- --port-range-min 80 \
- --port-range-max 80 \
- --remote-ip-prefix 0.0.0.0/0 \
- lbaas
- $ neutron security-group-rule-create \
- --direction ingress \
- --protocol tcp \
- --port-range-min 443 \
- --port-range-max 443 \
- --remote-ip-prefix 0.0.0.0/0 \
- lbaas
- $ neutron security-group-rule-create \
- --direction ingress \
- --protocol icmp \
- lbaas
-
- Apply the security group to the load balancer's network port using
- ``vip_port_id`` from the :command:`neutron lbaas-loadbalancer-show`
- command:
-
- .. code-block:: console
-
- $ neutron port-update \
- --security-group lbaas \
- 9f8f8a75-a731-4a34-b622-864907e1d556
-
-Adding an HTTP listener
------------------------
-
-#. With the load balancer online, you can add a listener for plaintext
- HTTP traffic on port 80:
-
- .. code-block:: console
-
- $ neutron lbaas-listener-create \
- --name test-lb-http \
- --loadbalancer test-lb \
- --protocol HTTP \
- --protocol-port 80
-
- This load balancer is active and ready to serve traffic on ``192.0.2.22``.
-
-#. Verify that the load balancer is responding to pings before moving further:
-
- .. code-block:: console
-
- $ ping -c 4 192.0.2.22
- PING 192.0.2.22 (192.0.2.22) 56(84) bytes of data.
- 64 bytes from 192.0.2.22: icmp_seq=1 ttl=62 time=0.410 ms
- 64 bytes from 192.0.2.22: icmp_seq=2 ttl=62 time=0.407 ms
- 64 bytes from 192.0.2.22: icmp_seq=3 ttl=62 time=0.396 ms
- 64 bytes from 192.0.2.22: icmp_seq=4 ttl=62 time=0.397 ms
-
- --- 192.0.2.22 ping statistics ---
- 4 packets transmitted, 4 received, 0% packet loss, time 2997ms
- rtt min/avg/max/mdev = 0.396/0.402/0.410/0.020 ms
-
-
-#. You can begin building a pool and adding members to the pool to serve HTTP
- content on port 80. For this example, the web servers are ``192.0.2.16``
- and ``192.0.2.17``:
-
- .. code-block:: console
-
- $ neutron lbaas-pool-create \
- --name test-lb-pool-http \
- --lb-algorithm ROUND_ROBIN \
- --listener test-lb-http \
- --protocol HTTP
- $ neutron lbaas-member-create \
- --name test-lb-http-member-1 \
- --subnet private-subnet \
- --address 192.0.2.16 \
- --protocol-port 80 \
- test-lb-pool-http
- $ neutron lbaas-member-create \
- --name test-lb-http-member-2 \
- --subnet private-subnet \
- --address 192.0.2.17 \
- --protocol-port 80 \
- test-lb-pool-http
-
-#. You can use ``curl`` to verify connectivity through the load balancers to
- your web servers:
-
- .. code-block:: console
-
- $ curl 192.0.2.22
- web2
- $ curl 192.0.2.22
- web1
- $ curl 192.0.2.22
- web2
- $ curl 192.0.2.22
- web1
-
- In this example, the load balancer uses the round robin algorithm and the
- traffic alternates between the web servers on the backend.
-
-#. You can add a health monitor so that unresponsive servers are removed
- from the pool:
-
- .. code-block:: console
-
- $ neutron lbaas-healthmonitor-create \
- --name test-lb-http-monitor \
- --delay 5 \
- --max-retries 2 \
- --timeout 10 \
- --type HTTP \
- --pool test-lb-pool-http
-
- In this example, the health monitor removes the server from the pool if
- it fails a health check at two five-second intervals. When the server
- recovers and begins responding to health checks again, it is added to
- the pool once again.
-
-Adding an HTTPS listener
-------------------------
-
-You can add another listener on port 443 for HTTPS traffic. LBaaS v2 offers
-SSL/TLS termination at the load balancer, but this example takes a simpler
-approach and allows encrypted connections to terminate at each member server.
-
-#. Start by creating a listener, attaching a pool, and then adding members:
-
- .. code-block:: console
-
- $ neutron lbaas-listener-create \
- --name test-lb-https \
- --loadbalancer test-lb \
- --protocol HTTPS \
- --protocol-port 443
- $ neutron lbaas-pool-create \
- --name test-lb-pool-https \
- --lb-algorithm LEAST_CONNECTIONS \
- --listener test-lb-https \
- --protocol HTTPS
- $ neutron lbaas-member-create \
- --name test-lb-https-member-1 \
- --subnet private-subnet \
- --address 192.0.2.16 \
- --protocol-port 443 \
- test-lb-pool-https
- $ neutron lbaas-member-create \
- --name test-lb-https-member-2 \
- --subnet private-subnet \
- --address 192.0.2.17 \
- --protocol-port 443 \
- test-lb-pool-https
-
-#. You can also add a health monitor for the HTTPS pool:
-
- .. code-block:: console
-
- $ neutron lbaas-healthmonitor-create \
- --name test-lb-https-monitor \
- --delay 5 \
- --max-retries 2 \
- --timeout 10 \
- --type HTTPS \
- --pool test-lb-pool-https
-
- The load balancer now handles traffic on ports 80 and 443.
-
-Associating a floating IP address
----------------------------------
-
-Load balancers that are deployed on a public or provider network that are
-accessible to external clients do not need a floating IP address assigned.
-External clients can directly access the virtual IP address (VIP) of those
-load balancers.
-
-However, load balancers deployed onto private or isolated networks need a
-floating IP address assigned if they must be accessible to external clients. To
-complete this step, you must have a router between the private and public
-networks and an available floating IP address.
-
-You can use the :command:`neutron lbaas-loadbalancer-show` command from the
-beginning of this section to locate the ``vip_port_id``. The ``vip_port_id``
-is the ID of the network port that is assigned to the load balancer. You can
-associate a free floating IP address to the load balancer using
-:command:`neutron floatingip-associate`:
-
-.. code-block:: console
-
- $ neutron floatingip-associate FLOATINGIP_ID LOAD_BALANCER_PORT_ID
-
-Setting quotas for LBaaS v2
----------------------------
-
-Quotas are available for limiting the number of load balancers and load
-balancer pools. By default, both quotas are set to 10.
-
-You can adjust quotas using the :command:`neutron quota-update` command:
-
-.. code-block:: console
-
- $ neutron quota-update --tenant-id TENANT_UUID --loadbalancer 25
- $ neutron quota-update --tenant-id TENANT_UUID --pool 50
-
-A setting of ``-1`` disables the quota for a tenant.
-
-Retrieving load balancer statistics
------------------------------------
-
-The LBaaS v2 agent collects four types of statistics for each load balancer
-every six seconds. Users can query these statistics with the
-:command:`neutron lbaas-loadbalancer-stats` command:
-
-.. code-block:: console
-
- $ neutron lbaas-loadbalancer-stats test-lb
- +--------------------+----------+
- | Field | Value |
- +--------------------+----------+
- | active_connections | 0 |
- | bytes_in | 40264557 |
- | bytes_out | 71701666 |
- | total_connections | 384601 |
- +--------------------+----------+
-
-The ``active_connections`` count is the total number of connections that were
-active at the time the agent polled the load balancer. The other three
-statistics are cumulative since the load balancer was last started. For
-example, if the load balancer restarts due to a system error or a configuration
-change, these statistics will be reset.
diff --git a/doc/networking-guide/source/config-macvtap.rst b/doc/networking-guide/source/config-macvtap.rst
deleted file mode 100644
index 1185795941..0000000000
--- a/doc/networking-guide/source/config-macvtap.rst
+++ /dev/null
@@ -1,181 +0,0 @@
-.. _config-macvtap:
-
-========================
-Macvtap mechanism driver
-========================
-
-The Macvtap mechanism driver for the ML2 plug-in generally increases
-network performance of instances.
-
-Consider the following attributes of this mechanism driver to determine
-practicality in your environment:
-
-* Supports only instance ports. Ports for DHCP and layer-3 (routing)
- services must use another mechanism driver such as Linux bridge or
- Open vSwitch (OVS).
-
-* Supports only untagged (flat) and tagged (VLAN) networks.
-
-* Lacks support for security groups including basic (sanity) and
- anti-spoofing rules.
-
-* Lacks support for layer-3 high-availability mechanisms such as
- Virtual Router Redundancy Protocol (VRRP) and Distributed Virtual
- Routing (DVR).
-
-* Only compute resources can be attached via macvtap. Attaching other
- resources like DHCP, Routers and others is not supported. Therefore run
- either OVS or linux bridge in VLAN or flat mode on the controller node.
-
-* Instance migration requires the same values for the
- ``physical_interface_mapping`` configuration option on each compute node.
- For more information, see
- ``_.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-You can add this mechanism driver to an existing environment using either
-the Linux bridge or OVS mechanism drivers with only provider networks or
-provider and self-service networks. You can change the configuration of
-existing compute nodes or add compute nodes with the Macvtap mechanism
-driver. The example configuration assumes addition of compute nodes with
-the Macvtap mechanism driver to the :ref:`deploy-lb-selfservice` or
-:ref:`deploy-ovs-selfservice` deployment examples.
-
-Add one or more compute nodes with the following components:
-
-* Three network interfaces: management, provider, and overlay.
-* OpenStack Networking Macvtap layer-2 agent and any dependencies.
-
-.. note::
-
- To support integration with the deployment examples, this content
- configures the Macvtap mechanism driver to use the overlay network
- for untagged (flat) or tagged (VLAN) networks in addition to overlay
- networks such as VXLAN. Your physical network infrastructure
- must support VLAN (802.1q) tagging on the overlay network.
-
-Architecture
-~~~~~~~~~~~~
-
-The Macvtap mechanism driver only applies to compute nodes. Otherwise,
-the environment resembles the prerequisite deployment example.
-
-.. image:: figures/config-macvtap-compute1.png
- :alt: Macvtap mechanism driver - compute node components
-
-.. image:: figures/config-macvtap-compute2.png
- :alt: Macvtap mechanism driver - compute node connectivity
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to add support for
-the Macvtap mechanism driver to an existing operational environment.
-
-Controller node
----------------
-
-#. In the ``ml2_conf.ini`` file:
-
- * Add ``macvtap`` to mechanism drivers.
-
- .. code-block:: ini
-
- [ml2]
- mechanism_drivers = macvtap
-
- * Configure network mappings.
-
- .. code-block:: ini
-
- [ml2_type_flat]
- flat_networks = provider,macvtap
-
- [ml2_type_vlan]
- network_vlan_ranges = provider,macvtap:VLAN_ID_START:VLAN_ID_END
-
- .. note::
-
- Use of ``macvtap`` is arbitrary. Only the self-service deployment
- examples require VLAN ID ranges. Replace ``VLAN_ID_START`` and
- ``VLAN_ID_END`` with appropriate numerical values.
-
-#. Restart the following services:
-
- * Server
-
-Network nodes
--------------
-
-No changes.
-
-Compute nodes
--------------
-
-#. Install the Networking service Macvtap layer-2 agent.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. In the ``macvtap_agent.ini`` file, configure the layer-2 agent.
-
- .. code-block:: ini
-
- [macvtap]
- physical_interface_mappings = macvtap:MACVTAP_INTERFACE
-
- [securitygroup]
- firewall_driver = noop
-
- Replace ``MACVTAP_INTERFACE`` with the name of the underlying
- interface that handles Macvtap mechanism driver interfaces.
- If using a prerequisite deployment example, replace
- ``MACVTAP_INTERFACE`` with the name of the underlying interface
- that handles overlay networks. For example, ``eth1``.
-
-#. Start the following services:
-
- * Macvtap agent
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents:
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 31e1bc1b-c872-4429-8fc3-2c8eba52634e | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent |
- | 378f5550-feee-42aa-a1cb-e548b7c2601f | Open vSwitch agent | compute1 | None | True | UP | neutron-openvswitch-agent |
- | 7d2577d0-e640-42a3-b303-cb1eb077f2b6 | L3 agent | compute1 | nova | True | UP | neutron-l3-agent |
- | d5d7522c-ad14-4c63-ab45-f6420d6a81dd | Metering agent | compute1 | None | True | UP | neutron-metering-agent |
- | e838ef5c-75b1-4b12-84da-7bdbd62f1040 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-This mechanism driver simply changes the virtual network interface driver
-for instances. Thus, you can reference the ``Create initial networks``
-content for the prerequisite deployment example.
-
-Verify network operation
-------------------------
-
-This mechanism driver simply changes the virtual network interface driver
-for instances. Thus, you can reference the ``Verify network operation``
-content for the prerequisite deployment example.
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-This mechanism driver simply removes the Linux bridge handling security
-groups on the compute nodes. Thus, you can reference the network traffic
-flow scenarios for the prerequisite deployment example.
diff --git a/doc/networking-guide/source/config-ml2.rst b/doc/networking-guide/source/config-ml2.rst
deleted file mode 100644
index 26b00dca2a..0000000000
--- a/doc/networking-guide/source/config-ml2.rst
+++ /dev/null
@@ -1,513 +0,0 @@
-.. _config-plugin-ml2:
-
-===========
-ML2 plug-in
-===========
-
-Architecture
-~~~~~~~~~~~~
-
-The Modular Layer 2 (ML2) neutron plug-in is a framework allowing OpenStack
-Networking to simultaneously use the variety of layer 2 networking
-technologies found in complex real-world data centers. The ML2 framework
-distinguishes between the two kinds of drivers that can be configured:
-
-* Type drivers
-
- Define how an OpenStack network is technically realized. Example: VXLAN
-
- Each available network type is managed by an ML2 type driver. Type drivers
- maintain any needed type-specific network state. They validate the type
- specific information for provider networks and are responsible for the
- allocation of a free segment in project networks.
-
-* Mechanism drivers
-
- Define the mechanism to access an OpenStack network of a certain type.
- Example: Open vSwitch mechanism driver.
-
- The mechanism driver is responsible for taking the information established by
- the type driver and ensuring that it is properly applied given the
- specific networking mechanisms that have been enabled.
-
- Mechanism drivers can utilize L2 agents (via RPC) and/or interact directly
- with external devices or controllers.
-
-Multiple mechanism and type drivers can be used simultaneously to access
-different ports of the same virtual network.
-
-.. todo::
- Picture showing relationships
-
-ML2 driver support matrix
--------------------------
-
-
-.. list-table:: Mechanism drivers and L2 agents
- :header-rows: 1
-
- * - type driver / mech driver
- - Flat
- - VLAN
- - VXLAN
- - GRE
- * - Open vSwitch
- - yes
- - yes
- - yes
- - yes
- * - Linux bridge
- - yes
- - yes
- - yes
- - no
- * - SRIOV
- - yes
- - yes
- - no
- - no
- * - MacVTap
- - yes
- - yes
- - no
- - no
- * - L2 population
- - no
- - no
- - yes
- - yes
-
-.. note::
-
- L2 population is a special mechanism driver that optimizes BUM (Broadcast,
- unknown destination address, multicast) traffic in the overlay networks
- VXLAN and GRE. It needs to be used in conjunction with either the
- Linux bridge or the Open vSwitch mechanism driver and cannot be used as
- standalone mechanism driver. For more information, see the
- *Mechanism drivers* section below.
-
-Configuration
-~~~~~~~~~~~~~
-
-Network type drivers
---------------------
-
-To enable type drivers in the ML2 plug-in. Edit the
-``/etc/neutron/plugins/ml2/ml2_conf.ini`` file:
-
-.. code-block:: ini
-
- [ml2]
- type_drivers = flat,vlan,vxlan,gre
-
-.. note::
-
- For more details,see the `Bug 1567792 `__.
-
-For more details, see the
-`Networking configuration options `__
-of Configuration Reference.
-
-The following type drivers are available
-
-* Flat
-
-* VLAN
-
-* GRE
-
-* VXLAN
-
-Provider network types
-^^^^^^^^^^^^^^^^^^^^^^
-
-Provider networks provide connectivity like project networks.
-But only administrative (privileged) users can manage those
-networks because they interface with the physical network infrastructure.
-More information about provider networks see
-:doc:`intro-os-networking` or the `OpenStack Administrator Guide
-`__.
-
-* Flat
-
- The administrator needs to configure a list of physical network names that
- can be used for provider networks.
- For more details, see the related section in the
- `Configuration Reference `__.
-
-* VLAN
-
- The administrator needs to configure a list of physical network names that
- can be used for provider networks.
- For more details, see the related section in the
- `Configuration Reference `__.
-
-* GRE
-
- No additional configuration required.
-
-* VXLAN
-
- The administrator can configure the VXLAN multicast group that should be
- used.
-
- .. note::
-
- VXLAN multicast group configuration is not applicable for the Open
- vSwitch agent.
-
- As of today it is not used in the Linux bridge agent. The Linux bridge
- agent has its own agent specific configuration option. For more details,
- see the `Bug 1523614 `__.
-
-Project network types
-^^^^^^^^^^^^^^^^^^^^^
-
-Project networks provide connectivity to instances for a particular
-project. Regular (non-privileged) users can manage project networks
-within the allocation that an administrator or operator defines for
-them. More information about project and provider networks see
-:doc:`intro-os-networking`
-or the `OpenStack Administrator Guide
-`__.
-
-Project network configurations are made in the
-``/etc/neutron/plugins/ml2/ml2_conf.ini`` configuration file on the neutron
-server:
-
-* VLAN
-
- The administrator needs to configure the range of VLAN IDs that can be
- used for project network allocation.
- For more details, see the related section in the
- `Configuration Reference `__.
-
-* GRE
-
- The administrator needs to configure the range of tunnel IDs that can be
- used for project network allocation.
- For more details, see the related section in the
- `Configuration Reference `__.
-
-* VXLAN
-
- The administrator needs to configure the range of VXLAN IDs that can be
- used for project network allocation.
- For more details, see the related section in the
- `Configuration Reference `__.
-
-.. note::
- Flat networks for project allocation are not supported. They only
- can exist as a provider network.
-
-Mechanism drivers
------------------
-
-To enable mechanism drivers in the ML2 plug-in, edit the
-``/etc/neutron/plugins/ml2/ml2_conf.ini`` file on the neutron server:
-
-.. code-block:: ini
-
- [ml2]
- mechanism_drivers = ovs,l2pop
-
-.. note::
-
- For more details, see the `Bug 1567792 `__.
-
-For more details, see the
-`Configuration Reference `__.
-
-* Linux bridge
-
- No additional configurations required for the mechanism driver. Additional
- agent configuration is required. For details, see the related *L2 agent*
- section below.
-
-* Open vSwitch
-
- No additional configurations required for the mechanism driver. Additional
- agent configuration is required. For details, see the related *L2 agent*
- section below.
-
-* SRIOV
-
- The administrator needs to define a list PCI hardware that shall be used
- by OpenStack. For more details, see the related section in the
- `Configuration Reference `__.
-
-* MacVTap
-
- No additional configurations required for the mechanism driver. Additional
- agent configuration is required. Please see the related section.
-
-* L2 population
-
- The administrator can configure some optional configuration options. For more
- details, see the related section in the
- `Configuration Reference `__.
-
-* Specialized
-
- * Open source
-
- External open source mechanism drivers exist as well as the neutron
- integrated reference implementations. Configuration of those drivers is not
- part of this document. For example:
-
- * OpenDaylight
- * OpenContrail
-
- * Proprietary (vendor)
-
- External mechanism drivers from various vendors exist as well as the
- neutron integrated reference implementations.
-
- Configuration of those drivers is not part of this document.
-
-
-Agents
-------
-
-L2 agent
-^^^^^^^^
-
-An L2 agent serves layer 2 (Ethernet) network connectivity to OpenStack
-resources. It typically runs on each Network Node and on each Compute Node.
-
-* Open vSwitch agent
-
- The Open vSwitch agent configures the Open vSwitch to realize L2 networks for
- OpenStack resources.
-
- Configuration for the Open vSwitch agent is typically done in the
- ``openvswitch_agent.ini`` configuration file. Make sure that on agent start
- you pass this configuration file as argument.
-
- For a detailed list of configuration options, see the related section in the
- `Configuration Reference `__.
-
-* Linux bridge agent
-
- The Linux bridge agent configures Linux bridges to realize L2 networks for
- OpenStack resources.
-
- Configuration for the Linux bridge agent is typically done in the
- ``linuxbridge_agent.ini`` configuration file. Make sure that on agent start
- you pass this configuration file as argument.
-
- For a detailed list of configuration options, see the related section in the
- `Configuration Reference `__.
-
-* SRIOV Nic Switch agent
-
- The sriov nic switch agent configures PCI virtual functions to realize L2
- networks for OpenStack instances. Network attachments for other resources
- like routers, DHCP, and so on are not supported.
-
- Configuration for the SRIOV nic switch agent is typically done in the
- ``sriov_agent.ini`` configuration file. Make sure that on agent start
- you pass this configuration file as argument.
-
- For a detailed list of configuration options, see the related section in the
- `Configuration Reference `__.
-
-* MacVTap agent
-
- The MacVTap agent uses kernel MacVTap devices for realizing L2
- networks for OpenStack instances. Network attachments for other resources
- like routers, DHCP, and so on are not supported.
-
- Configuration for the MacVTap agent is typically done in the
- ``macvtap_agent.ini`` configuration file. Make sure that on agent start
- you pass this configuration file as argument.
-
- For a detailed list of configuration options, see the related section in the
- `Configuration Reference `__.
-
-L3 agent
-^^^^^^^^
-
-The L3 agent offers advanced layer 3 services, like virtual Routers and
-Floating IPs. It requires an L2 agent running in parallel.
-
-Configuration for the L3 agent is typically done in the
-``l3_agent.ini`` configuration file. Make sure that on agent start
-you pass this configuration file as argument.
-
-For a detailed list of configuration options, see the related section in the
-`Configuration Reference `__.
-
-DHCP agent
-^^^^^^^^^^
-
-The DHCP agent is responsible for :term:`DHCP ` and RADVD (Router Advertisement Daemon) services.
-It requires a running L2 agent on the same node.
-
-Configuration for the DHCP agent is typically done in the
-``dhcp_agent.ini`` configuration file. Make sure that on agent start
-you pass this configuration file as argument.
-
-For a detailed list of configuration options, see the related section in the
-`Configuration Reference `__.
-
-Metadata agent
-^^^^^^^^^^^^^^
-
-The Metadata agent allows instances to access cloud-init meta data and user
-data via the network. It requires a running L2 agent on the same node.
-
-Configuration for the Metadata agent is typically done in the
-``metadata_agent.ini`` configuration file. Make sure that on agent start
-you pass this configuration file as argument.
-
-For a detailed list of configuration options, see the related section in the
-`Configuration Reference `__.
-
-L3 metering agent
-^^^^^^^^^^^^^^^^^
-
-The L3 metering agent enables layer3 traffic metering. It requires a running L3
-agent on the same node.
-
-Configuration for the L3 metering agent is typically done in the
-``metering_agent.ini`` configuration file. Make sure that on agent start
-you pass this configuration file as argument.
-
-For a detailed list of configuration options, see the related section in the
-`Configuration Reference `__.
-
-Security
---------
-
-L2 agents support some important security configurations.
-
-* Security Groups
-
- For more details, see the related section in the
- `Configuration Reference `__.
-
-* Arp Spoofing Prevention
-
- Configured in the *L2 agent* configuration.
-
-
-Reference implementations
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Overview
---------
-
-In this section, the combination of a mechanism driver and an L2 agent is
-called 'reference implementation'. The following table lists these
-implementations:
-
-.. list-table:: Mechanism drivers and L2 agents
- :header-rows: 1
-
- * - Mechanism Driver
- - L2 agent
- * - Open vSwitch
- - Open vSwitch agent
- * - Linux bridge
- - Linux bridge agent
- * - SRIOV
- - SRIOV nic switch agent
- * - MacVTap
- - MacVTap agent
- * - L2 population
- - Open vSwitch agent, Linux bridge agent
-
-The following tables shows which reference implementations support which
-non-L2 neutron agents:
-
-.. list-table:: Reference implementations and other agents
- :header-rows: 1
-
- * - Reference Implementation
- - L3 agent
- - DHCP agent
- - Metadata agent
- - L3 Metering agent
- * - Open vSwitch & Open vSwitch agent
- - yes
- - yes
- - yes
- - yes
- * - Linux bridge & Linux bridge agent
- - yes
- - yes
- - yes
- - yes
- * - SRIOV & SRIOV nic switch agent
- - no
- - no
- - no
- - no
- * - MacVTap & MacVTap agent
- - no
- - no
- - no
- - no
-
-.. note::
- L2 population is not listed here, as it is not a standalone mechanism.
- Whether other agents are supported depends on the mechanism driver that
- is used in conjunction for binding a port.
-
- .. todo::
- Update Link. Doc seems to be not available anymore since Liberty.
-
- More information about L2 population see the
- `OpenStack Manuals `__.
-
-
-Buying guide
-------------
-
-This guide characterizes the L2 reference implementations that currently exist.
-
-* Open vSwitch mechanism and Open vSwitch agent
-
- Can be used for instance network attachments as well as for attachments of
- other network resources like routers, DHCP, and so on.
-
-* Linux bridge mechanism and Linux bridge agent
-
- Can be used for instance network attachments as well as for attachments of
- other network resources like routers, DHCP, and so on.
-
-* SRIOV mechanism driver and SRIOV NIC switch agent
-
- Can only be used for instance network attachments (device_owner = compute).
-
- It is deployed alongside another mechanism driver and L2 agent such as OVS or
- Linux bridge. It offers instances direct access to the network adapter
- through a PCI Virtual Function (VF). This gives an instance direct access to
- hardware capabilities and high performance networking.
-
- The cloud consumer can decide via the neutron APIs VNIC_TYPE attribute, if
- an instance gets a normal OVS port or an SRIOV port.
-
- Due to direct connection, some features are not available when using SRIOV.
- For example, DVR, security groups, migration.
-
- For more information see the :ref:`config-sriov`.
-
-* MacVTap mechanism driver and MacVTap agent
-
- Can only be used for instance network attachments (device_owner = compute)
- and not for attachment of other resources like routers, DHCP, and so on.
-
- It is positioned as alternative to Open vSwitch or Linux bridge support on
- the compute node for internal deployments.
-
- MacVTap offers a direct connection with very little overhead between
- instances and down to the adapter. You can use MacVTap agent on the
- compute node when you require a network connection that is performance
- critical. It does not require specific hardware (like with SRIOV).
-
- Due to the direct connection, some features are not available when using
- it on the compute node. For example, DVR, security groups and arp-spoofing
- protection.
diff --git a/doc/networking-guide/source/config-mtu.rst b/doc/networking-guide/source/config-mtu.rst
deleted file mode 100644
index 4748669250..0000000000
--- a/doc/networking-guide/source/config-mtu.rst
+++ /dev/null
@@ -1,134 +0,0 @@
-.. _config-mtu:
-
-==================
-MTU considerations
-==================
-
-The Networking service uses the MTU of the underlying physical network to
-calculate the MTU for virtual network components including instance network
-interfaces. By default, it assumes a standard 1500-byte MTU for the
-underlying physical network.
-
-The Networking service only references the underlying physical network MTU.
-Changing the underlying physical network device MTU requires configuration
-of physical network devices such as switches and routers.
-
-Jumbo frames
-~~~~~~~~~~~~
-
-The Networking service supports underlying physical networks using jumbo
-frames and also enables instances to use jumbo frames minus any overlay
-protocol overhead. For example, an underlying physical network with a
-9000-byte MTU yields an 8950-byte MTU for instances using a VXLAN network
-with IPv4 endpoints. Using IPv6 endpoints for overlay networks adds 20
-bytes of overhead for any protocol.
-
-The Networking service supports the following underlying physical network
-architectures. Case 1 refers to the most common architecture. In general,
-architectures should avoid cases 2 and 3.
-
-.. note::
-
- You can trigger MTU recalculation for existing networks by changing the
- MTU configuration and restarting the ``neutron-server`` service.
- However, propagating MTU calculations to the data plane may require
- users to delete and recreate ports on the network.
-
- When using the Open vSwitch or Linux bridge drivers, new MTU calculations
- will be propagated automatically after restarting the ``l3-agent`` service.
-
-Case 1
-------
-
-For typical underlying physical network architectures that implement a single
-MTU value, you can leverage jumbo frames using two options, one in the
-``neutron.conf`` file and the other in the ``ml2_conf.ini`` file. Most
-environments should use this configuration.
-
-For example, referencing an underlying physical network with a 9000-byte MTU:
-
-#. In the ``neutron.conf`` file:
-
- .. code-block:: ini
-
- [DEFAULT]
- global_physnet_mtu = 9000
-
-#. In the ``ml2_conf.ini`` file:
-
- .. code-block:: ini
-
- [ml2]
- path_mtu = 9000
-
-Case 2
-------
-
-Some underlying physical network architectures contain multiple layer-2
-networks with different MTU values. You can configure each flat or VLAN
-provider network in the bridge or interface mapping options of the layer-2
-agent to reference a unique MTU value.
-
-For example, referencing a 4000-byte MTU for ``provider2``, a 1500-byte
-MTU for ``provider3``, and a 9000-byte MTU for other networks using the
-Open vSwitch agent:
-
-#. In the ``neutron.conf`` file:
-
- .. code-block:: ini
-
- [DEFAULT]
- global_physnet_mtu = 9000
-
-#. In the ``openvswitch_agent.ini`` file:
-
- .. code-block:: ini
-
- [ovs]
- bridge_mappings = provider1:eth1,provider2:eth2,provider3:eth3
-
-#. In the ``ml2_conf.ini`` file:
-
- .. code-block:: ini
-
- [ml2]
- physical_network_mtus = provider2:4000,provider3:1500
- path_mtu = 9000
-
-Case 3
-------
-
-Some underlying physical network architectures contain a unique layer-2 network
-for overlay networks using protocols such as VXLAN and GRE.
-
-For example, referencing a 4000-byte MTU for overlay networks and a 9000-byte
-MTU for other networks:
-
-#. In the ``neutron.conf`` file:
-
- .. code-block:: ini
-
- [DEFAULT]
- global_physnet_mtu = 9000
-
-#. In the ``ml2_conf.ini`` file:
-
- .. code-block:: ini
-
- [ml2]
- path_mtu = 4000
-
- .. note::
-
- Other networks including provider networks and flat or VLAN
- self-service networks assume the value of the ``global_physnet_mtu``
- option.
-
-Instance network interfaces (VIFs)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The DHCP agent provides an appropriate MTU value to instances using IPv4,
-while the L3 agent provides an appropriate MTU value to instances using
-IPv6. IPv6 uses RA via the L3 agent because the DHCP agent only supports
-IPv4. Instances using IPv4 and IPv6 should obtain the same MTU value
-regardless of method.
diff --git a/doc/networking-guide/source/config-ovs-dpdk.rst b/doc/networking-guide/source/config-ovs-dpdk.rst
deleted file mode 100644
index cd61fc507f..0000000000
--- a/doc/networking-guide/source/config-ovs-dpdk.rst
+++ /dev/null
@@ -1,155 +0,0 @@
-.. _config-ovs-dpdk:
-
-===============================
-Open vSwitch with DPDK datapath
-===============================
-
-This page serves as a guide for how to use the OVS with DPDK datapath
-functionality available in the Networking service as of the Mitaka release.
-
-The basics
-~~~~~~~~~~
-
-Open vSwitch (OVS) provides support for a Data Plane Development Kit (DPDK)
-datapath since OVS 2.2, and a DPDK-backed ``vhost-user`` virtual interface
-since OVS 2.4. The DPDK datapath provides lower latency and higher performance
-than the standard kernel OVS datapath, while DPDK-backed ``vhost-user``
-interfaces can connect guests to this datapath. For more information on DPDK,
-refer to the `DPDK `__ website.
-
-OVS with DPDK, or OVS-DPDK, can be used to provide high-performance networking
-between instances on OpenStack compute nodes.
-
-Prerequisites
--------------
-
-Using DPDK in OVS requires the following minimum software versions:
-
-* OVS 2.4
-* DPDK 2.0
-* QEMU 2.1.0
-* libvirt 1.2.13
-
-Support of ``vhost-user`` multiqueue that enables use of multiqueue with
-``virtio-net`` and ``igb_uio`` is available if the following newer
-versions are used:
-
-* OVS 2.5
-* DPDK 2.2
-* QEMU 2.5
-* libvirt 1.2.17
-
-In both cases, install and configure Open vSwitch with DPDK support for each
-node. For more information, see the
-`OVS-DPDK `__
-installation guide (select an appropriate OVS version in the
-:guilabel:`Branch` drop-down menu).
-
-`Neutron configuration reference for OVS-DPDK
-`__
-for configuration of neutron OVS agent.
-
-In case you wish to configure multiqueue, see the
-`OVS configuration chapter on vhost-user
-`__
-in QEMU documentation.
-
-The technical background of multiqueue is explained in the corresponding
-`blueprint `__.
-
-Additionally, OpenStack supports ``vhost-user`` reconnect feature starting
-from the Ocata release, as implementation of fix for
-`bug 1604924 `__.
-Starting from OpenStack Ocata release this feature is used without any
-configuration necessary in case the following minimum software versions
-are used:
-
-* OVS 2.6
-* DPDK 16.07
-* QEMU 2.7
-
-The support of this feature is not yet present in ML2 OVN and ODL
-mechanism drivers.
-
-Using vhost-user interfaces
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Once OVS and neutron are correctly configured with DPDK support,
-``vhost-user`` interfaces are completely transparent to the guest
-(except in case of multiqueue configuration described below).
-However, guests must request huge pages. This can be done through flavors.
-For example:
-
-.. code-block:: console
-
- $ openstack flavor set m1.large --property hw:mem_page_size=large
-
-For more information about the syntax for ``hw:mem_page_size``, refer to the
-`Flavors `__ guide.
-
-.. note::
-
- ``vhost-user`` requires file descriptor-backed shared memory. Currently, the
- only way to request this is by requesting large pages. This is why instances
- spawned on hosts with OVS-DPDK must request large pages. The aggregate
- flavor affinity filter can be used to associate flavors with large page
- support to hosts with OVS-DPDK support.
-
-Create and add ``vhost-user`` network interfaces to instances in the same
-fashion as conventional interfaces. These interfaces can use the kernel
-``virtio-net`` driver or a DPDK-compatible driver in the guest.
-
-.. code-block:: console
-
- $ openstack server create --nic net-id=$net_id ... testserver
-
-Using vhost-user multiqueue
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To use this feature, the following should be set in the flavor extra specs
-(flavor keys):
-
-.. code-block:: console
-
- $ openstack flavor set $m1.large --property hw:vif_multiqueue_enabled=true
-
-This setting can be overridden by the image metadata property if the feature
-is enabled in the extra specs:
-
-.. code-block:: console
-
- $ openstack image set --property hw_vif_multiqueue_enabled=true IMAGE_NAME
-
-Support of ``virtio-net`` multiqueue needs to be present in kernel of
-guest VM and is available starting from Linux kernel 3.8.
-
-Check the pre-set maximum for the number of combined channels in the
-channel configuration.
-If OVS and the flavor have been configured successfully, the maximum
-should be more than '1':
-
-.. code-block:: console
-
- $ ethtool -l INTERFACE_NAME
-
-To increase number of current combined channels run following command in
-guest VM:
-
-.. code-block:: console
-
- $ ethtool -L INTERFACE_NAME combined QUEUES_NR
-
-The number of queues should typically match the number of vCPUs
-defined for the instance. In newer kernel versions
-this is configured automatically.
-
-Known limitations
-~~~~~~~~~~~~~~~~~
-
-* This feature is only supported when using the libvirt compute driver, and the
- KVM/QEMU hypervisor.
-* Huge pages are required for each instance running on hosts with OVS-DPDK.
- If huge pages are not present in the guest, the interface will appear but
- will not function.
-* Expect performance degradation of services using tap devices: these devices
- do not support DPDK. Example services include DVR, FWaaS, or LBaaS.
diff --git a/doc/networking-guide/source/config-ovsfwdriver.rst b/doc/networking-guide/source/config-ovsfwdriver.rst
deleted file mode 100644
index 3936b178b7..0000000000
--- a/doc/networking-guide/source/config-ovsfwdriver.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-.. _config-ovsfwdriver:
-
-===================================
-Native Open vSwitch firewall driver
-===================================
-
-.. note::
-
- Experimental feature or incomplete documentation.
-
-Historically, Open vSwitch (OVS) could not interact directly with *iptables*
-to implement security groups. Thus, the OVS agent and Compute service use
-a Linux bridge between each instance (VM) and the OVS integration bridge
-``br-int`` to implement security groups. The Linux bridge device contains
-the *iptables* rules pertaining to the instance. In general, additional
-components between instances and physical network infrastructure cause
-scalability and performance problems. To alleviate such problems, the OVS
-agent includes an optional firewall driver that natively implements security
-groups as flows in OVS rather than the Linux bridge device and *iptables*.
-This increases scalability and performance.
-
-Configuring heterogeneous firewall drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-L2 agents can be configured to use differing firewall drivers. There is no
-requirement that they all be the same. If an agent lacks a firewall driver
-configuration, it will default to what is configured on its server. This also
-means there is no requirement that the server has any firewall driver
-configured at all, as long as the agents are configured correctly.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-The native OVS firewall implementation requires kernel and user space support
-for *conntrack*, thus requiring minimum versions of the Linux kernel and
-Open vSwitch. All cases require Open vSwitch version 2.5 or newer.
-
-* Kernel version 4.3 or newer includes *conntrack* support.
-* Kernel version 3.3, but less than 4.3, does not include *conntrack*
- support and requires building the OVS modules.
-
-Enable the native OVS firewall driver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* On nodes running the Open vSwitch agent, edit the
- ``openvswitch_agent.ini`` file and enable the firewall driver.
-
- .. code-block:: ini
-
- [securitygroup]
- firewall_driver = openvswitch
-
-For more information, see the `Open vSwitch Firewall Driver
-`_
-and the `video `_.
diff --git a/doc/networking-guide/source/config-qos.rst b/doc/networking-guide/source/config-qos.rst
deleted file mode 100644
index 9956342357..0000000000
--- a/doc/networking-guide/source/config-qos.rst
+++ /dev/null
@@ -1,479 +0,0 @@
-.. _config-qos:
-
-========================
-Quality of Service (QoS)
-========================
-
-QoS is defined as the ability to guarantee certain network requirements
-like bandwidth, latency, jitter, and reliability in order to satisfy a
-Service Level Agreement (SLA) between an application provider and end
-users.
-
-Network devices such as switches and routers can mark traffic so that it is
-handled with a higher priority to fulfill the QoS conditions agreed under
-the SLA. In other cases, certain network traffic such as Voice over IP (VoIP)
-and video streaming needs to be transmitted with minimal bandwidth
-constraints. On a system without network QoS management, all traffic will be
-transmitted in a "best-effort" manner making it impossible to guarantee service
-delivery to customers.
-
-QoS is an advanced service plug-in. QoS is decoupled from the rest of the
-OpenStack Networking code on multiple levels and it is available through the
-ml2 extension driver.
-
-Details about the DB models, API extension, and use cases are out of the scope
-of this guide but can be found in the
-`Neutron QoS specification `_.
-
-
-Supported QoS rule types
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Any plug-in or ml2 mechanism driver can claim support for some QoS rule types
-by providing a plug-in/driver class property called
-``supported_qos_rule_types`` that returns a list of strings that correspond
-to `QoS rule types
-`_.
-
-The following table shows the Networking back ends, QoS supported rules, and
-traffic directions (from the VM point of view).
-
-.. table:: **Networking back ends, supported rules, and traffic direction**
-
- ==================== ================ ================ ================
- Rule \ back end Open vSwitch SR-IOV Linux bridge
- ==================== ================ ================ ================
- Bandwidth limit Egress Egress (1) Egress
- Minimum bandwidth - Egress -
- DSCP marking Egress - Egress
- ==================== ================ ================ ================
-
-.. note::
-
- (1) Max burst parameter is skipped because it is not supported by the
- IP tool.
-
-In the most simple case, the property can be represented by a simple Python
-list defined on the class.
-
-For an ml2 plug-in, the list of supported QoS rule types and parameters is
-defined as a common subset of rules supported by all active mechanism drivers.
-A QoS rule is always attached to a QoS policy. When a rule is created or
-updated:
-
-* The QoS plug-in will check if this rule and parameters are supported by any
- active mechanism driver if the QoS policy is not attached to any port or
- network.
-
-* The QoS plug-in will check if this rule and parameters are supported by the
- mechanism drivers managing those ports if the QoS policy is attached to any
- port or network.
-
-
-Configuration
-~~~~~~~~~~~~~
-
-To enable the service, follow the steps below:
-
-On network nodes:
-
-#. Add the QoS service to the ``service_plugins`` setting in
- ``/etc/neutron/neutron.conf``. For example:
-
- .. code-block:: none
-
- service_plugins = \
- neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,
- neutron.services.metering.metering_plugin.MeteringPlugin,
- neutron.services.qos.qos_plugin.QoSPlugin
-
-#. Optionally, set the needed ``notification_drivers`` in the ``[qos]``
- section in ``/etc/neutron/neutron.conf`` (``message_queue`` is the
- default).
-
-#. In ``/etc/neutron/plugins/ml2/ml2_conf.ini``, add ``qos`` to
- ``extension_drivers`` in the ``[ml2]`` section. For example:
-
- .. code-block:: ini
-
- [ml2]
- extension_drivers = port_security, qos
-
-#. If the Open vSwitch agent is being used, set ``extensions`` to
- ``qos`` in the ``[agent]`` section of
- ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``. For example:
-
- .. code-block:: ini
-
- [agent]
- extensions = qos
-
-On compute nodes:
-
-#. In ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``, add ``qos`` to the
- ``extensions`` setting in the ``[agent]`` section. For example:
-
- .. code-block:: ini
-
- [agent]
- extensions = qos
-
-.. note::
-
- QoS currently works with ml2 only (SR-IOV, Open vSwitch, and linuxbridge
- are drivers that are enabled for QoS in Mitaka release).
-
-Trusted projects policy.json configuration
-------------------------------------------
-
-If projects are trusted to administrate their own QoS policies in
-your cloud, neutron's file ``policy.json`` can be modified to allow this.
-
-Modify ``/etc/neutron/policy.json`` policy entries as follows:
-
-.. code-block:: none
-
- "get_policy": "rule:regular_user",
- "create_policy": "rule:regular_user",
- "update_policy": "rule:regular_user",
- "delete_policy": "rule:regular_user",
- "get_rule_type": "rule:regular_user",
-
-To enable bandwidth limit rule:
-
-.. code-block:: none
-
- "get_policy_bandwidth_limit_rule": "rule:regular_user",
- "create_policy_bandwidth_limit_rule": "rule:regular_user",
- "delete_policy_bandwidth_limit_rule": "rule:regular_user",
- "update_policy_bandwidth_limit_rule": "rule:regular_user",
-
-To enable DSCP marking rule:
-
-.. code-block:: none
-
- "get_policy_dscp_marking_rule": "rule:regular_user",
- "create_dscp_marking_rule": "rule:regular_user",
- "delete_dscp_marking_rule": "rule:regular_user",
- "update_dscp_marking_rule": "rule:regular_user",
-
-To enable minimum bandwidth rule:
-
-.. code-block:: none
-
- "get_policy_minimum_bandwidth_rule": "rule:regular_user",
- "create_policy_minimum_bandwidth_rule": "rule:regular_user",
- "delete_policy_minimum_bandwidth_rule": "rule:regular_user",
- "update_policy_minimum_bandwidth_rule": "rule:regular_user",
-
-User workflow
-~~~~~~~~~~~~~
-
-QoS policies are only created by admins with the default ``policy.json``.
-Therefore, you should have the cloud operator set them up on
-behalf of the cloud projects.
-
-If projects are trusted to create their own policies, check the trusted
-projects ``policy.json`` configuration section.
-
-First, create a QoS policy and its bandwidth limit rule:
-
-.. code-block:: console
-
- $ openstack network qos policy create bw-limiter
-
- Created a new policy:
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | description | |
- | id | 5df855e9-a833-49a3-9c82-c0839a5f103f |
- | name | qos1 |
- | project_id | 4db7c1ed114a4a7fb0f077148155c500 |
- | rules | [] |
- | shared | False |
- +-------------+--------------------------------------+
-
- $ openstack network qos rule create --type bandwidth-limit --max-kbps 3000 \
- --max-burst-kbits 300 --egress bw-limiter
-
- Created a new bandwidth_limit_rule:
- +----------------+--------------------------------------+
- | Field | Value |
- +----------------+--------------------------------------+
- | direction | egress |
- | id | 92ceb52f-170f-49d0-9528-976e2fee2d6f |
- | max_burst_kbps | 300 |
- | max_kbps | 3000 |
- +----------------+--------------------------------------+
-
-.. note::
-
- The QoS implementation requires a burst value to ensure proper behavior of
- bandwidth limit rules in the Open vSwitch and Linux bridge agents. If you
- do not provide a value, it defaults to 80% of the bandwidth limit which
- works for typical TCP traffic.
-
-Second, associate the created policy with an existing neutron port.
-In order to do this, the user extracts the port ID to be associated with
-the already created policy. In the next example, we will assign the
-``bw-limiter`` policy to the VM with IP address ``192.0.2.1``.
-
-.. code-block:: console
-
- $ openstack port list
-
- +--------------------------------------+-----------------------------------+
- | ID | Fixed IP Addresses |
- +--------------------------------------+-----------------------------------+
- | 0271d1d9-1b16-4410-bd74-82cdf6dcb5b3 | { ... , "ip_address": "192.0.2.1"}|
- | 88101e57-76fa-4d12-b0e0-4fc7634b874a | { ... , "ip_address": "192.0.2.3"}|
- | e04aab6a-5c6c-4bd9-a600-33333551a668 | { ... , "ip_address": "192.0.2.2"}|
- +--------------------------------------+-----------------------------------+
-
- $ openstack port set --qos-policy bw-limiter \
- 88101e57-76fa-4d12-b0e0-4fc7634b874a
- Updated port: 88101e57-76fa-4d12-b0e0-4fc7634b874a
-
-In order to detach a port from the QoS policy, simply update again the
-port configuration.
-
-.. code-block:: console
-
- $ openstack port unset --no-qos-policy 88101e57-76fa-4d12-b0e0-4fc7634b874a
- Updated port: 88101e57-76fa-4d12-b0e0-4fc7634b874a
-
-
-Ports can be created with a policy attached to them too.
-
-.. code-block:: console
-
- $ openstack port create --qos-policy bw-limiter --network private port1
-
- Created a new port:
- +-----------------------+--------------------------------------------------+
- | Field | Value |
- +-----------------------+--------------------------------------------------+
- | admin_state_up | UP |
- | allowed_address_pairs | |
- | binding_host_id | |
- | binding_profile | |
- | binding_vif_details | |
- | binding_vif_type | unbound |
- | binding_vnic_type | normal |
- | created_at | 2017-05-15T08:43:00Z |
- | description | |
- | device_id | |
- | device_owner | |
- | dns_assignment | None |
- | dns_name | None |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='10.0.10.4', subnet_id='292f8c1e-...' |
- | id | f51562ee-da8d-42de-9578-f6f5cb248226 |
- | ip_address | None |
- | mac_address | fa:16:3e:d9:f2:ba |
- | name | port1 |
- | network_id | 55dc2f70-0f92-4002-b343-ca34277b0234 |
- | option_name | None |
- | option_value | None |
- | port_security_enabled | False |
- | project_id | 4db7c1ed114a4a7fb0f077148155c500 |
- | qos_policy_id | 5df855e9-a833-49a3-9c82-c0839a5f103f |
- | revision_number | 6 |
- | security_group_ids | 0531cc1a-19d1-4cc7-ada5-49f8b08245be |
- | status | DOWN |
- | subnet_id | None |
- | updated_at | 2017-05-15T08:43:00Z |
- +-----------------------+--------------------------------------------------+
-
-
-You can attach networks to a QoS policy. The meaning of this is that
-any compute port connected to the network will use the network policy by
-default unless the port has a specific policy attached to it. Internal network
-owned ports like DHCP and internal router ports are excluded from network
-policy application.
-
-In order to attach a QoS policy to a network, update an existing
-network, or initially create the network attached to the policy.
-
-.. code-block:: console
-
- $ openstack network set --qos-policy bw-limiter private
- Updated network: private
-
-.. note::
-
- Configuring the proper burst value is very important. If the burst value is
- set too low, bandwidth usage will be throttled even with a proper bandwidth
- limit setting. This issue is discussed in various documentation sources, for
- example in `Juniper's documentation
- `_.
- Burst value for TCP traffic can be set as 80% of desired bandwidth limit
- value. For example, if the bandwidth limit is set to 1000kbps then enough
- burst value will be 800kbit. If the configured burst value is too low,
- achieved bandwidth limit will be lower than expected. If the configured burst
- value is too high, too few packets could be limited and achieved bandwidth
- limit would be higher than expected.
-
-Administrator enforcement
--------------------------
-
-Administrators are able to enforce policies on project ports or networks.
-As long as the policy is not shared, the project is not able to detach
-any policy attached to a network or port.
-
-If the policy is shared, the project is able to attach or detach such
-policy from its own ports and networks.
-
-
-Rule modification
------------------
-You can modify rules at runtime. Rule modifications will be propagated to any
-attached port.
-
-.. code-block:: console
-
- $ openstack network qos rule set --max-kbps 2000 --max-burst-kbps 200 \
- --ingress bw-limiter 92ceb52f-170f-49d0-9528-976e2fee2d6f
- Updated bandwidth_limit_rule: 92ceb52f-170f-49d0-9528-976e2fee2d6f
-
- $ openstack network qos rule show \
- bw-limiter 92ceb52f-170f-49d0-9528-976e2fee2d6f
-
- +----------------+--------------------------------------+
- | Field | Value |
- +----------------+--------------------------------------+
- | direction | ingress |
- | id | 92ceb52f-170f-49d0-9528-976e2fee2d6f |
- | max_burst_kbps | 200 |
- | max_kbps | 2000 |
- +----------------+--------------------------------------+
-
-Just like with bandwidth limiting, create a policy for DSCP marking rule:
-
-.. code-block:: console
-
- $ openstack network qos policy create dscp-marking
-
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | description | |
- | id | d1f90c76-fbe8-4d6f-bb87-a9aea997ed1e |
- | name | dscp-marking |
- | project_id | 4db7c1ed114a4a7fb0f077148155c500 |
- | rules | [] |
- | shared | False |
- +-------------+--------------------------------------+
-
-You can create, update, list, delete, and show DSCP markings
-with the neutron client:
-
-.. code-block:: console
-
- $ openstack network qos rule create --type dscp-marking --dscp-mark 26 \
- dscp-marking
-
- Created a new dscp marking rule
- +----------------+--------------------------------------+
- | Field | Value |
- +----------------+--------------------------------------+
- | id | 115e4f70-8034-4176-8fe9-2c47f8878a7d |
- | dscp_mark | 26 |
- +----------------+--------------------------------------+
-
-.. code-block:: console
-
- $ openstack network qos rule set --dscp-mark 22 \
- dscp-marking 115e4f70-8034-4176-8fe9-2c47f8878a7d
- Updated dscp_rule: 115e4f70-8034-4176-8fe9-2c47f8878a7d
-
- $ openstack network qos rule list dscp-marking
-
- +--------------------------------------+----------------------------------+
- | ID | DSCP Mark |
- +--------------------------------------+----------------------------------+
- | 115e4f70-8034-4176-8fe9-2c47f8878a7d | 22 |
- +--------------------------------------+----------------------------------+
-
- $ openstack network qos rule show \
- dscp-marking 115e4f70-8034-4176-8fe9-2c47f8878a7d
-
- +----------------+--------------------------------------+
- | Field | Value |
- +----------------+--------------------------------------+
- | id | 115e4f70-8034-4176-8fe9-2c47f8878a7d |
- | dscp_mark | 22 |
- +----------------+--------------------------------------+
-
- $ openstack network qos rule delete \
- dscp-marking 115e4f70-8034-4176-8fe9-2c47f8878a7d
- Deleted dscp_rule: 115e4f70-8034-4176-8fe9-2c47f8878a7d
-
-You can also include minimum bandwidth rules in your policy:
-
-.. code-block:: console
-
- $ openstack network qos policy create bandwidth-control
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | description | |
- | id | 8491547e-add1-4c6c-a50e-42121237256c |
- | name | bandwidth-control |
- | project_id | 7cc5a84e415d48e69d2b06aa67b317d8 |
- | rules | [] |
- | shared | False |
- +-------------+--------------------------------------+
-
- $ openstack network qos rule create \
- --type minimum-bandwidth --min-kbps 1000 --egress bandwidth-control
- +------------+--------------------------------------+
- | Field | Value |
- +------------+--------------------------------------+
- | direction | egress |
- | id | da858b32-44bc-43c9-b92b-cf6e2fa836ab |
- | min_kbps | 1000 |
- | name | None |
- | project_id | |
- +------------+--------------------------------------+
-
-A policy with a minimum bandwidth ensures best efforts are made to provide
-no less than the specified bandwidth to each port on which the rule is
-applied. However, as this feature is not yet integrated with the Compute
-scheduler, minimum bandwidth cannot be guaranteed.
-
-It is also possible to combine several rules in one policy:
-
-.. code-block:: console
-
- $ openstack network qos rule create --type bandwidth-limit \
- --max-kbps 50000 --max-burst-kbits 50000 bandwidth-control
- +----------------+--------------------------------------+
- | Field | Value |
- +----------------+--------------------------------------+
- | id | 0db48906-a762-4d32-8694-3f65214c34a6 |
- | max_burst_kbps | 50000 |
- | max_kbps | 50000 |
- | name | None |
- | project_id | |
- +----------------+--------------------------------------+
-
- $ openstack network qos policy show bandwidth-control
- +-------------+-------------------------------------------------------------------+
- | Field | Value |
- +-------------+-------------------------------------------------------------------+
- | description | |
- | id | 8491547e-add1-4c6c-a50e-42121237256c |
- | name | bandwidth-control |
- | project_id | 7cc5a84e415d48e69d2b06aa67b317d8 |
- | rules | [{u'max_kbps': 50000, u'type': u'bandwidth_limit', |
- | | u'id': u'0db48906-a762-4d32-8694-3f65214c34a6', |
- | | u'max_burst_kbps': 50000, |
- | | u'qos_policy_id': u'8491547e-add1-4c6c-a50e-42121237256c'}, |
- | | {u'direction': |
- | | u'egress', u'min_kbps': 1000, u'type': u'minimum_bandwidth', |
- | | u'id': u'da858b32-44bc-43c9-b92b-cf6e2fa836ab', |
- | | u'qos_policy_id': u'8491547e-add1-4c6c-a50e-42121237256c'}] |
- | shared | False |
- +-------------+-------------------------------------------------------------------+
diff --git a/doc/networking-guide/source/config-rbac.rst b/doc/networking-guide/source/config-rbac.rst
deleted file mode 100644
index 48388effa3..0000000000
--- a/doc/networking-guide/source/config-rbac.rst
+++ /dev/null
@@ -1,458 +0,0 @@
-.. _config-rbac:
-
-================================
-Role-Based Access Control (RBAC)
-================================
-
-The Role-Based Access Control (RBAC) policy framework enables both operators
-and users to grant access to resources for specific projects.
-
-
-Supported objects for sharing with specific projects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Currently, the access that can be granted using this feature
-is supported by:
-
-* Regular port creation permissions on networks (since Liberty).
-* Binding QoS policies permissions to networks or ports (since Mitaka).
-* Attaching router gateways to networks (since Mitaka).
-
-
-Sharing an object with specific projects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sharing an object with a specific project is accomplished by creating
-a policy entry that permits the target project the ``access_as_shared``
-action on that object.
-
-
-Sharing a network with specific projects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Create a network to share:
-
-.. code-block:: console
-
- $ openstack network create secret_network
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-25T20:16:40Z |
- | description | |
- | dns_domain | None |
- | id | f55961b9-3eb8-42eb-ac96-b97038b568de |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | 1450 |
- | name | secret_network |
- | port_security_enabled | True |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 9 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | updated_at | 2017-01-25T20:16:40Z |
- +---------------------------+--------------------------------------+
-
-
-Create the policy entry using the :command:`openstack network rbac create`
-command (in this example, the ID of the project we want to share with is
-``b87b2fc13e0248a4a031d38e06dc191d``):
-
-.. code-block:: console
-
- $ openstack network rbac create --target-project \
- b87b2fc13e0248a4a031d38e06dc191d --action access_as_shared \
- --type network f55961b9-3eb8-42eb-ac96-b97038b568de
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | action | access_as_shared |
- | id | f93efdbf-f1e0-41d2-b093-8328959d469e |
- | name | None |
- | object_id | f55961b9-3eb8-42eb-ac96-b97038b568de |
- | object_type | network |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | target_project_id | b87b2fc13e0248a4a031d38e06dc191d |
- +-------------------+--------------------------------------+
-
-
-The ``target-project`` parameter specifies the project that requires
-access to the network. The ``action`` parameter specifies what
-the project is allowed to do. The ``type`` parameter says
-that the target object is a network. The final parameter is the ID of
-the network we are granting access to.
-
-Project ``b87b2fc13e0248a4a031d38e06dc191d`` will now be able to see
-the network when running :command:`openstack network list` and
-:command:`openstack network show` and will also be able to create ports
-on that network. No other users (other than admins and the owner)
-will be able to see the network.
-
-To remove access for that project, delete the policy that allows
-it using the :command:`openstack network rbac delete` command:
-
-.. code-block:: console
-
- $ openstack network rbac delete f93efdbf-f1e0-41d2-b093-8328959d469e
-
-If that project has ports on the network, the server will prevent the
-policy from being deleted until the ports have been deleted:
-
-.. code-block:: console
-
- $ openstack network rbac delete f93efdbf-f1e0-41d2-b093-8328959d469e
- RBAC policy on object f93efdbf-f1e0-41d2-b093-8328959d469e
- cannot be removed because other objects depend on it.
-
-This process can be repeated any number of times to share a network
-with an arbitrary number of projects.
-
-
-Sharing a QoS policy with specific projects
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Create a QoS policy to share:
-
-.. code-block:: console
-
- $ openstack network qos policy create secret_policy
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | description | |
- | id | 1f730d69-1c45-4ade-a8f2-89070ac4f046 |
- | name | secret_policy |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | rules | [] |
- | shared | False |
- +-------------+--------------------------------------+
-
-
-Create the RBAC policy entry using the :command:`openstack network rbac create`
-command (in this example, the ID of the project we want to share with is
-``be98b82f8fdf46b696e9e01cebc33fd9``):
-
-.. code-block:: console
-
- $ openstack network rbac create --target-project \
- be98b82f8fdf46b696e9e01cebc33fd9 --action access_as_shared \
- --type qos_policy 1f730d69-1c45-4ade-a8f2-89070ac4f046
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | action | access_as_shared |
- | id | 8828e38d-a0df-4c78-963b-e5f215d3d550 |
- | name | None |
- | object_id | 1f730d69-1c45-4ade-a8f2-89070ac4f046 |
- | object_type | qos_policy |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | target_project_id | be98b82f8fdf46b696e9e01cebc33fd9 |
- +-------------------+--------------------------------------+
-
-
-The ``target-project`` parameter specifies the project that requires
-access to the QoS policy. The ``action`` parameter specifies what
-the project is allowed to do. The ``type`` parameter says
-that the target object is a QoS policy. The final parameter is the ID of
-the QoS policy we are granting access to.
-
-Project ``be98b82f8fdf46b696e9e01cebc33fd9`` will now be able to see
-the QoS policy when running :command:`openstack network qos policy list` and
-:command:`openstack network qos policy show` and will also be able to bind
-it to its ports or networks. No other users (other than admins and the owner)
-will be able to see the QoS policy.
-
-To remove access for that project, delete the RBAC policy that allows
-it using the :command:`openstack network rbac delete` command:
-
-.. code-block:: console
-
- $ openstack network rbac delete 8828e38d-a0df-4c78-963b-e5f215d3d550
-
-If that project has ports or networks with the QoS policy applied to them,
-the server will not delete the RBAC policy until
-the QoS policy is no longer in use:
-
-.. code-block:: console
-
- $ openstack network rbac delete 8828e38d-a0df-4c78-963b-e5f215d3d550
- RBAC policy on object 8828e38d-a0df-4c78-963b-e5f215d3d550
- cannot be removed because other objects depend on it.
-
-This process can be repeated any number of times to share a qos-policy
-with an arbitrary number of projects.
-
-
-How the 'shared' flag relates to these entries
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As introduced in other guide entries, neutron provides a means of
-making an object (``network``, ``qos-policy``) available to every project.
-This is accomplished using the ``shared`` flag on the supported object:
-
-.. code-block:: console
-
- $ openstack network create global_network --share
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-25T20:32:06Z |
- | description | |
- | dns_domain | None |
- | id | 84a7e627-573b-49da-af66-c9a65244f3ce |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | 1450 |
- | name | global_network |
- | port_security_enabled | True |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 7 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | router:external | Internal |
- | segments | None |
- | shared | True |
- | status | ACTIVE |
- | subnets | |
- | updated_at | 2017-01-25T20:32:07Z |
- +---------------------------+--------------------------------------+
-
-
-This is the equivalent of creating a policy on the network that permits
-every project to perform the action ``access_as_shared`` on that network.
-Neutron treats them as the same thing, so the policy entry for that
-network should be visible using the :command:`openstack network rbac list`
-command:
-
-.. code-block:: console
-
- $ openstack network rbac list
- +-------------------------------+-------------+--------------------------------+
- | ID | Object Type | Object ID |
- +-------------------------------+-------------+--------------------------------+
- | 58a5ee31-2ad6-467d- | qos_policy | 1f730d69-1c45-4ade- |
- | 8bb8-8c2ae3dd1382 | | a8f2-89070ac4f046 |
- | 27efbd79-f384-4d89-9dfc- | network | 84a7e627-573b-49da- |
- | 6c4a606ceec6 | | af66-c9a65244f3ce |
- +-------------------------------+-------------+--------------------------------+
-
-
-Use the :command:`neutron rbac-show` command to see the details:
-
-.. code-block:: console
-
- $ openstack network rbac show 27efbd79-f384-4d89-9dfc-6c4a606ceec6
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | action | access_as_shared |
- | id | 27efbd79-f384-4d89-9dfc-6c4a606ceec6 |
- | name | None |
- | object_id | 84a7e627-573b-49da-af66-c9a65244f3ce |
- | object_type | network |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | target_project_id | * |
- +-------------------+--------------------------------------+
-
-
-The output shows that the entry allows the action ``access_as_shared``
-on object ``84a7e627-573b-49da-af66-c9a65244f3ce`` of type ``network``
-to target_tenant ``*``, which is a wildcard that represents all projects.
-
-Currently, the ``shared`` flag is just a mapping to the underlying
-RBAC policies for a network. Setting the flag to ``True`` on a network
-creates a wildcard RBAC entry. Setting it to ``False`` removes the
-wildcard entry.
-
-When you run :command:`openstack network list` or
-:command:`openstack network show`, the ``shared`` flag is calculated by the
-server based on the calling project and the RBAC entries for each network.
-For QoS objects use :command:`openstack network qos policy list` or
-:command:`openstack network qos policy show` respectively.
-If there is a wildcard entry, the ``shared`` flag is always set to ``True``.
-If there are only entries that share with specific projects, only
-the projects the object is shared to will see the flag as ``True``
-and the rest will see the flag as ``False``.
-
-
-Allowing a network to be used as an external network
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To make a network available as an external network for specific projects
-rather than all projects, use the ``access_as_external`` action.
-
-#. Create a network that you want to be available as an external network:
-
- .. code-block:: console
-
- $ openstack network create secret_external_network
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-25T20:36:59Z |
- | description | |
- | dns_domain | None |
- | id | 802d4e9e-4649-43e6-9ee2-8d052a880cfb |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | 1450 |
- | name | secret_external_network |
- | port_security_enabled | True |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | proider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 21 |
- | qos_policy_id | None |
- | revision_number | 3 |
- | router:external | Internal |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | updated_at | 2017-01-25T20:36:59Z |
- +---------------------------+--------------------------------------+
-
-
-#. Create a policy entry using the :command:`openstack network rbac create`
- command (in this example, the ID of the project we want to share with is
- ``838030a7bf3c4d04b4b054c0f0b2b17c``):
-
- .. code-block:: console
-
- $ openstack network rbac create --target-project \
- 838030a7bf3c4d04b4b054c0f0b2b17c --action access_as_external \
- --type network 802d4e9e-4649-43e6-9ee2-8d052a880cfb
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | action | access_as_external |
- | id | afdd5b8d-b6f5-4a15-9817-5231434057be |
- | name | None |
- | object_id | 802d4e9e-4649-43e6-9ee2-8d052a880cfb |
- | object_type | network |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | target_project_id | 838030a7bf3c4d04b4b054c0f0b2b17c |
- +-------------------+--------------------------------------+
-
-
-The ``target-project`` parameter specifies the project that requires
-access to the network. The ``action`` parameter specifies what
-the project is allowed to do. The ``type`` parameter indicates
-that the target object is a network. The final parameter is the ID of
-the network we are granting external access to.
-
-Now project ``838030a7bf3c4d04b4b054c0f0b2b17c`` is able to see
-the network when running :command:`openstack network list`
-and :command:`openstack network show` and can attach router gateway
-ports to that network. No other users (other than admins
-and the owner) are able to see the network.
-
-To remove access for that project, delete the policy that allows
-it using the :command:`openstack network rbac delete` command:
-
-.. code-block:: console
-
- $ openstack network rbac delete afdd5b8d-b6f5-4a15-9817-5231434057be
-
-If that project has router gateway ports attached to that network,
-the server prevents the policy from being deleted until the
-ports have been deleted:
-
-.. code-block:: console
-
- $ openstack network rbac delete afdd5b8d-b6f5-4a15-9817-5231434057be
- RBAC policy on object afdd5b8d-b6f5-4a15-9817-5231434057be
- cannot be removed because other objects depend on it.
-
-This process can be repeated any number of times to make a network
-available as external to an arbitrary number of projects.
-
-If a network is marked as external during creation, it now implicitly
-creates a wildcard RBAC policy granting everyone access to preserve
-previous behavior before this feature was added.
-
-.. code-block:: console
-
- $ openstack network create global_external_network --external
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | created_at | 2017-01-25T20:41:44Z |
- | description | |
- | dns_domain | None |
- | id | 72a257a2-a56e-4ac7-880f-94a4233abec6 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | is_default | None |
- | mtu | 1450 |
- | name | global_external_network |
- | port_security_enabled | True |
- | project_id | 61b7eba037fd41f29cfba757c010faff |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 69 |
- | qos_policy_id | None |
- | revision_number | 4 |
- | router:external | External |
- | segments | None |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | updated_at | 2017-01-25T20:41:44Z |
- +---------------------------+--------------------------------------+
-
-
-In the output above the standard ``router:external`` attribute is
-``External`` as expected. Now a wildcard policy is visible in the
-RBAC policy listings:
-
-.. code-block:: console
-
- $ openstack network rbac list --long -c ID -c Action
- +--------------------------------------+--------------------+
- | ID | Action |
- +--------------------------------------+--------------------+
- | b694e541-bdca-480d-94ec-eda59ab7d71a | access_as_external |
- +--------------------------------------+--------------------+
-
-
-You can modify or delete this policy with the same constraints
-as any other RBAC ``access_as_external`` policy.
-
-
-Preventing regular users from sharing objects with each other
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The default ``policy.json`` file will not allow regular
-users to share objects with every other project using a wildcard;
-however, it will allow them to share objects with specific project
-IDs.
-
-If an operator wants to prevent normal users from doing this, the
-``"create_rbac_policy":`` entry in ``policy.json`` can be adjusted
-from ``""`` to ``"rule:admin_only"``.
diff --git a/doc/networking-guide/source/config-routed-networks.rst b/doc/networking-guide/source/config-routed-networks.rst
deleted file mode 100644
index bfe5178ee1..0000000000
--- a/doc/networking-guide/source/config-routed-networks.rst
+++ /dev/null
@@ -1,450 +0,0 @@
-.. _config-routed-provider-networks:
-
-========================
-Routed provider networks
-========================
-
-.. note::
-
- Use of this feature requires the OpenStack client
- version 3.3 or newer.
-
-Before routed provider networks, the Networking service could not present a
-multi-segment layer-3 network as a single entity. Thus, each operator typically
-chose one of the following architectures:
-
-* Single large layer-2 network
-* Multiple smaller layer-2 networks
-
-Single large layer-2 networks become complex at scale and involve significant
-failure domains.
-
-Multiple smaller layer-2 networks scale better and shrink failure domains, but
-leave network selection to the user. Without additional information, users
-cannot easily differentiate these networks.
-
-A routed provider network enables a single provider network to represent
-multiple layer-2 networks (broadcast domains) or segments and enables the
-operator to present one network to users. However, the particular IP
-addresses available to an instance depend on the segment of the network
-available on the particular compute node.
-
-Similar to conventional networking, layer-2 (switching) handles transit of
-traffic between ports on the same segment and layer-3 (routing) handles
-transit of traffic between segments.
-
-Each segment requires at least one subnet that explicitly belongs to that
-segment. The association between a segment and a subnet distinguishes a
-routed provider network from other types of networks. The Networking service
-enforces that either zero or all subnets on a particular network associate
-with a segment. For example, attempting to create a subnet without a segment
-on a network containing subnets with segments generates an error.
-
-The Networking service does not provide layer-3 services between segments.
-Instead, it relies on physical network infrastructure to route subnets.
-Thus, both the Networking service and physical network infrastructure must
-contain configuration for routed provider networks, similar to conventional
-provider networks. In the future, implementation of dynamic routing protocols
-may ease configuration of routed networks.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Routed provider networks require additional prerequisites over conventional
-provider networks. We recommend using the following procedure:
-
-#. Begin with segments. The Networking service defines a segment using the
- following components:
-
- * Unique physical network name
- * Segmentation type
- * Segmentation ID
-
- For example, ``provider1``, ``VLAN``, and ``2016``. See the
- `API reference `__
- for more information.
-
- Within a network, use a unique physical network name for each segment which
- enables reuse of the same segmentation details between subnets. For
- example, using the same VLAN ID across all segments of a particular
- provider network. Similar to conventional provider networks, the operator
- must provision the layer-2 physical network infrastructure accordingly.
-
-#. Implement routing between segments.
-
- The Networking service does not provision routing among segments. The
- operator must implement routing among segments of a provider network.
- Each subnet on a segment must contain the gateway address of the
- router interface on that particular subnet. For example:
-
- =========== ======= ======================= =====================
- Segment Version Addresses Gateway
- =========== ======= ======================= =====================
- segment1 4 203.0.113.0/24 203.0.113.1
- segment1 6 fd00:203:0:113::/64 fd00:203:0:113::1
- segment2 4 198.51.100.0/24 198.51.100.1
- segment2 6 fd00:198:51:100::/64 fd00:198:51:100::1
- =========== ======= ======================= =====================
-
-#. Map segments to compute nodes.
-
- Routed provider networks imply that compute nodes reside on different
- segments. The operator must ensure that every compute host that is supposed
- to participate in a router provider network has direct connectivity to one
- of its segments.
-
- =========== ====== ================
- Host Rack Physical Network
- =========== ====== ================
- compute0001 rack 1 segment 1
- compute0002 rack 1 segment 1
- ... ... ...
- compute0101 rack 2 segment 2
- compute0102 rack 2 segment 2
- compute0102 rack 2 segment 2
- ... ... ...
- =========== ====== ================
-
-#. Deploy DHCP agents.
-
- Unlike conventional provider networks, a DHCP agent cannot support more
- than one segment within a network. The operator must deploy at least one
- DHCP agent per segment. Consider deploying DHCP agents on compute nodes
- containing the segments rather than one or more network nodes to reduce
- node count.
-
- =========== ====== ================
- Host Rack Physical Network
- =========== ====== ================
- network0001 rack 1 segment 1
- network0002 rack 2 segment 2
- ... ... ...
- =========== ====== ================
-
-#. Configure communication of the Networking service with the Compute
- scheduler.
-
- An instance with an interface with an IPv4 address in a routed provider
- network must be placed by the Compute scheduler in a host that has access to
- a segment with available IPv4 addresses. To make this possible, the
- Networking service communicates to the Compute scheduler the inventory of
- IPv4 addresses associated with each segment of a routed provider network.
- The operator must configure the authentication credentials that the
- Networking service will use to communicate with the Compute scheduler's
- placement API. Please see below an example configuration.
-
- .. note::
-
- Coordination between the Networking service and the Compute scheduler is
- not necessary for IPv6 subnets as a consequence of their large address
- spaces.
-
- .. note::
-
- The coordination between the Networking service and the Compute scheduler
- requires the following minimum API micro-versions.
-
- * Compute service API: 2.41
- * Placement API: 1.1
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Controller node
----------------
-
-#. Enable the segments service plug-in by appending ``segments`` to the list
- of ``service_plugins`` in the ``neutron.conf`` file on all nodes running the
- ``neutron-server`` service:
-
- .. code-block:: ini
-
- [DEFAULT]
- # ...
- service_plugins = ..., segments
-
-#. Add a ``placement`` section to the ``neutron.conf`` file with authentication
- credentials for the Compute service placement API:
-
- .. code-block:: ini
-
- [placement]
- auth_uri = http://192.0.2.72/identity
- project_domain_name = Default
- project_name = service
- user_domain_name = Default
- password = apassword
- username = nova
- auth_url = http://192.0.2.72/identity_admin
- auth_type = password
- region_name = RegionOne
-
-#. Restart the ``neutron-server`` service.
-
-Network or compute nodes
-------------------------
-
-* Configure the layer-2 agent on each node to map one or more segments to
- the appropriate physical network bridge or interface and restart the
- agent.
-
-Create a routed provider network
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following steps create a routed provider network with two segments. Each
-segment contains one IPv4 subnet and one IPv6 subnet.
-
-#. Source the administrative project credentials.
-#. Create a VLAN provider network which includes a default segment. In this
- example, the network uses the ``provider1`` physical network with VLAN ID
- 2016.
-
- .. code-block:: console
-
- $ openstack network create --share --provider-physical-network provider1 \
- --provider-network-type vlan --provider-segment 2016 multisegment1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | l2_adjacency | True |
- | mtu | 1500 |
- | name | multisegment1 |
- | port_security_enabled | True |
- | provider:network_type | vlan |
- | provider:physical_network | provider1 |
- | provider:segmentation_id | 2016 |
- | router:external | Internal |
- | shared | True |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- +---------------------------+--------------------------------------+
-
-#. Rename the default segment to ``segment1``.
-
- .. code-block:: console
-
- $ openstack network segment list --network multisegment1
- +--------------------------------------+----------+--------------------------------------+--------------+---------+
- | ID | Name | Network | Network Type | Segment |
- +--------------------------------------+----------+--------------------------------------+--------------+---------+
- | 43e16869-ad31-48e4-87ce-acf756709e18 | None | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | vlan | 2016 |
- +--------------------------------------+----------+--------------------------------------+--------------+---------+
-
- .. code-block:: console
-
- $ openstack network segment set --name segment1 43e16869-ad31-48e4-87ce-acf756709e18
-
- .. note::
-
- This command provides no output.
-
-#. Create a second segment on the provider network. In this example, the
- segment uses the ``provider2`` physical network with VLAN ID 2016.
-
- .. code-block:: console
-
- $ openstack network segment create --physical-network provider2 \
- --network-type vlan --segment 2016 --network multisegment1 segment2
- +------------------+--------------------------------------+
- | Field | Value |
- +------------------+--------------------------------------+
- | description | None |
- | headers | |
- | id | 053b7925-9a89-4489-9992-e164c8cc8763 |
- | name | segment2 |
- | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | network_type | vlan |
- | physical_network | provider2 |
- | segmentation_id | 2016 |
- +------------------+--------------------------------------+
-
-#. Verify that the network contains the ``segment1`` and ``segment2`` segments.
-
- .. code-block:: console
-
- $ openstack network segment list --network multisegment1
- +--------------------------------------+----------+--------------------------------------+--------------+---------+
- | ID | Name | Network | Network Type | Segment |
- +--------------------------------------+----------+--------------------------------------+--------------+---------+
- | 053b7925-9a89-4489-9992-e164c8cc8763 | segment2 | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | vlan | 2016 |
- | 43e16869-ad31-48e4-87ce-acf756709e18 | segment1 | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | vlan | 2016 |
- +--------------------------------------+----------+--------------------------------------+--------------+---------+
-
-#. Create subnets on the ``segment1`` segment. In this example, the IPv4
- subnet uses 203.0.113.0/24 and the IPv6 subnet uses fd00:203:0:113::/64.
-
- .. code-block:: console
-
- $ openstack subnet create \
- --network multisegment1 --network-segment segment1 \
- --ip-version 4 --subnet-range 203.0.113.0/24 \
- multisegment1-segment1-v4
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 203.0.113.2-203.0.113.254 |
- | cidr | 203.0.113.0/24 |
- | enable_dhcp | True |
- | gateway_ip | 203.0.113.1 |
- | id | c428797a-6f8e-4cb1-b394-c404318a2762 |
- | ip_version | 4 |
- | name | multisegment1-segment1-v4 |
- | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | segment_id | 43e16869-ad31-48e4-87ce-acf756709e18 |
- +-------------------+--------------------------------------+
-
- $ openstack subnet create \
- --network multisegment1 --network-segment segment1 \
- --ip-version 6 --subnet-range fd00:203:0:113::/64 \
- --ipv6-address-mode slaac multisegment1-segment1-v6
- +-------------------+------------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------------+
- | allocation_pools | fd00:203:0:113::2-fd00:203:0:113:ffff:ffff:ffff:ffff |
- | cidr | fd00:203:0:113::/64 |
- | enable_dhcp | True |
- | gateway_ip | fd00:203:0:113::1 |
- | id | e41cb069-9902-4c01-9e1c-268c8252256a |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | None |
- | name | multisegment1-segment1-v6 |
- | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | segment_id | 43e16869-ad31-48e4-87ce-acf756709e18 |
- +-------------------+------------------------------------------------------+
-
- .. note::
-
- By default, IPv6 subnets on provider networks rely on physical network
- infrastructure for stateless address autoconfiguration (SLAAC) and
- router advertisement.
-
-#. Create subnets on the ``segment2`` segment. In this example, the IPv4
- subnet uses 198.51.100.0/24 and the IPv6 subnet uses fd00:198:51:100::/64.
-
- .. code-block:: console
-
- $ openstack subnet create \
- --network multisegment1 --network-segment segment2 \
- --ip-version 4 --subnet-range 198.51.100.0/24 \
- multisegment1-segment2-v4
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 198.51.100.2-198.51.100.254 |
- | cidr | 198.51.100.0/24 |
- | enable_dhcp | True |
- | gateway_ip | 198.51.100.1 |
- | id | 242755c2-f5fd-4e7d-bd7a-342ca95e50b2 |
- | ip_version | 4 |
- | name | multisegment1-segment2-v4 |
- | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | segment_id | 053b7925-9a89-4489-9992-e164c8cc8763 |
- +-------------------+--------------------------------------+
-
- $ openstack subnet create \
- --network multisegment1 --network-segment segment2 \
- --ip-version 6 --subnet-range fd00:198:51:100::/64 \
- --ipv6-address-mode slaac multisegment1-segment2-v6
- +-------------------+--------------------------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------------------------+
- | allocation_pools | fd00:198:51:100::2-fd00:198:51:100:ffff:ffff:ffff:ffff |
- | cidr | fd00:198:51:100::/64 |
- | enable_dhcp | True |
- | gateway_ip | fd00:198:51:100::1 |
- | id | b884c40e-9cfe-4d1b-a085-0a15488e9441 |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | None |
- | name | multisegment1-segment2-v6 |
- | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | segment_id | 053b7925-9a89-4489-9992-e164c8cc8763 |
- +-------------------+--------------------------------------------------------+
-
-#. Verify that each IPv4 subnet associates with at least one DHCP agent.
-
- .. code-block:: console
-
- $ neutron dhcp-agent-list-hosting-net multisegment1
- +--------------------------------------+-------------+----------------+-------+
- | id | host | admin_state_up | alive |
- +--------------------------------------+-------------+----------------+-------+
- | c904ed10-922c-4c1a-84fd-d928abaf8f55 | compute0001 | True | :-) |
- | e0b22cc0-d2a6-4f1c-b17c-27558e20b454 | compute0101 | True | :-) |
- +--------------------------------------+-------------+----------------+-------+
-
-#. Verify that inventories were created for each segment IPv4 subnet in the
- Compute service placement API (for the sake of brevity, only one of the
- segments is shown in this example).
-
- .. code-block:: console
-
- $ SEGMENT_ID=053b7925-9a89-4489-9992-e164c8cc8763
- $ curl -s -X GET \
- http://localhost/placement/resource_providers/$SEGMENT_ID/inventories \
- -H "Content-type: application/json" \
- -H "X-Auth-Token: $TOKEN" \
- -H "Openstack-Api-Version: placement 1.1"
- {
- "resource_provider_generation": 1,
- "inventories": {
- "allocation_ratio": 1,
- "total": 254,
- "reserved": 2,
- "step_size": 1,
- "min_unit": 1,
- "max_unit": 1
- }
- }
-
- .. note::
-
- As of the writing of this guide, there is not placement API CLI client,
- so the :command:`curl` command is used for this example.
-
-#. Verify that host aggregates were created for each segment in the Compute
- service (for the sake of brevity, only one of the segments is shown in this
- example).
-
- .. code-block:: console
-
- $ openstack aggregate list
- +----+---------------------------------------------------------+-------------------+
- | Id | Name | Availability Zone |
- +----+---------------------------------------------------------+-------------------+
- | 10 | Neutron segment id 053b7925-9a89-4489-9992-e164c8cc8763 | None |
- +----+---------------------------------------------------------+-------------------+
-
-#. Launch one or more instances. Each instance obtains IP addresses according
- to the segment it uses on the particular compute node.
-
- .. note::
-
- Creating a port and passing it to an instance yields a different
- behavior than conventional networks. The Networking service
- defers assignment of IP addresses to the port until the particular
- compute node becomes apparent. For example:
-
- .. code-block:: console
-
- $ openstack port create --network multisegment1 port1
- +-----------------------+--------------------------------------+
- | Field | Value |
- +-----------------------+--------------------------------------+
- | admin_state_up | UP |
- | binding_vnic_type | normal |
- | id | 6181fb47-7a74-4add-9b6b-f9837c1c90c4 |
- | ip_allocation | deferred |
- | mac_address | fa:16:3e:34:de:9b |
- | name | port1 |
- | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
- | port_security_enabled | True |
- | security_groups | e4fcef0d-e2c5-40c3-a385-9c33ac9289c5 |
- | status | DOWN |
- +-----------------------+--------------------------------------+
diff --git a/doc/networking-guide/source/config-service-subnets.rst b/doc/networking-guide/source/config-service-subnets.rst
deleted file mode 100644
index 585077f3a5..0000000000
--- a/doc/networking-guide/source/config-service-subnets.rst
+++ /dev/null
@@ -1,338 +0,0 @@
-.. _config-service-subnets:
-
-===============
-Service subnets
-===============
-
-Service subnets enable operators to define valid port types for each
-subnet on a network without limiting networks to one subnet or manually
-creating ports with a specific subnet ID. Using this feature, operators
-can ensure that ports for instances and router interfaces, for example,
-always use different subnets.
-
-Operation
-~~~~~~~~~
-
-Define one or more service types for one or more subnets on a particular
-network. Each service type must correspond to a valid device owner within
-the port model in order for it to be used.
-
-During IP allocation, the :ref:`IPAM ` driver returns an
-address from a subnet with a service type matching the port device
-owner. If no subnets match, or all matching subnets lack available IP
-addresses, the IPAM driver attempts to use a subnet without any service
-types to preserve compatibility. If all subnets on a network have a
-service type, the IPAM driver cannot preserve compatibility. However, this
-feature enables strict IP allocation from subnets with a matching device
-owner. If multiple subnets contain the same service type, or a subnet
-without a service type exists, the IPAM driver selects the first subnet
-with a matching service type. For example, a floating IP agent gateway port
-uses the following selection process:
-
-* ``network:floatingip_agent_gateway``
-* ``None``
-
-.. note::
-
- Ports with the device owner ``network:dhcp`` are exempt from the above IPAM
- logic for subnets with ``dhcp_enabled`` set to ``True``. This preserves the
- existing automatic DHCP port creation behaviour for DHCP-enabled subnets.
-
-Creating or updating a port with a specific subnet skips this selection
-process and explicitly uses the given subnet.
-
-Usage
-~~~~~
-
-.. note::
-
- Creating a subnet with a service type requires administrative
- privileges.
-
-Example 1 - Proof-of-concept
-----------------------------
-
-The following example is not typical of an actual deployment. It is shown
-to allow users to experiment with configuring service subnets.
-
-#. Create a network.
-
- .. code-block:: console
-
- $ openstack network create demo-net1
- +---------------------------+--------------------------------------+
- | Field | Value |
- +---------------------------+--------------------------------------+
- | admin_state_up | UP |
- | availability_zone_hints | |
- | availability_zones | |
- | description | |
- | headers | |
- | id | b5b729d8-31cc-4d2c-8284-72b3291fec02 |
- | ipv4_address_scope | None |
- | ipv6_address_scope | None |
- | mtu | 1450 |
- | name | demo-net1 |
- | port_security_enabled | True |
- | project_id | a3db43cd0f224242a847ab84d091217d |
- | provider:network_type | vxlan |
- | provider:physical_network | None |
- | provider:segmentation_id | 110 |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- | subnets | |
- | tags | [] |
- +---------------------------+--------------------------------------+
-
-#. Create a subnet on the network with one or more service types. For
- example, the ``compute:nova`` service type enables instances to use
- this subnet.
-
- .. code-block:: console
-
- $ openstack subnet create demo-subnet1 --subnet-range 192.0.2.0/24 \
- --service-type 'compute:nova' --network demo-net1
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | id | 6e38b23f-0b27-4e3c-8e69-fd23a3df1935 |
- | ip_version | 4 |
- | cidr | 192.0.2.0/24 |
- | name | demo-subnet1 |
- | network_id | b5b729d8-31cc-4d2c-8284-72b3291fec02 |
- | service_types | ['compute:nova'] |
- | tenant_id | a8b3054cc1214f18b1186b291525650f |
- +-------------------+--------------------------------------+
-
-#. Optionally, create another subnet on the network with a different service
- type. For example, the ``compute:foo`` arbitrary service type.
-
- .. code-block:: console
-
- $ openstack subnet create demo-subnet2 --subnet-range 198.51.100.0/24 \
- --service-type 'compute:foo' --network demo-net1
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | id | ea139dcd-17a3-4f0a-8cca-dff8b4e03f8a |
- | ip_version | 4 |
- | cidr | 198.51.100.0/24 |
- | name | demo-subnet2 |
- | network_id | b5b729d8-31cc-4d2c-8284-72b3291fec02 |
- | service_types | ['compute:foo'] |
- | tenant_id | a8b3054cc1214f18b1186b291525650f |
- +-------------------+--------------------------------------+
-
-#. Launch an instance using the network. For example, using the ``cirros``
- image and ``m1.tiny`` flavor.
-
- .. code-block:: console
-
- $ openstack server create demo-instance1 --flavor m1.tiny \
- --image cirros --nic net-id=b5b729d8-31cc-4d2c-8284-72b3291fec02
- +--------------------------------------+-----------------------------------------------+
- | Field | Value |
- +--------------------------------------+-----------------------------------------------+
- | OS-DCF:diskConfig | MANUAL |
- | OS-EXT-AZ:availability_zone | |
- | OS-EXT-SRV-ATTR:host | None |
- | OS-EXT-SRV-ATTR:hypervisor_hostname | None |
- | OS-EXT-SRV-ATTR:instance_name | instance-00000009 |
- | OS-EXT-STS:power_state | 0 |
- | OS-EXT-STS:task_state | scheduling |
- | OS-EXT-STS:vm_state | building |
- | OS-SRV-USG:launched_at | None |
- | OS-SRV-USG:terminated_at | None |
- | accessIPv4 | |
- | accessIPv6 | |
- | addresses | |
- | adminPass | Fn85skabdxBL |
- | config_drive | |
- | created | 2016-09-19T15:07:42Z |
- | flavor | m1.tiny (1) |
- | hostId | |
- | id | 04222b73-1a6e-4c2a-9af4-ef3d17d521ff |
- | image | cirros (4aaec87d-c655-4856-8618-b2dada3a2b11) |
- | key_name | None |
- | name | demo-instance1 |
- | os-extended-volumes:volumes_attached | [] |
- | progress | 0 |
- | project_id | d44c19e056674381b86430575184b167 |
- | properties | |
- | security_groups | [{u'name': u'default'}] |
- | status | BUILD |
- | updated | 2016-09-19T15:07:42Z |
- | user_id | 331afbeb322d4c559a181e19051ae362 |
- +--------------------------------------+-----------------------------------------------+
-
-#. Check the instance status. The ``Networks`` field contains an IP address
- from the subnet having the ``compute:nova`` service type.
-
- .. code-block:: console
-
- $ openstack server list
- +--------------------------------------+-----------------+---------+---------------------+
- | ID | Name | Status | Networks |
- +--------------------------------------+-----------------+---------+---------------------+
- | 20181f46-5cd2-4af8-9af0-f4cf5c983008 | demo-instance1 | ACTIVE | demo-net1=192.0.2.3 |
- +--------------------------------------+-----------------+---------+---------------------+
-
-Example 2 - DVR configuration
------------------------------
-
-The following example outlines how you can configure service subnets in
-a DVR-enabled deployment, with the goal of minimizing public IP
-address consumption. This example uses three subnets on the same external
-network:
-
-* 192.0.2.0/24 for instance floating IP addresses
-* 198.51.100.0/24 for floating IP agent gateway IPs configured on compute nodes
-* 203.0.113.0/25 for all other IP allocations on the external network
-
-This example again uses the private network, ``demo-net1``
-(b5b729d8-31cc-4d2c-8284-72b3291fec02) which was created in
-`Example 1 - Proof-of-concept`_.
-
-.. note::
-
- The output of the commands is not always shown since it
- is very similar to the above.
-
-#. Create an external network:
-
- .. code-block:: console
-
- $ openstack network create --external demo-ext-net
-
-#. Create a subnet on the external network for the instance floating IP
- addresses. This uses the ``network:floatingip`` service type.
-
- .. code-block:: console
-
- $ openstack subnet create demo-floating-ip-subnet \
- --subnet-range 192.0.2.0/24 --no-dhcp \
- --service-type 'network:floatingip' --network demo-ext-net
-
-#. Create a subnet on the external network for the floating IP agent
- gateway IP addresses, which are configured by DVR on compute nodes.
- This will use the ``network:floatingip_agent_gateway`` service type.
-
- .. code-block:: console
-
- $ openstack subnet create demo-floating-ip-agent-gateway-subnet \
- --subnet-range 198.51.100.0/24 --no-dhcp \
- --service-type 'network:floatingip_agent_gateway' \
- --network demo-ext-net
-
-#. Create a subnet on the external network for all other IP addresses
- allocated on the external network. This will not use any service
- type. It acts as a fall back for allocations that do not match
- either of the above two service subnets.
-
- .. code-block:: console
-
- $ openstack subnet create demo-other-subnet \
- --subnet-range 203.0.113.0/25 --no-dhcp \
- --network demo-ext-net
-
-#. Create a router:
-
- .. code-block:: console
-
- $ openstack router create demo-router
-
-#. Add an interface to the router on demo-subnet1:
-
- .. code-block:: console
-
- $ openstack router add subnet demo-router demo-subnet1
-
-#. Set the external gateway for the router, which will create an
- interface and allocate an IP address on demo-ext-net:
-
- .. code-block:: console
-
- $ neutron router-gateway-set demo-router demo-ext-net
-
-#. Launch an instance on a private network and retrieve the neutron
- port ID that was allocated. As above, use the ``cirros``
- image and ``m1.tiny`` flavor:
-
- .. code-block:: console
-
- $ openstack server create demo-instance1 --flavor m1.tiny \
- --image cirros --nic net-id=b5b729d8-31cc-4d2c-8284-72b3291fec02
- $ openstack port list --server demo-instance1
- +--------------------------------------+------+-------------------+--------------------------------------------------+--------+
- | ID | Name | MAC Address | Fixed IP Addresses | Status |
- +--------------------------------------+------+-------------------+--------------------------------------------------+--------+
- | a752bb24-9bf2-4d37-b9d6-07da69c86f19 | | fa:16:3e:99:54:32 | ip_address='203.0.113.130', | ACTIVE |
- | | | | subnet_id='6e38b23f-0b27-4e3c-8e69-fd23a3df1935' | |
- +--------------------------------------+------+-------------------+--------------------------------------------------+--------+
-
-#. Associate a floating IP with the instance port and verify it was
- allocated an IP address from the correct subnet:
-
- .. code-block:: console
-
- $ openstack floating ip create --port \
- a752bb24-9bf2-4d37-b9d6-07da69c86f19 demo-ext-net
- +---------------------+--------------------------------------+
- | Field | Value |
- +---------------------+--------------------------------------+
- | fixed_ip_address | 203.0.113.130 |
- | floating_ip_address | 192.0.2.12 |
- | floating_network_id | 02d236d5-dad9-4082-bb6b-5245f9f84d13 |
- | id | f15cae7f-5e05-4b19-bd25-4bb71edcf3de |
- | port_id | a752bb24-9bf2-4d37-b9d6-07da69c86f19 |
- | project_id | d44c19e056674381b86430575184b167 |
- | router_id | 5a8ca19f-3703-4f81-bc29-db6bc2f528d6 |
- | status | ACTIVE |
- +---------------------+--------------------------------------+
-
-#. As the `admin` user, verify the neutron routers are allocated IP
- addresses from their correct subnets. Use ``openstack port list``
- to find ports associated with the routers.
-
- First, the router gateway external port:
-
- .. code-block:: console
-
- $ neutron port-show f148ffeb-3c26-4067-bc5f-5c3dfddae2f5
- +-----------------------+--------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+--------------------------------------------------------------------------+
- | admin_state_up | UP |
- | device_id | 5a8ca19f-3703-4f81-bc29-db6bc2f528d6 |
- | device_owner | network:router_gateway |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='203.0.113.11', |
- | | subnet_id='67c251d9-2b7a-4200-99f6-e13785b0334d' |
- | id | f148ffeb-3c26-4067-bc5f-5c3dfddae2f5 |
- | mac_address | fa:16:3e:2c:0f:69 |
- | network_id | 02d236d5-dad9-4082-bb6b-5245f9f84d13 |
- | project_id | |
- | status | ACTIVE |
- +-----------------------+--------------------------------------------------------------------------+
-
- Second, the router floating IP agent gateway external port:
-
- .. code-block:: console
-
- $ neutron port-show a2d1e756-8ae1-4f96-9aa1-e7ea16a6a68a
- +-----------------------+--------------------------------------------------------------------------+
- | Field | Value |
- +-----------------------+--------------------------------------------------------------------------+
- | admin_state_up | UP |
- | device_id | 3d0c98eb-bca3-45cc-8aa4-90ae3deb0844 |
- | device_owner | network:floatingip_agent_gateway |
- | extra_dhcp_opts | |
- | fixed_ips | ip_address='198.51.100.10', |
- | | subnet_id='67c251d9-2b7a-4200-99f6-e13785b0334d' |
- | id | a2d1e756-8ae1-4f96-9aa1-e7ea16a6a68a |
- | mac_address | fa:16:3e:f4:5d:fa |
- | network_id | 02d236d5-dad9-4082-bb6b-5245f9f84d13 |
- | project_id | |
- | status | ACTIVE |
- +-----------------------+--------------------------------------------------------------------------+
diff --git a/doc/networking-guide/source/config-services-agent.rst b/doc/networking-guide/source/config-services-agent.rst
deleted file mode 100644
index 1299de9625..0000000000
--- a/doc/networking-guide/source/config-services-agent.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-.. _config-services-agent:
-
-===================
-Services and agents
-===================
-
-A usual neutron setup consists of multiple services and agents running on one
-or multiple nodes (though some setups may not need any agents).
-Each of these services provide some of the networking or API services.
-Among those of special interest are:
-
-#. The neutron-server that provides API endpoints and serves as a single point
- of access to the database. It usually runs on the controller nodes.
-#. Layer2 agent that can utilize Open vSwitch, Linux Bridge or other
- vendor-specific technology to provide network segmentation and isolation
- for project networks.
- The L2 agent should run on every node where it is deemed
- responsible for wiring and securing virtual interfaces (usually both
- compute and network nodes).
-#. Layer3 agent that runs on network node and provides east-west and
- north-south routing plus some advanced services such as FWaaS or VPNaaS.
-
-Configuration options
-~~~~~~~~~~~~~~~~~~~~~
-
-The neutron configuration options are segregated between
-neutron-server and agents. Both services and agents may load the main
-``neutron.conf`` since this file should contain the oslo.messaging
-configuration for internal neutron RPCs and may contain host specific
-configuration, such as file paths. The ``neutron.conf`` contains the
-database, keystone, nova credentials, and endpoints strictly for
-neutron-server to use.
-
-In addition, neutron-server may load a plugin-specific configuration file, yet
-the agents should not. As the plugin configuration is primarily site wide
-options and the plugin provides the persistence layer for neutron, agents
-should be instructed to act upon these values through RPC.
-
-Each individual agent may have its own configuration file. This file should be
-loaded after the main ``neutron.conf`` file, so the agent configuration takes
-precedence. The agent-specific configuration may contain configurations which
-vary between hosts in a neutron deployment such as the
-``external_network_bridge`` for an L3 agent. If any agent requires access to
-additional external services beyond the neutron RPC, those endpoints should be
-defined in the agent-specific configuration file (for example, nova metadata
-for metadata agent).
diff --git a/doc/networking-guide/source/config-sfc.rst b/doc/networking-guide/source/config-sfc.rst
deleted file mode 100644
index 2016064b47..0000000000
--- a/doc/networking-guide/source/config-sfc.rst
+++ /dev/null
@@ -1,331 +0,0 @@
-.. _adv-config-sfc:
-
-=========================
-Service function chaining
-=========================
-
-:term:`Service function chain (SFC)` essentially refers to the
-:term:`software-defined networking (SDN)` version of
-:term:`policy-based routing (PBR)`. In many cases, SFC involves security,
-although it can include a variety of other features.
-
-Fundamentally, SFC routes packets through one or more service functions
-instead of conventional routing that routes packets using destination IP
-address. Service functions essentially emulate a series of physical network
-devices with cables linking them together.
-
-A basic example of SFC involves routing packets from one location to another
-through a firewall that lacks a "next hop" IP address from a conventional
-routing perspective. A more complex example involves an ordered series of
-service functions, each implemented using multiple instances (VMs). Packets
-must flow through one instance and a hashing algorithm distributes flows
-across multiple instances at each hop.
-
-Architecture
-~~~~~~~~~~~~
-
-All OpenStack Networking services and OpenStack Compute instances connect to
-a virtual network via ports making it possible to create a traffic steering
-model for service chaining using only ports. Including these ports in a
-port chain enables steering of traffic through one or more instances
-providing service functions.
-
-A port chain, or service function path, consists of the following:
-
-* A set of ports that define the sequence of service functions.
-* A set of flow classifiers that specify the classified traffic flows
- entering the chain.
-
-If a service function involves a pair of ports, the first port acts as the
-ingress port of the service function and the second port acts as the egress
-port. If both ports use the same value, they function as a single virtual
-bidirectional port.
-
-A port chain is a unidirectional service chain. The first port acts as the
-head of the service function chain and the second port acts as the tail of the
-service function chain. A bidirectional service function chain consists of
-two unidirectional port chains.
-
-A flow classifier can only belong to one port chain to prevent ambiguity as
-to which chain should handle packets in the flow. A check prevents such
-ambiguity. However, you can associate multiple flow classifiers with a port
-chain because multiple flows can request the same service function path.
-
-Currently, SFC lacks support for multi-project service functions.
-
-The port chain plug-in supports backing service providers including the OVS
-driver and a variety of SDN controller drivers. The common driver API enables
-different drivers to provide different implementations for the service chain
-path rendering.
-
-.. image:: figures/port-chain-architecture-diagram.png
- :alt: Port chain architecture
-
-.. image:: figures/port-chain-diagram.png
- :alt: Port chain model
-
-See the `developer documentation
-`_ for more information.
-
-Resources
-~~~~~~~~~
-
-Port chain
-----------
-
-* ``id`` - Port chain ID
-* ``tenant_id`` - Project ID
-* ``name`` - Readable name
-* ``description`` - Readable description
-* ``port_pair_groups`` - List of port pair group IDs
-* ``flow_classifiers`` - List of flow classifier IDs
-* ``chain_parameters`` - Dictionary of chain parameters
-
-A port chain consists of a sequence of port pair groups. Each port pair group
-is a hop in the port chain. A group of port pairs represents service functions
-providing equivalent functionality. For example, a group of firewall service
-functions.
-
-A flow classifier identifies a flow. A port chain can contain multiple flow
-classifiers. Omitting the flow classifier effectively prevents steering of
-traffic through the port chain.
-
-The ``chain_parameters`` attribute contains one or more parameters for the
-port chain. Currently, it only supports a correlation parameter that
-defaults to ``mpls`` for consistency with :term:`Open vSwitch` (OVS)
-capabilities. Future values for the correlation parameter may include
-the :term:`network service header (NSH)`.
-
-Port pair group
----------------
-
-* ``id`` - Port pair group ID
-* ``tenant_id`` - Project ID
-* ``name`` - Readable name
-* ``description`` - Readable description
-* ``port_pairs`` - List of service function port pairs
-
-A port pair group may contain one or more port pairs. Multiple port
-pairs enable load balancing/distribution over a set of functionally
-equivalent service functions.
-
-Port pair
----------
-
-* ``id`` - Port pair ID
-* ``tenant_id`` - Project ID
-* ``name`` - Readable name
-* ``description`` - Readable description
-* ``ingress`` - Ingress port
-* ``egress`` - Egress port
-* ``service_function_parameters`` - Dictionary of service function parameters
-
-A port pair represents a service function instance that includes an ingress and
-egress port. A service function containing a bidirectional port uses the same
-ingress and egress port.
-
-The ``service_function_parameters`` attribute includes one or more parameters
-for the service function. Currently, it only supports a correlation parameter
-that determines association of a packet with a chain. This parameter defaults
-to ``none`` for legacy service functions that lack support for correlation such
-as the NSH. If set to ``none``, the data plane implementation must provide
-service function proxy functionality.
-
-Flow classifier
----------------
-
-* ``id`` - Flow classifier ID
-* ``tenant_id`` - Project ID
-* ``name`` - Readable name
-* ``description`` - Readable description
-* ``ethertype`` - Ethertype (IPv4/IPv6)
-* ``protocol`` - IP protocol
-* ``source_port_range_min`` - Minimum source protocol port
-* ``source_port_range_max`` - Maximum source protocol port
-* ``destination_port_range_min`` - Minimum destination protocol port
-* ``destination_port_range_max`` - Maximum destination protocol port
-* ``source_ip_prefix`` - Source IP address or prefix
-* ``destination_ip_prefix`` - Destination IP address or prefix
-* ``logical_source_port`` - Source port
-* ``logical_destination_port`` - Destination port
-* ``l7_parameters`` - Dictionary of L7 parameters
-
-A combination of the source attributes defines the source of the flow. A
-combination of the destination attributes defines the destination of the flow.
-The ``l7_parameters`` attribute is a placeholder that may be used to support
-flow classification using layer 7 fields, such as a URL. If unspecified, the
-``logical_source_port`` and ``logical_destination_port`` attributes default to
-``none``, the ``ethertype`` attribute defaults to ``IPv4``, and all other
-attributes default to a wildcard value.
-
-Operations
-~~~~~~~~~~
-
-Create a port chain
--------------------
-
-The following example uses the ``neutron`` command-line interface (CLI) to
-create a port chain consisting of three service function instances to handle
-HTTP (TCP) traffic flows from 192.0.2.11:1000 to 198.51.100.11:80.
-
-* Instance 1
-
- * Name: vm1
- * Function: Firewall
- * Port pair: [p1, p2]
-
-* Instance 2
-
- * Name: vm2
- * Function: Firewall
- * Port pair: [p3, p4]
-
-* Instance 3
-
- * Name: vm3
- * Function: Intrusion detection system (IDS)
- * Port pair: [p5, p6]
-
-.. note::
-
- The example network ``net1`` must exist before creating ports on it.
-
-#. Source the credentials of the project that owns the ``net1`` network.
-
-#. Create ports on network ``net1`` and record the UUID values.
-
- .. code-block:: console
-
- $ openstack port create p1 --network net1
- $ openstack port create p2 --network net1
- $ openstack port create p3 --network net1
- $ openstack port create p4 --network net1
- $ openstack port create p5 --network net1
- $ openstack port create p6 --network net1
-
-#. Launch service function instance ``vm1`` using ports ``p1`` and ``p2``,
- ``vm2`` using ports ``p3`` and ``p4``, and ``vm3`` using ports ``p5``
- and ``p6``.
-
- .. code-block:: console
-
- $ openstack server create --nic port-id=P1_ID --nic port-id=P2_ID vm1
- $ openstack server create --nic port-id=P3_ID --nic port-id=P4_ID vm2
- $ openstack server create --nic port-id=P5_ID --nic port-id=P6_ID vm3
-
- Replace ``P1_ID``, ``P2_ID``, ``P3_ID``, ``P4_ID``, ``P5_ID``, and
- ``P6_ID`` with the UUIDs of the respective ports.
-
- .. note::
-
- This command requires additional options to successfully launch an
- instance. See the
- `CLI reference `_
- for more information.
-
- Alternatively, you can launch each instance with one network interface and
- attach additional ports later.
-
-#. Create flow classifier ``FC1`` that matches the appropriate packet headers.
-
- .. code-block:: console
-
- $ neutron flow-classifier-create \
- --description "HTTP traffic from 192.0.2.11 to 198.51.100.11" \
- --ethertype IPv4 \
- --source-ip-prefix 192.0.2.11/32 \
- --destination-ip-prefix 198.51.100.11/32 \
- --protocol tcp \
- --source-port 1000:1000 \
- --destination-port 80:80 FC1
-
-#. Create port pair ``PP1`` with ports ``p1`` and ``p2``, ``PP2`` with ports
- ``p3`` and ``p4``, and ``PP3`` with ports ``p5`` and ``p6``.
-
- .. code-block:: console
-
- $ neutron port-pair-create \
- --description "Firewall SF instance 1" \
- --ingress p1 \
- --egress p2 PP1
-
- $ neutron port-pair-create \
- --description "Firewall SF instance 2" \
- --ingress p3 \
- --egress p4 PP2
-
- $ neutron port-pair-create \
- --description "IDS SF instance" \
- --ingress p5 \
- --egress p6 PP3
-
-#. Create port pair group ``PPG1`` with port pair ``PP1`` and ``PP2`` and
- ``PPG2`` with port pair ``PP3``.
-
- .. code-block:: console
-
- $ neutron port-pair-group-create \
- --port-pair PP1 --port-pair PP2 PPG1
- $ neutron port-pair-group-create \
- --port-pair PP3 PPG2
-
- .. note::
-
- You can repeat the ``--port-pair`` option for multiple port pairs of
- functionally equivalent service functions.
-
-#. Create port chain ``PC1`` with port pair groups ``PPG1`` and ``PPG2`` and
- flow classifier ``FC1``.
-
- .. code-block:: console
-
- $ neutron port-chain-create \
- --port-pair-group PPG1 --port-pair-group PPG2 \
- --flow-classifier FC1 PC1
-
- .. note::
-
- You can repeat the ``--port-pair-group`` option to specify additional
- port pair groups in the port chain. A port chain must contain at least
- one port pair group.
-
- You can repeat the ``--flow-classifier`` option to specify multiple
- flow classifiers for a port chain. Each flow classifier identifies
- a flow.
-
-Update a port chain or port pair group
---------------------------------------
-
-* Use the :command:`neutron port-chain-update` command to dynamically add or
- remove port pair groups or flow classifiers on a port chain.
-
- * For example, add port pair group ``PPG3`` to port chain ``PC1``:
-
- .. code-block:: console
-
- $ neutron port-chain-update \
- --port-pair-group PPG1 --port-pair-group PPG2 --port-pair-group PPG3 \
- --flow-classifier FC1 PC1
-
- * For example, add flow classifier ``FC2`` to port chain ``PC1``:
-
- .. code-block:: console
-
- $ neutron port-chain-update \
- --port-pair-group PPG1 --port-pair-group PPG2 \
- --flow-classifier FC1 --flow-classifier FC2 PC1
-
- SFC steers traffic matching the additional flow classifier to the
- port pair groups in the port chain.
-
-* Use the :command:`neutron port-pair-group-update` command to perform dynamic
- scale-out or scale-in operations by adding or removing port pairs on a port
- pair group.
-
- .. code-block:: console
-
- $ neutron port-pair-group-update \
- --port-pair PP1 --port-pair PP2 --port-pair PP4 PPG1
-
- SFC performs load balancing/distribution over the additional service
- functions in the port pair group.
diff --git a/doc/networking-guide/source/config-sriov.rst b/doc/networking-guide/source/config-sriov.rst
deleted file mode 100644
index cadfbc1094..0000000000
--- a/doc/networking-guide/source/config-sriov.rst
+++ /dev/null
@@ -1,473 +0,0 @@
-.. _config-sriov:
-
-======
-SR-IOV
-======
-
-The purpose of this page is to describe how to enable SR-IOV functionality
-available in OpenStack (using OpenStack Networking). This functionality was
-first introduced in the OpenStack Juno release. This page intends to serve as
-a guide for how to configure OpenStack Networking and OpenStack Compute to
-create SR-IOV ports.
-
-The basics
-~~~~~~~~~~
-
-PCI-SIG Single Root I/O Virtualization and Sharing (SR-IOV) functionality is
-available in OpenStack since the Juno release. The SR-IOV specification
-defines a standardized mechanism to virtualize PCIe devices. This mechanism
-can virtualize a single PCIe Ethernet controller to appear as multiple PCIe
-devices. Each device can be directly assigned to an instance, bypassing the
-hypervisor and virtual switch layer. As a result, users are able to achieve
-low latency and near-line wire speed.
-
-The following terms are used throughout this document:
-
-.. list-table::
- :header-rows: 1
- :widths: 10 90
-
- * - Term
- - Definition
- * - PF
- - Physical Function. The physical Ethernet controller that supports
- SR-IOV.
- * - VF
- - Virtual Function. The virtual PCIe device created from a physical
- Ethernet controller.
-
-SR-IOV agent
-------------
-
-The SR-IOV agent allows you to set the admin state of ports, configure port
-security (enable and disable spoof checking), and configure QoS rate limiting
-and minimum bandwidth. You must include the SR-IOV agent on each compute node
-using SR-IOV ports.
-
-.. note::
-
- The SR-IOV agent was optional before Mitaka, and was not enabled by default
- before Liberty.
-
-.. note::
-
- The ability to control port security and QoS rate limit settings was added
- in Liberty.
-
-Supported Ethernet controllers
-------------------------------
-
-The following manufacturers are known to work:
-
-- Intel
-- Mellanox
-- QLogic
-
-For information on **Mellanox SR-IOV Ethernet ConnectX-3/ConnectX-3 Pro cards**, see
-`Mellanox: How To Configure SR-IOV VFs
-`_.
-
-For information on **QLogic SR-IOV Ethernet cards**, see
-`User's Guide OpenStack Deployment with SR-IOV Configuration
-`_.
-
-Using SR-IOV interfaces
-~~~~~~~~~~~~~~~~~~~~~~~
-
-In order to enable SR-IOV, the following steps are required:
-
-#. Create Virtual Functions (Compute)
-#. Whitelist PCI devices in nova-compute (Compute)
-#. Configure neutron-server (Controller)
-#. Configure nova-scheduler (Controller)
-#. Enable neutron sriov-agent (Compute)
-
-We recommend using VLAN provider networks for segregation. This way you can
-combine instances without SR-IOV ports and instances with SR-IOV ports on a
-single network.
-
-.. note::
-
- Throughout this guide, ``eth3`` is used as the PF and ``physnet2`` is used
- as the provider network configured as a VLAN range. These ports may vary in
- different environments.
-
-Create Virtual Functions (Compute)
-----------------------------------
-
-Create the VFs for the network interface that will be used for SR-IOV. We use
-``eth3`` as PF, which is also used as the interface for the VLAN provider
-network and has access to the private networks of all machines.
-
-.. note::
-
- The steps detail how to create VFs using Mellanox ConnectX-4 and newer/Intel
- SR-IOV Ethernet cards on an Intel system. Steps may differ for different
- hardware configurations.
-
-#. Ensure SR-IOV and VT-d are enabled in BIOS.
-
-#. Enable IOMMU in Linux by adding ``intel_iommu=on`` to the kernel parameters,
- for example, using GRUB.
-
-#. On each compute node, create the VFs via the PCI SYS interface:
-
- .. code-block:: console
-
- # echo '8' > /sys/class/net/eth3/device/sriov_numvfs
-
- .. note::
-
- On some PCI devices, observe that when changing the amount of VFs you
- receive the error ``Device or resource busy``. In this case, you must
- first set ``sriov_numvfs`` to ``0``, then set it to your new value.
-
- .. note::
-
- A network interface could be used both for PCI passthrough, using the PF,
- and SR-IOV, using the VFs. If the PF is used, the VF number stored in
- the ``sriov_numvfs`` file is lost. If the PF is attached again to the
- operating system, the number of VFs assigned to this interface will be
- zero. To keep the number of VFs always assigned to this interface,
- modify the interfaces configuration file adding an ``ifup`` script
- command.
-
- In Ubuntu, modifying the ``/etc/network/interfaces`` file:
-
- .. code-block:: ini
-
- auto eth3
- iface eth3 inet dhcp
- pre-up echo '4' > /sys/class/net/eth3/device/sriov_numvfs
-
-
- In Red Hat, modifying the ``/sbin/ifup-local`` file:
-
- .. code-block:: bash
-
- #!/bin/sh
- if [[ "$1" == "eth3" ]]
- then
- echo '4' > /sys/class/net/eth3/device/sriov_numvfs
- fi
-
-
- .. warning::
-
- Alternatively, you can create VFs by passing the ``max_vfs`` to the
- kernel module of your network interface. However, the ``max_vfs``
- parameter has been deprecated, so the PCI SYS interface is the preferred
- method.
-
- You can determine the maximum number of VFs a PF can support:
-
- .. code-block:: console
-
- # cat /sys/class/net/eth3/device/sriov_totalvfs
- 63
-
-#. Verify that the VFs have been created and are in ``up`` state:
-
- .. code-block:: console
-
- # lspci | grep Ethernet
- 82:00.0 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01)
- 82:00.1 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01)
- 82:10.0 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:10.2 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:10.4 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:10.6 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:11.0 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:11.2 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:11.4 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
- 82:11.6 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
-
- .. code-block:: console
-
- # ip link show eth3
- 8: eth3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT qlen 1000
- link/ether a0:36:9f:8f:3f:b8 brd ff:ff:ff:ff:ff:ff
- vf 0 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 1 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 2 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 3 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 4 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 5 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 6 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
- vf 7 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
-
- If the interfaces are down, set them to ``up`` before launching a guest,
- otherwise the instance will fail to spawn:
-
- .. code-block:: console
-
- # ip link set eth3 up
-
-#. Persist created VFs on reboot:
-
- .. code-block:: console
-
- # echo "echo '7' > /sys/class/net/eth3/device/sriov_numvfs" >> /etc/rc.local
-
- .. note::
-
- The suggested way of making PCI SYS settings persistent is through
- the ``sysfsutils`` tool. However, this is not available by default on
- many major distributions.
-
-Whitelist PCI devices nova-compute (Compute)
---------------------------------------------
-
-#. Configure which PCI devices the ``nova-compute`` service may use. Edit
- the ``nova.conf`` file:
-
- .. code-block:: ini
-
- [default]
- pci_passthrough_whitelist = { "devname": "eth3", "physical_network": "physnet2"}
-
- This tells the Compute service that all VFs belonging to ``eth3`` are
- allowed to be passed through to instances and belong to the provider network
- ``physnet2``.
-
- Alternatively the ``pci_passthrough_whitelist`` parameter also supports
- whitelisting by:
-
- - PCI address: The address uses the same syntax as in ``lspci`` and an
- asterisk (*) can be used to match anything.
-
- .. code-block:: ini
-
- pci_passthrough_whitelist = { "address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]", "physical_network": "physnet2" }
-
- For example, to match any domain, bus 0a, slot 00, and all functions:
-
- .. code-block:: ini
-
- pci_passthrough_whitelist = { "address": "*:0a:00.*", "physical_network": "physnet2" }
-
- - PCI ``vendor_id`` and ``product_id`` as displayed by the Linux utility
- ``lspci``.
-
- .. code-block:: ini
-
- pci_passthrough_whitelist = { "vendor_id": "<vendor_id>", "product_id": "<product_id>", "physical_network": "physnet2" }
-
- If the device defined by the PCI address or ``devname`` corresponds to an
- SR-IOV PF, all VFs under the PF will match the entry. Multiple
- ``pci_passthrough_whitelist`` entries per host are supported.
-
-#. Restart the ``nova-compute`` service for the changes to go into effect.
-
-.. _configure_sriov_neutron_server:
-
-Configure neutron-server (Controller)
--------------------------------------
-
-#. Add ``sriovnicswitch`` as mechanism driver. Edit the ``ml2_conf.ini`` file
- on each controller:
-
- .. code-block:: ini
-
- mechanism_drivers = openvswitch,sriovnicswitch
-
-#. Add the ``ml2_conf_sriov.ini`` file as parameter to the ``neutron-server``
- service. Edit the appropriate initialization script to configure the
- ``neutron-server`` service to load the SR-IOV configuration file:
-
- .. code-block:: bash
-
- --config-file /etc/neutron/neutron.conf
- --config-file /etc/neutron/plugin.ini
- --config-file /etc/neutron/plugins/ml2/ml2_conf_sriov.ini
-
-#. Restart the ``neutron-server`` service.
-
-Configure nova-scheduler (Controller)
--------------------------------------
-
-#. On every controller node running the ``nova-scheduler`` service, add
- ``PciPassthroughFilter`` to ``scheduler_default_filters`` to enable
- ``PciPassthroughFilter`` by default.
- Also ensure ``scheduler_available_filters`` parameter under the
- ``[DEFAULT]`` section in ``nova.conf`` is set to ``all_filters``
- to enable all filters provided by the Compute service.
-
- .. code-block:: ini
-
- [DEFAULT]
- scheduler_default_filters = RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, PciPassthroughFilter
- scheduler_available_filters = nova.scheduler.filters.all_filters
-
-#. Restart the ``nova-scheduler`` service.
-
-Enable neutron sriov-agent (Compute)
--------------------------------------
-
-#. Install the SR-IOV agent.
-
-#. Edit the ``sriov_agent.ini`` file on each compute node. For example:
-
- .. code-block:: ini
-
- [securitygroup]
- firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-
- [sriov_nic]
- physical_device_mappings = physnet2:eth3
- exclude_devices =
-
- .. note::
-
- The ``physical_device_mappings`` parameter is not limited to be a 1-1
- mapping between physical networks and NICs. This enables you to map the
- same physical network to more than one NIC. For example, if ``physnet2``
- is connected to ``eth3`` and ``eth4``, then
- ``physnet2:eth3,physnet2:eth4`` is a valid option.
-
- The ``exclude_devices`` parameter is empty, therefore, all the VFs
- associated with eth3 may be configured by the agent. To exclude specific
- VFs, add them to the ``exclude_devices`` parameter as follows:
-
- .. code-block:: ini
-
- exclude_devices = eth1:0000:07:00.2;0000:07:00.3,eth2:0000:05:00.1;0000:05:00.2
-
-#. Ensure the neutron sriov-agent runs successfully:
-
- .. code-block:: console
-
- # neutron-sriov-nic-agent \
- --config-file /etc/neutron/neutron.conf \
- --config-file /etc/neutron/plugins/ml2/sriov_agent.ini
-
-#. Enable the neutron sriov-agent service.
-
- If installing from source, you must configure a daemon file for the init
- system manually.
-
-(Optional) FDB L2 agent extension
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Forwarding DataBase (FDB) population is an L2 agent extension to OVS agent or
-Linux bridge. Its objective is to update the FDB table for existing instance
-using normal port. This enables communication between SR-IOV instances and
-normal instances. The use cases of the FDB population extension are:
-
-* Direct port and normal port instances reside on the same compute node.
-
-* Direct port instance that uses floating IP address and network node
- are located on the same host.
-
-For additional information describing the problem, refer to:
-`Virtual switching technologies and Linux bridge.
-`_
-
-#. Edit the ``ovs_agent.ini`` or ``linuxbridge_agent.ini`` file on each compute
- node. For example:
-
- .. code-block:: console
-
- [agent]
- extensions = fdb
-
-#. Add the FDB section and the ``shared_physical_device_mappings`` parameter.
- This parameter maps each physical port to its physical network name. Each
- physical network can be mapped to several ports:
-
- .. code-block:: console
-
- [FDB]
- shared_physical_device_mappings = physnet1:p1p1, physnet1:p1p2
-
-Launching instances with SR-IOV ports
--------------------------------------
-
-Once configuration is complete, you can launch instances with SR-IOV ports.
-
-#. Get the ``id`` of the network where you want the SR-IOV port to be created:
-
- .. code-block:: console
-
- $ net_id=`neutron net-show net04 | grep "\ id\ " | awk '{ print $4 }'`
-
-#. Create the SR-IOV port. ``vnic_type=direct`` is used here, but other options
- include ``normal``, ``direct-physical``, and ``macvtap``:
-
- .. code-block:: console
-
- $ port_id=`neutron port-create $net_id --name sriov_port --binding:vnic_type direct | grep "\ id\ " | awk '{ print $4 }'`
-
-#. Create the instance. Specify the SR-IOV port created in step two for the
- NIC:
-
- .. code-block:: console
-
- $ openstack server create --flavor m1.large --image ubuntu_14.04 --nic port-id=$port_id test-sriov
-
- .. note::
-
- There are two ways to attach VFs to an instance. You can create an SR-IOV
- port or use the ``pci_alias`` in the Compute service. For more
- information about using ``pci_alias``, refer to `nova-api configuration
- `__.
-
-SR-IOV with InfiniBand
-~~~~~~~~~~~~~~~~~~~~~~
-
-The support for SR-IOV with InfiniBand allows a Virtual PCI device (VF) to
-be directly mapped to the guest, allowing higher performance and advanced
-features such as RDMA (remote direct memory access). To use this feature,
-you must:
-
-#. Use InfiniBand enabled network adapters.
-
-#. Run InfiniBand subnet managers to enable InfiniBand fabric.
-
- All InfiniBand networks must have a subnet manager running for the network
- to function. This is true even when doing a simple network of two
- machines with no switch and the cards are plugged in back-to-back. A
- subnet manager is required for the link on the cards to come up.
- It is possible to have more than one subnet manager. In this case, one
- of them will act as the master, and any other will act as a slave that
- will take over when the master subnet manager fails.
-
-#. Install the ``ebrctl`` utility on the compute nodes.
-
- Check that ``ebrctl`` is listed somewhere in ``/etc/nova/rootwrap.d/*``:
-
- .. code-block:: console
-
- $ grep 'ebrctl' /etc/nova/rootwrap.d/*
-
- If ``ebrctl`` does not appear in any of the rootwrap files, add this to the
- ``/etc/nova/rootwrap.d/compute.filters`` file in the ``[Filters]`` section.
-
- .. code-block:: none
-
- [Filters]
- ebrctl: CommandFilter, ebrctl, root
-
-Known limitations
-~~~~~~~~~~~~~~~~~
-
-* When using Quality of Service (QoS), ``max_burst_kbps`` (burst over
- ``max_kbps``) is not supported. In addition, ``max_kbps`` is rounded to
- Mbps.
-* Security groups are not supported when using SR-IOV, thus, the firewall
- driver must be disabled. This can be done in the ``neutron.conf`` file.
-
- .. code-block:: ini
-
- [securitygroup]
- firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-
-* SR-IOV is not integrated into the OpenStack Dashboard (horizon). Users must
- use the CLI or API to configure SR-IOV interfaces.
-* Live migration is not supported for instances with SR-IOV ports.
-
- .. note::
-
- SR-IOV features may require a specific NIC driver version, depending on the vendor.
- Intel NICs, for example, require ixgbe version 4.4.6 or greater, and ixgbevf version
- 3.2.2 or greater.
diff --git a/doc/networking-guide/source/config-subnet-pools.rst b/doc/networking-guide/source/config-subnet-pools.rst
deleted file mode 100644
index 4b26bf822d..0000000000
--- a/doc/networking-guide/source/config-subnet-pools.rst
+++ /dev/null
@@ -1,261 +0,0 @@
-.. _config-subnet-pools:
-
-============
-Subnet pools
-============
-
-Subnet pools have been made available since the Kilo release. It is a simple
-feature that has the potential to improve your workflow considerably. It also
-provides a building block from which other new features will be built in to
-OpenStack Networking.
-
-To see if your cloud has this feature available, you can check that it is
-listed in the supported aliases. You can do this with the OpenStack client.
-
-.. code-block:: console
-
- $ openstack extension list | grep subnet_allocation
- | Subnet Allocation | subnet_allocation | Enables allocation of subnets
- from a subnet pool |
-
-Why you need them
-~~~~~~~~~~~~~~~~~
-
-Before Kilo, Networking had no automation around the addresses used to create a
-subnet. To create one, you had to come up with the addresses on your own
-without any help from the system. There are valid use cases for this but if you
-are interested in the following capabilities, then subnet pools might be for
-you.
-
-First, would not it be nice if you could turn your pool of addresses over to
-Neutron to take care of? When you need to create a subnet, you just ask for
-addresses to be allocated from the pool. You do not have to worry about what
-you have already used and what addresses are in your pool. Subnet pools can do
-this.
-
-Second, subnet pools can manage addresses across projects. The addresses are
-guaranteed not to overlap. If the addresses come from an externally routable
-pool then you know that all of the projects have addresses which are *routable*
-and unique. This can be useful in the following scenarios.
-
-#. IPv6 since OpenStack Networking has no IPv6 floating IPs.
-#. Routing directly to a project network from an external network.
-
-How they work
-~~~~~~~~~~~~~
-
-A subnet pool manages a pool of addresses from which subnets can be allocated.
-It ensures that there is no overlap between any two subnets allocated from the
-same pool.
-
-As a regular project in an OpenStack cloud, you can create a subnet pool of
-your own and use it to manage your own pool of addresses. This does not require
-any admin privileges. Your pool will not be visible to any other project.
-
-If you are an admin, you can create a pool which can be accessed by any regular
-project. Being a shared resource, there is a quota mechanism to arbitrate
-access.
-
-Quotas
-~~~~~~
-
-Subnet pools have a quota system which is a little bit different than
-other quotas in Neutron. Other quotas in Neutron count discrete
-instances of an object against a quota. Each time you create something
-like a router, network, or a port, it uses one from your total quota.
-
-With subnets, the resource is the IP address space. Some subnets take
-more of it than others. For example, 203.0.113.0/24 uses 256 addresses
-in one subnet but 198.51.100.224/28 uses only 16. If address space is
-limited, the quota system can encourage efficient use of the space.
-
-With IPv4, the default_quota can be set to the number of absolute
-addresses any given project is allowed to consume from the pool. For
-example, with a quota of 128, I might get 203.0.113.128/26,
-203.0.113.224/28, and still have room to allocate 48 more addresses in
-the future.
-
-With IPv6 it is a little different. It is not practical to count
-individual addresses. To avoid ridiculously large numbers, the quota is
-expressed in the number of /64 subnets which can be allocated. For
-example, with a default_quota of 3, I might get 2001:db8:c18e:c05a::/64,
-2001:db8:221c:8ef3::/64, and still have room to allocate one more prefix
-in the future.
-
-Default subnet pools
-~~~~~~~~~~~~~~~~~~~~
-
-Beginning with Mitaka, a subnet pool can be marked as the default. This
-is handled with a new extension.
-
-.. code-block:: console
-
- $ openstack extension list | grep default-subnetpools
- | Default Subnetpools | default-subnetpools | Provides ability to mark
- and use a subnetpool as the default |
-
-
-An administrator can mark a pool as default. Only one pool from each
-address family can be marked default.
-
-.. code-block:: console
-
- $ openstack subnet pool set --default 74348864-f8bf-4fc0-ab03-81229d189467
-
-If there is a default, it can be requested by passing
-``--use-default-subnetpool`` instead of
-``--subnet-pool SUBNETPOOL``.
-
-Demo
-----
-
-If you have access to an OpenStack Kilo or later based neutron, you can play
-with this feature now. Give it a try. All of the following commands work
-equally as well with IPv6 addresses.
-
-First, as admin, create a shared subnet pool:
-
-.. code-block:: console
-
- $ openstack subnet pool create --share --pool-prefix 203.0.113.0/24 \
- --default-prefix-length 26 demo-subnetpool4
- +-------------------+--------------------------------+
- | Field | Value |
- +-------------------+--------------------------------+
- | address_scope_id | None |
- | created_at | 2016-12-14T07:21:26Z |
- | default_prefixlen | 26 |
- | default_quota | None |
- | description | |
- | headers | |
- | id | d3aefb76-2527-43d4-bc21-0ec253 |
- | | 908545 |
- | ip_version | 4 |
- | is_default | False |
- | max_prefixlen | 32 |
- | min_prefixlen | 8 |
- | name | demo-subnetpool4 |
- | prefixes | 203.0.113.0/24 |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d |
- | | 7c |
- | revision_number | 1 |
- | shared | True |
- | updated_at | 2016-12-14T07:21:26Z |
- +-------------------+--------------------------------+
-
-The ``default_prefix_length`` defines the subnet size you will get
-if you do not specify ``--prefix-length`` when creating a subnet.
-
-Do essentially the same thing for IPv6 and there are now two subnet
-pools. Regular projects can see them. (the output is trimmed a bit
-for display)
-
-.. code-block:: console
-
- $ openstack subnet pool list
- +------------------+------------------+--------------------+
- | ID | Name | Prefixes |
- +------------------+------------------+--------------------+
- | 2b7cc19f-0114-4e | demo-subnetpool | 2001:db8:a583::/48 |
- | f4-ad86-c1bb91fc | | |
- | d1f9 | | |
- | d3aefb76-2527-43 | demo-subnetpool4 | 203.0.113.0/24 |
- | d4-bc21-0ec25390 | | |
- | 8545 | | |
- +------------------+------------------+--------------------+
-
-Now, use them. It is easy to create a subnet from a pool:
-
-.. code-block:: console
-
- $ openstack subnet create --ip-version 4 --subnet-pool \
- demo-subnetpool4 --network demo-network1 demo-subnet1
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 203.0.113.194-203.0.113.254 |
- | cidr | 203.0.113.192/26 |
- | created_at | 2016-12-14T07:33:13Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 203.0.113.193 |
- | headers | |
- | host_routes | |
- | id | 8d4fbae3-076c-4c08-b2dd-2d6175115a5e |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | demo-subnet1 |
- | network_id | 6b377f77-ce00-4ff6-8676-82343817470d |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | d3aefb76-2527-43d4-bc21-0ec253908545 |
- | updated_at | 2016-12-14T07:33:13Z |
- +-------------------+--------------------------------------+
-
-
-You can request a specific subnet from the pool. You need to specify a subnet
-that falls within the pool's prefixes. If the subnet is not already allocated,
-the request succeeds. You can leave off the IP version because it is deduced
-from the subnet pool.
-
-.. code-block:: console
-
- $ openstack subnet create --subnet-pool demo-subnetpool4 \
- --network demo-network1 --subnet-range 203.0.113.128/26 subnet2
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | allocation_pools | 203.0.113.130-203.0.113.190 |
- | cidr | 203.0.113.128/26 |
- | created_at | 2016-12-14T07:27:40Z |
- | description | |
- | dns_nameservers | |
- | enable_dhcp | True |
- | gateway_ip | 203.0.113.129 |
- | headers | |
- | host_routes | |
- | id | d32814e3-cf46-4371-80dd-498a80badfba |
- | ip_version | 4 |
- | ipv6_address_mode | None |
- | ipv6_ra_mode | None |
- | name | subnet2 |
- | network_id | 6b377f77-ce00-4ff6-8676-82343817470d |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | revision_number | 2 |
- | service_types | |
- | subnetpool_id | d3aefb76-2527-43d4-bc21-0ec253908545 |
- | updated_at | 2016-12-14T07:27:40Z |
- +-------------------+--------------------------------------+
-
-
-If the pool becomes exhausted, load some more prefixes:
-
-.. code-block:: console
-
- $ openstack subnet pool set --pool-prefix \
- 198.51.100.0/24 demo-subnetpool4
- $ openstack subnet pool show demo-subnetpool4
- +-------------------+--------------------------------------+
- | Field | Value |
- +-------------------+--------------------------------------+
- | address_scope_id | None |
- | created_at | 2016-12-14T07:21:26Z |
- | default_prefixlen | 26 |
- | default_quota | None |
- | description | |
- | id | d3aefb76-2527-43d4-bc21-0ec253908545 |
- | ip_version | 4 |
- | is_default | False |
- | max_prefixlen | 32 |
- | min_prefixlen | 8 |
- | name | demo-subnetpool4 |
- | prefixes | 198.51.100.0/24, 203.0.113.0/24 |
- | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c |
- | revision_number | 2 |
- | shared | True |
- | updated_at | 2016-12-14T07:30:32Z |
- +-------------------+--------------------------------------+
-
diff --git a/doc/networking-guide/source/config-trunking.rst b/doc/networking-guide/source/config-trunking.rst
deleted file mode 100644
index 4fa9c447f1..0000000000
--- a/doc/networking-guide/source/config-trunking.rst
+++ /dev/null
@@ -1,304 +0,0 @@
-.. _config-trunking:
-
-========
-Trunking
-========
-
-The network trunk service allows multiple networks to be connected to an
-instance using a single virtual NIC (vNIC). Multiple networks can be presented
-to an instance by connecting it to a single port.
-
-Operation
-~~~~~~~~~
-
-Network trunking consists of a service plug-in and a set of drivers that
-manage trunks on different layer-2 mechanism drivers. Users can create a
-port, associate it with a trunk, and launch an instance on that port. Users
-can dynamically attach and detach additional networks without disrupting
-operation of the instance.
-
-Every trunk has a parent port and can have any number of subports.
-The parent port is the port that the trunk is associated with. Users
-create instances and specify the parent port of the trunk when launching
-instances attached to a trunk.
-
-The network presented by the subport is the network of the associated
-port. When creating a subport, a ``segmentation-id`` may be required by
-the driver. ``segmentation-id`` defines the segmentation ID on which the
-subport network is presented to the instance. ``segmentation-type`` may be
-required by certain drivers like OVS, although at this time only ``vlan`` is
-supported as a ``segmentation-type``.
-
-.. note::
-
- The ``segmentation-type`` and ``segmentation-id`` parameters are optional
- in the Networking API. However, all drivers as of the Newton release
- require both to be provided when adding a subport to a trunk. Future
- drivers may be implemented without this requirement.
-
-The ``segmentation-type`` and ``segmentation-id`` specified by the user on the
-subports is intentionally decoupled from the ``segmentation-type`` and ID of
-the networks. For example, it is possible to configure the Networking service
-with ``tenant_network_types = vxlan`` and still create subports with
-``segmentation_type = vlan``. The Networking service performs remapping as
-necessary.
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-The ML2 plug-in supports trunking with the following mechanism drivers:
-
-* Open vSwitch (OVS)
-* Linux bridge
-* Open Virtual Network (OVN)
-
-When using a ``segmentation-type`` of ``vlan``, the OVS and Linux bridge
-drivers present the network of the parent port as the untagged VLAN and all
-subports as tagged VLANs.
-
-Controller node
----------------
-
-* In the ``neutron.conf`` file, enable the trunk service plug-in:
-
- .. code-block:: ini
-
- [DEFAULT]
- service_plugins = trunk
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials and list the enabled
- extensions.
-#. Use the command :command:`openstack extension list --network` to verify
- that the ``Trunk Extension`` and ``Trunk port details`` extensions are
- enabled.
-
-Workflow
---------
-
-At a high level, the basic steps to launching an instance on a trunk are
-the following:
-
-#. Create networks and subnets for the trunk and subports
-#. Create the trunk
-#. Add subports to the trunk
-#. Launch an instance on the trunk
-
-Create networks and subnets for the trunk and subports
-------------------------------------------------------
-
-Create the appropriate networks for the trunk and subports that will be added
-to the trunk. Create subnets on these networks to ensure the desired layer-3
-connectivity over the trunk.
-
-Create the trunk
-----------------
-
-* Create a parent port for the trunk.
-
- .. code-block:: console
-
- $ openstack port create --network project-net-A trunk-parent
- +-------------------+-------------------------------------------------------------------------+
- | Field | Value |
- +-------------------+-------------------------------------------------------------------------+
- | admin_state_up | UP |
- | binding_vif_type | unbound |
- | binding_vnic_type | normal |
- | fixed_ips | ip_address='192.0.2.7',subnet_id='8b957198-d3cf-4953-8449-ad4e4dd712cc' |
- | id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 |
- | mac_address | fa:16:3e:dd:c4:d1 |
- | name | trunk-parent |
- | network_id | 1b47d3e7-cda5-48e4-b0c8-d20bd7e35f55 |
- +-------------------+-------------------------------------------------------------------------+
-
-* Create the trunk using ``--parent-port`` to reference the port from
- the previous step:
-
- .. code-block:: console
-
- $ openstack network trunk create --parent-port trunk-parent trunk1
- +-----------------+--------------------------------------+
- | Field | Value |
- +-----------------+--------------------------------------+
- | admin_state_up | UP |
- | id | fdf02fcb-1844-45f1-9d9b-e4c2f522c164 |
- | name | trunk1 |
- | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 |
- | sub_ports | |
- +-----------------+--------------------------------------+
-
-Add subports to the trunk
--------------------------
-
-Subports can be added to a trunk in two ways: creating the trunk with subports
-or adding subports to an existing trunk.
-
-* Create trunk with subports:
-
- This method entails creating the trunk with subports specified at trunk
- creation.
-
- .. code-block:: console
-
- $ openstack port create --network project-net-A trunk-parent
- +-------------------+-------------------------------------------------------------------------+
- | Field | Value |
- +-------------------+-------------------------------------------------------------------------+
- | admin_state_up | UP |
- | binding_vif_type | unbound |
- | binding_vnic_type | normal |
- | fixed_ips | ip_address='192.0.2.7',subnet_id='8b957198-d3cf-4953-8449-ad4e4dd712cc' |
- | id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 |
- | mac_address | fa:16:3e:dd:c4:d1 |
- | name | trunk-parent |
- | network_id | 1b47d3e7-cda5-48e4-b0c8-d20bd7e35f55 |
- +-------------------+-------------------------------------------------------------------------+
-
- $ openstack port create --network trunked-net subport1
- +-------------------+----------------------------------------------------------------------------+
- | Field | Value |
- +-------------------+----------------------------------------------------------------------------+
- | admin_state_up | UP |
- | binding_vif_type | unbound |
- | binding_vnic_type | normal |
- | fixed_ips | ip_address='198.51.100.8',subnet_id='2a860e2c-922b-437b-a149-b269a8c9b120' |
- | id | 91f9dde8-80a4-4506-b5da-c287feb8f5d8 |
- | mac_address | fa:16:3e:ba:f0:4d |
- | name | subport1 |
- | network_id | aef78ec5-16e3-4445-b82d-b2b98c6a86d9 |
- +-------------------+----------------------------------------------------------------------------+
-
- $ openstack network trunk create \
- --parent-port trunk-parent \
- --subport port=subport1,segmentation-type=vlan,segmentation-id=100 \
- trunk1
- +----------------+-------------------------------------------------------------------------------------------------+
- | Field | Value |
- +----------------+-------------------------------------------------------------------------------------------------+
- | admin_state_up | UP |
- | id | 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 |
- | name | trunk1 |
- | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 |
- | sub_ports | port_id='73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38', segmentation_id='100', segmentation_type='vlan' |
- +----------------+-------------------------------------------------------------------------------------------------+
-
-* Add subports to an existing trunk:
-
- This method entails creating a trunk, then adding subports to the trunk
- after it has already been created.
-
- .. code-block:: console
-
- $ openstack network trunk set --subport \
- port=subport1,segmentation-type=vlan,segmentation-id=100 \
- trunk1
-
- .. note::
-
- The command provides no output.
-
- .. code-block:: console
-
- $ openstack network trunk show trunk1
- +----------------+-------------------------------------------------------------------------------------------------+
- | Field | Value |
- +----------------+-------------------------------------------------------------------------------------------------+
- | admin_state_up | UP |
- | id | 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 |
- | name | trunk1 |
- | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 |
- | sub_ports | port_id='73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38', segmentation_id='100', segmentation_type='vlan' |
- +----------------+-------------------------------------------------------------------------------------------------+
-
-Launch an instance on the trunk
--------------------------------
-
-* Show trunk details to get the ``port_id`` of the trunk.
-
- .. code-block:: console
-
- $ openstack network trunk show trunk1
- +----------------+--------------------------------------+
- | Field | Value |
- +----------------+--------------------------------------+
- | admin_state_up | UP |
- | id | 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 |
- | name | trunk |
- | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 |
- | sub_ports | |
- +----------------+--------------------------------------+
-
-* Launch the instance by specifying ``port-id`` using the value of ``port_id``
- from the trunk details. Launching an instance on a subport is not supported.
-
-Using trunks and subports inside an instance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When configuring instances to use a subport, ensure that the interface on the
-instance is set to use the MAC address assigned to the port by the Networking
-service. Instances are not made aware of changes made to the trunk after they
-are active. For example, when a subport with a ``segmentation-type`` of
-``vlan`` is added to a trunk, any operations specific to the instance operating
-system that allow the instance to send and receive traffic on the new VLAN must
-be handled outside of the Networking service.
-
-When creating subports, the MAC address of the trunk parent port can be set
-on the subport. This will allow VLAN subinterfaces inside an instance launched
-on a trunk to be configured without explicitly setting a MAC address. Although
-unique MAC addresses can be used for subports, this can present issues with
-ARP spoof protections and the native OVS firewall driver. If the native OVS
-firewall driver is to be used, we recommend that the MAC address of the parent
-port be re-used on all subports.
-
-Trunk states
-~~~~~~~~~~~~
-
-* ``ACTIVE``
-
- The trunk is ``ACTIVE`` when both the logical and physical resources have
- been created. This means that all operations within the Networking and
- Compute services have completed and the trunk is ready for use.
-
-* ``DOWN``
-
- A trunk is ``DOWN`` when it is first created without an instance launched on
- it, or when the instance associated with the trunk has been deleted.
-
-* ``DEGRADED``
-
- A trunk can be in a ``DEGRADED`` state when a temporary failure during
- the provisioning process is encountered. This includes situations where a
- subport add or remove operation fails. When in a degraded state, the trunk
- is still usable and some subports may be usable as well. Operations that
- cause the trunk to go into a ``DEGRADED`` state can be retried to fix
- temporary failures and move the trunk into an ``ACTIVE`` state.
-
-* ``ERROR``
-
- A trunk is in ``ERROR`` state if the request leads to a conflict or an
- error that cannot be fixed by retrying the request. The ``ERROR`` status
- can be encountered if the network is not compatible with the trunk
- configuration or the binding process leads to a persistent failure. When
- a trunk is in ``ERROR`` state, it must be brought to a sane state
- (``ACTIVE``), or else requests to add subports will be rejected.
-
-* ``BUILD``
-
- A trunk is in ``BUILD`` state while the resources associated with the
- trunk are in the process of being provisioned. Once the trunk and all of
- the subports have been provisioned successfully, the trunk transitions
- to ``ACTIVE``. If there was a partial failure, the trunk transitions
- to ``DEGRADED``.
-
- When ``admin_state`` is set to ``DOWN``, the user is blocked from performing
- operations on the trunk. ``admin_state`` is set by the user and should not be
- used to monitor the health of the trunk.
-
-Limitations and issues
-~~~~~~~~~~~~~~~~~~~~~~
-
-* See `bugs `__ for
- more information.
diff --git a/doc/networking-guide/source/config.rst b/doc/networking-guide/source/config.rst
deleted file mode 100644
index dd3d3a411d..0000000000
--- a/doc/networking-guide/source/config.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-.. _config:
-
-=============
-Configuration
-=============
-
-.. toctree::
- :maxdepth: 2
-
- config-services-agent
- config-ml2
- config-address-scopes
- config-auto-allocation
- config-az
- config-bgp-dynamic-routing
- config-dhcp-ha
- config-dns-int
- config-dns-res
- config-dvr-ha-snat
- config-ipam
- config-ipv6
- config-lbaas
- config-macvtap
- config-mtu
- config-ovs-dpdk
- config-ovsfwdriver
- config-qos
- config-rbac
- config-routed-networks
- config-sfc
- config-sriov
- config-subnet-pools
- config-service-subnets
- config-trunking
-
-.. note::
-
- For general configuration, see the `Configuration Reference
- `_.
diff --git a/doc/networking-guide/source/deploy-lb-ha-vrrp.rst b/doc/networking-guide/source/deploy-lb-ha-vrrp.rst
deleted file mode 100644
index 09f5153b1f..0000000000
--- a/doc/networking-guide/source/deploy-lb-ha-vrrp.rst
+++ /dev/null
@@ -1,178 +0,0 @@
-.. _deploy-lb-ha-vrrp:
-
-==========================================
-Linux bridge: High availability using VRRP
-==========================================
-
-.. include:: shared/deploy-ha-vrrp.txt
-
-.. warning::
-
- This high-availability mechanism is not compatible with the layer-2
- population mechanism. You must disable layer-2 population in the
- ``linuxbridge_agent.ini`` file and restart the Linux bridge agent
- on all existing network and compute nodes prior to deploying the example
- configuration.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Add one network node with the following components:
-
-* Three network interfaces: management, provider, and overlay.
-* OpenStack Networking layer-2 agent, layer-3 agent, and any
- dependencies.
-
-.. note::
-
- You can keep the DHCP and metadata agents on each compute node or
- move them to the network nodes.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-lb-ha-vrrp-overview.png
- :alt: High-availability using Linux bridge with VRRP - overview
-
-The following figure shows components and connectivity for one self-service
-network and one untagged (flat) network. The master router resides on network
-node 1. In this particular case, the instance resides on the same compute
-node as the DHCP agent for the network. If the DHCP agent resides on another
-compute node, the latter only contains a DHCP namespace and Linux bridge
-with a port on the overlay physical network interface.
-
-.. image:: figures/deploy-lb-ha-vrrp-compconn1.png
- :alt: High-availability using Linux bridge with VRRP - components and connectivity - one network
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to add support for
-high-availability using VRRP to an existing operational environment that
-supports self-service networks.
-
-Controller node
----------------
-
-#. In the ``neutron.conf`` file:
-
- * Enable VRRP.
-
- .. code-block:: ini
-
- [DEFAULT]
- l3_ha = True
-
-#. Restart the following services:
-
- * Server
-
-Network node 1
---------------
-
-No changes.
-
-Network node 2
---------------
-
-#. Install the Networking service Linux bridge layer-2 agent and layer-3
- agent.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. In the ``linuxbridge_agent.ini`` file, configure the layer-2 agent.
-
- .. code-block:: ini
-
- [linux_bridge]
- physical_interface_mappings = provider:PROVIDER_INTERFACE
-
- [vxlan]
- enable_vxlan = True
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
-
- [securitygroup]
- firewall_driver = iptables
-
- Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface
- that handles provider networks. For example, ``eth1``.
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- interface that handles VXLAN overlays for self-service networks.
-
-#. In the ``l3_agent.ini`` file, configure the layer-3 agent.
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = linuxbridge
- external_network_bridge =
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-#. Start the following services:
-
- * Linux bridge agent
- * Layer-3 agent
-
-Compute nodes
--------------
-
-No changes.
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents.
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 09de6af6-c5f1-4548-8b09-18801f068c57 | Linux bridge agent | compute2 | | True | UP | neutron-linuxbridge-agent |
- | 188945d1-9e70-4803-a276-df924e0788a4 | Linux bridge agent | compute1 | | True | UP | neutron-linuxbridge-agent |
- | e76c440d-d5f6-4316-a674-d689630b629e | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | e67367de-6657-11e6-86a4-931cd04404bb | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | e8174cae-6657-11e6-89f0-534ac6d0cb5c | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | ece49ec6-6657-11e6-bafb-c7560f19197d | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- | 598f6357-4331-4da5-a420-0f5be000bec9 | L3 agent | network1 | nova | True | UP | neutron-l3-agent |
- | f4734e0f-bcd5-4922-a19d-e31d56b0a7ae | Linux bridge agent | network1 | | True | UP | neutron-linuxbridge-agent |
- | 670e5805-340b-4182-9825-fa8319c99f23 | Linux bridge agent | network2 | | True | UP | neutron-linuxbridge-agent |
- | 96224e89-7c15-42e9-89c4-8caac7abdd54 | L3 agent | network2 | nova | True | UP | neutron-l3-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-.. include:: shared/deploy-ha-vrrp-initialnetworks.txt
-
-Verify network operation
-------------------------
-
-.. include:: shared/deploy-ha-vrrp-verifynetworkoperation.txt
-
-Verify failover operation
--------------------------
-
-.. include:: shared/deploy-ha-vrrp-verifyfailoveroperation.txt
-
-Keepalived VRRP health check
-----------------------------
-
-.. include:: shared/keepalived-vrrp-healthcheck.txt
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-This high-availability mechanism simply augments :ref:`deploy-lb-selfservice`
-with failover of layer-3 services to another router if the master router
-fails. Thus, you can reference :ref:`Self-service network traffic flow
-` for normal operation.
diff --git a/doc/networking-guide/source/deploy-lb-provider.rst b/doc/networking-guide/source/deploy-lb-provider.rst
deleted file mode 100644
index ec4f60da5b..0000000000
--- a/doc/networking-guide/source/deploy-lb-provider.rst
+++ /dev/null
@@ -1,365 +0,0 @@
-.. _deploy-lb-provider:
-
-===============================
-Linux bridge: Provider networks
-===============================
-
-The provider networks architecture example provides layer-2 connectivity
-between instances and the physical network infrastructure using VLAN
-(802.1q) tagging. It supports one untagged (flat) network and and up to
-4095 tagged (VLAN) networks. The actual quantity of VLAN networks depends
-on the physical network infrastructure. For more information on provider
-networks, see :ref:`intro-os-networking-provider`.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-One controller node with the following components:
-
-* Two network interfaces: management and provider.
-* OpenStack Networking server service and ML2 plug-in.
-
-Two compute nodes with the following components:
-
-* Two network interfaces: management and provider.
-* OpenStack Networking Linux bridge layer-2 agent, DHCP agent, metadata agent,
- and any dependencies.
-
-.. note::
-
- Larger deployments typically deploy the DHCP and metadata agents on a
- subset of compute nodes to increase performance and redundancy. However,
- too many agents can overwhelm the message bus. Also, to further simplify
- any deployment, you can omit the metadata agent and use a configuration
- drive to provide metadata to instances.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-lb-provider-overview.png
- :alt: Provider networks using Linux bridge - overview
-
-The following figure shows components and connectivity for one untagged
-(flat) network. In this particular case, the instance resides on the
-same compute node as the DHCP agent for the network. If the DHCP agent
-resides on another compute node, the latter only contains a DHCP namespace
-and Linux bridge with a port on the provider physical network interface.
-
-.. image:: figures/deploy-lb-provider-compconn1.png
- :alt: Provider networks using Linux bridge - components and connectivity - one network
-
-The following figure describes virtual connectivity among components for
-two tagged (VLAN) networks. Essentially, each network uses a separate
-bridge that contains a port on the VLAN sub-interface on the provider
-physical network interface. Similar to the single untagged network case,
-the DHCP agent may reside on a different compute node.
-
-.. image:: figures/deploy-lb-provider-compconn2.png
- :alt: Provider networks using Linux bridge - components and connectivity - multiple networks
-
-.. note::
-
- These figures omit the controller node because it does not handle instance
- network traffic.
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to deploy provider
-networks in your environment.
-
-Controller node
----------------
-
-#. Install the Networking service components that provides the
- ``neutron-server`` service and ML2 plug-in.
-
-#. In the ``neutron.conf`` file:
-
- * Configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
- * Disable service plug-ins because provider networks do not require
- any. However, this breaks portions of the dashboard that manage
- the Networking service. See the
- `Ocata Install Tutorials and Guides `__
- for more information.
-
- .. code-block:: ini
-
- [DEFAULT]
- service_plugins =
-
- * Enable two DHCP agents per network so both compute nodes can
- provide DHCP service provider networks.
-
- .. code-block:: ini
-
- [DEFAULT]
- dhcp_agents_per_network = 2
-
- * If necessary, :ref:`configure MTU `.
-
-#. In the ``ml2_conf.ini`` file:
-
- * Configure drivers and network types:
-
- .. code-block:: ini
-
- [ml2]
- type_drivers = flat,vlan
- tenant_network_types =
- mechanism_drivers = linuxbridge
- extension_drivers = port_security
-
- * Configure network mappings:
-
- .. code-block:: ini
-
- [ml2_type_flat]
- flat_networks = provider
-
- [ml2_type_vlan]
- network_vlan_ranges = provider
-
- .. note::
-
- The ``tenant_network_types`` option contains no value because the
- architecture does not support self-service networks.
-
- .. note::
-
- The ``provider`` value in the ``network_vlan_ranges`` option lacks VLAN
- ID ranges to support use of arbitrary VLAN IDs.
-
-#. Populate the database.
-
- .. code-block:: console
-
- # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
- --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
-
-#. Start the following services:
-
- * Server
-
-Compute nodes
--------------
-
-#. Install the Networking service Linux bridge layer-2 agent.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. In the ``linuxbridge_agent.ini`` file, configure the Linux bridge agent:
-
- .. code-block:: ini
-
- [linux_bridge]
- physical_interface_mappings = provider:PROVIDER_INTERFACE
-
- [vxlan]
- enable_vxlan = False
-
- [securitygroup]
- firewall_driver = iptables
-
- Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface
- that handles provider networks. For example, ``eth1``.
-
-#. In the ``dhcp_agent.ini`` file, configure the DHCP agent:
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = linuxbridge
- enable_isolated_metadata = True
- force_metadata = True
-
- .. note::
-
- The ``force_metadata`` option forces the DHCP agent to provide
- a host route to the metadata service on ``169.254.169.254``
- regardless of whether the subnet contains an interface on a
- router, thus maintaining similar and predictable metadata behavior
- among subnets.
-
-#. In the ``metadata_agent.ini`` file, configure the metadata agent:
-
- .. code-block:: ini
-
- [DEFAULT]
- nova_metadata_ip = controller
- metadata_proxy_shared_secret = METADATA_SECRET
-
- The value of ``METADATA_SECRET`` must match the value of the same option
- in the ``[neutron]`` section of the ``nova.conf`` file.
-
-#. Start the following services:
-
- * Linux bridge agent
- * DHCP agent
- * Metadata agent
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents:
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 09de6af6-c5f1-4548-8b09-18801f068c57 | Linux bridge agent | compute2 | | True | UP | neutron-linuxbridge-agent |
- | 188945d1-9e70-4803-a276-df924e0788a4 | Linux bridge agent | compute1 | | True | UP | neutron-linuxbridge-agent |
- | e76c440d-d5f6-4316-a674-d689630b629e | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | e67367de-6657-11e6-86a4-931cd04404bb | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | e8174cae-6657-11e6-89f0-534ac6d0cb5c | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | ece49ec6-6657-11e6-bafb-c7560f19197d | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-.. include:: shared/deploy-provider-initialnetworks.txt
-
-Verify network operation
-------------------------
-
-.. include:: shared/deploy-provider-verifynetworkoperation.txt
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-.. include:: shared/deploy-provider-networktrafficflow.txt
-
-North-south scenario: Instance with a fixed IP address
-------------------------------------------------------
-
-* The instance resides on compute node 1 and uses provider network 1.
-* The instance sends a packet to a host on the Internet.
-
-The following steps involve compute node 1.
-
-#. The instance interface (1) forwards the packet to the provider
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the provider bridge handle firewalling
- and connection tracking for the packet.
-#. The VLAN sub-interface port (4) on the provider bridge forwards
- the packet to the physical network interface (5).
-#. The physical network interface (5) adds VLAN tag 101 to the packet and
- forwards it to the physical network infrastructure switch (6).
-
-The following steps involve the physical network infrastructure:
-
-#. The switch removes VLAN tag 101 from the packet and forwards it to the
- router (7).
-#. The router routes the packet from the provider network (8) to the
- external network (9) and forwards the packet to the switch (10).
-#. The switch forwards the packet to the external network (11).
-#. The external network (12) receives the packet.
-
-.. image:: figures/deploy-lb-provider-flowns1.png
- :alt: Provider networks using Linux bridge - network traffic flow - north/south
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-East-west scenario 1: Instances on the same network
----------------------------------------------------
-
-Instances on the same network communicate directly between compute nodes
-containing those instances.
-
-* Instance 1 resides on compute node 1 and uses provider network 1.
-* Instance 2 resides on compute node 2 and uses provider network 1.
-* Instance 1 sends a packet to instance 2.
-
-The following steps involve compute node 1:
-
-#. The instance 1 interface (1) forwards the packet to the provider
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the provider bridge handle firewalling
- and connection tracking for the packet.
-#. The VLAN sub-interface port (4) on the provider bridge forwards
- the packet to the physical network interface (5).
-#. The physical network interface (5) adds VLAN tag 101 to the packet and
- forwards it to the physical network infrastructure switch (6).
-
-The following steps involve the physical network infrastructure:
-
-#. The switch forwards the packet from compute node 1 to compute node 2 (7).
-
-The following steps involve compute node 2:
-
-#. The physical network interface (8) removes VLAN tag 101 from the packet
- and forwards it to the VLAN sub-interface port (9) on the provider bridge.
-#. Security group rules (10) on the provider bridge handle firewalling
- and connection tracking for the packet.
-#. The provider bridge instance port (11) forwards the packet to
- the instance 2 interface (12) via ``veth`` pair.
-
-.. image:: figures/deploy-lb-provider-flowew1.png
- :alt: Provider networks using Linux bridge - network traffic flow - east/west scenario 1
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-East-west scenario 2: Instances on different networks
------------------------------------------------------
-
-Instances communicate via router on the physical network infrastructure.
-
-* Instance 1 resides on compute node 1 and uses provider network 1.
-* Instance 2 resides on compute node 1 and uses provider network 2.
-* Instance 1 sends a packet to instance 2.
-
-.. note::
-
- Both instances reside on the same compute node to illustrate how VLAN
- tagging enables multiple logical layer-2 networks to use the same
- physical layer-2 network.
-
-The following steps involve the compute node:
-
-#. The instance 1 interface (1) forwards the packet to the provider
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the provider bridge handle firewalling
- and connection tracking for the packet.
-#. The VLAN sub-interface port (4) on the provider bridge forwards
- the packet to the physical network interface (5).
-#. The physical network interface (5) adds VLAN tag 101 to the packet and
- forwards it to the physical network infrastructure switch (6).
-
-The following steps involve the physical network infrastructure:
-
-#. The switch removes VLAN tag 101 from the packet and forwards it to the
- router (7).
-#. The router routes the packet from provider network 1 (8) to provider
- network 2 (9).
-#. The router forwards the packet to the switch (10).
-#. The switch adds VLAN tag 102 to the packet and forwards it to compute
- node 1 (11).
-
-The following steps involve the compute node:
-
-#. The physical network interface (12) removes VLAN tag 102 from the packet
- and forwards it to the VLAN sub-interface port (13) on the provider bridge.
-#. Security group rules (14) on the provider bridge handle firewalling
- and connection tracking for the packet.
-#. The provider bridge instance port (15) forwards the packet to
- the instance 2 interface (16) via ``veth`` pair.
-
-.. image:: figures/deploy-lb-provider-flowew2.png
- :alt: Provider networks using Linux bridge - network traffic flow - east/west scenario 2
-
-.. note::
-
- Return traffic follows similar steps in reverse.
diff --git a/doc/networking-guide/source/deploy-lb-selfservice.rst b/doc/networking-guide/source/deploy-lb-selfservice.rst
deleted file mode 100644
index ad535895e9..0000000000
--- a/doc/networking-guide/source/deploy-lb-selfservice.rst
+++ /dev/null
@@ -1,422 +0,0 @@
-.. _deploy-lb-selfservice:
-
-===================================
-Linux bridge: Self-service networks
-===================================
-
-This architecture example augments :ref:`deploy-lb-provider` to support
-a nearly limitless quantity of entirely virtual networks. Although the
-Networking service supports VLAN self-service networks, this example
-focuses on VXLAN self-service networks. For more information on
-self-service networks, see :ref:`intro-os-networking-selfservice`.
-
-.. note::
-
- The Linux bridge agent lacks support for other overlay protocols such
- as GRE and Geneve.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Add one network node with the following components:
-
-* Three network interfaces: management, provider, and overlay.
-* OpenStack Networking Linux bridge layer-2 agent, layer-3 agent, and any
- dependencies.
-
-Modify the compute nodes with the following components:
-
-* Add one network interface: overlay.
-
-.. note::
-
- You can keep the DHCP and metadata agents on each compute node or
- move them to the network node.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-lb-selfservice-overview.png
- :alt: Self-service networks using Linux bridge - overview
-
-The following figure shows components and connectivity for one self-service
-network and one untagged (flat) provider network. In this particular case, the
-instance resides on the same compute node as the DHCP agent for the network.
-If the DHCP agent resides on another compute node, the latter only contains
-a DHCP namespace and Linux bridge with a port on the overlay physical network
-interface.
-
-.. image:: figures/deploy-lb-selfservice-compconn1.png
- :alt: Self-service networks using Linux bridge - components and connectivity - one network
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to add support for
-self-service networks to an existing operational environment that supports
-provider networks.
-
-Controller node
----------------
-
-#. In the ``neutron.conf`` file:
-
- * Enable routing and allow overlapping IP address ranges.
-
- .. code-block:: ini
-
- [DEFAULT]
- service_plugins = router
- allow_overlapping_ips = True
-
-#. In the ``ml2_conf.ini`` file:
-
- * Add ``vxlan`` to type drivers and project network types.
-
- .. code-block:: ini
-
- [ml2]
- type_drivers = flat,vlan,vxlan
- tenant_network_types = vxlan
-
- * Enable the layer-2 population mechanism driver.
-
- .. code-block:: ini
-
- [ml2]
- mechanism_drivers = linuxbridge,l2population
-
- * Configure the VXLAN network ID (VNI) range.
-
- .. code-block:: ini
-
- [ml2_type_vxlan]
- vni_ranges = VNI_START:VNI_END
-
- Replace ``VNI_START`` and ``VNI_END`` with appropriate numerical
- values.
-
-#. Restart the following services:
-
- * Server
-
-Network node
-------------
-
-#. Install the Networking service layer-3 agent.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. In the ``linuxbridge_agent.ini`` file, configure the layer-2 agent.
-
- .. code-block:: ini
-
- [linux_bridge]
- physical_interface_mappings = provider:PROVIDER_INTERFACE
-
- [vxlan]
- enable_vxlan = True
- l2_population = True
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
-
- [securitygroup]
- firewall_driver = iptables
-
- Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface
- that handles provider networks. For example, ``eth1``.
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- interface that handles VXLAN overlays for self-service networks.
-
-#. In the ``l3_agent.ini`` file, configure the layer-3 agent.
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = linuxbridge
- external_network_bridge =
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-#. Start the following services:
-
- * Linux bridge agent
- * Layer-3 agent
-
-Compute nodes
--------------
-
-#. In the ``linuxbridge_agent.ini`` file, enable VXLAN support including
- layer-2 population.
-
- .. code-block:: ini
-
- [vxlan]
- enable_vxlan = True
- l2_population = True
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- interface that handles VXLAN overlays for self-service networks.
-
-#. Restart the following services:
-
- * Linux bridge agent
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents.
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 09de6af6-c5f1-4548-8b09-18801f068c57 | Linux bridge agent | compute2 | | True | UP | neutron-linuxbridge-agent |
- | 188945d1-9e70-4803-a276-df924e0788a4 | Linux bridge agent | compute1 | | True | UP | neutron-linuxbridge-agent |
- | e76c440d-d5f6-4316-a674-d689630b629e | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | e67367de-6657-11e6-86a4-931cd04404bb | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | e8174cae-6657-11e6-89f0-534ac6d0cb5c | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | ece49ec6-6657-11e6-bafb-c7560f19197d | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- | 598f6357-4331-4da5-a420-0f5be000bec9 | L3 agent | network1 | nova | True | UP | neutron-l3-agent |
- | f4734e0f-bcd5-4922-a19d-e31d56b0a7ae | Linux bridge agent | network1 | | True | UP | neutron-linuxbridge-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-.. include:: shared/deploy-selfservice-initialnetworks.txt
-
-Verify network operation
-------------------------
-
-.. include:: shared/deploy-selfservice-verifynetworkoperation.txt
-
-.. _deploy-lb-selfservice-networktrafficflow:
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-.. include:: shared/deploy-selfservice-networktrafficflow.txt
-
-North-south scenario 1: Instance with a fixed IP address
---------------------------------------------------------
-
-For instances with a fixed IPv4 address, the network node performs SNAT
-on north-south traffic passing from self-service to external networks
-such as the Internet. For instances with a fixed IPv6 address, the network
-node performs conventional routing of traffic between self-service and
-external networks.
-
-* The instance resides on compute node 1 and uses self-service network 1.
-* The instance sends a packet to a host on the Internet.
-
-The following steps involve compute node 1:
-
-#. The instance interface (1) forwards the packet to the self-service
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the self-service bridge handle
- firewalling and connection tracking for the packet.
-#. The self-service bridge forwards the packet to the VXLAN interface (4)
- which wraps the packet using VNI 101.
-#. The underlying physical interface (5) for the VXLAN interface forwards
- the packet to the network node via the overlay network (6).
-
-The following steps involve the network node:
-
-#. The underlying physical interface (7) for the VXLAN interface forwards
- the packet to the VXLAN interface (8) which unwraps the packet.
-#. The self-service bridge router port (9) forwards the packet to the
- self-service network interface (10) in the router namespace.
-
- * For IPv4, the router performs SNAT on the packet which changes the
- source IP address to the router IP address on the provider network
- and sends it to the gateway IP address on the provider network via
- the gateway interface on the provider network (11).
- * For IPv6, the router sends the packet to the next-hop IP address,
- typically the gateway IP address on the provider network, via the
- provider gateway interface (11).
-
-#. The router forwards the packet to the provider bridge router
- port (12).
-#. The VLAN sub-interface port (13) on the provider bridge forwards
- the packet to the provider physical network interface (14).
-#. The provider physical network interface (14) adds VLAN tag 101 to the packet
- and forwards it to the Internet via physical network infrastructure (15).
-
-.. note::
-
- Return traffic follows similar steps in reverse. However, without a
- floating IPv4 address, hosts on the provider or external networks cannot
- originate connections to instances on the self-service network.
-
-.. image:: figures/deploy-lb-selfservice-flowns1.png
- :alt: Self-service networks using Linux bridge - network traffic flow - north/south scenario 1
-
-North-south scenario 2: Instance with a floating IPv4 address
--------------------------------------------------------------
-
-For instances with a floating IPv4 address, the network node performs SNAT
-on north-south traffic passing from the instance to external networks
-such as the Internet and DNAT on north-south traffic passing from external
-networks to the instance. Floating IP addresses and NAT do not apply to IPv6.
-Thus, the network node routes IPv6 traffic in this scenario.
-
-* The instance resides on compute node 1 and uses self-service network 1.
-* A host on the Internet sends a packet to the instance.
-
-The following steps involve the network node:
-
-#. The physical network infrastructure (1) forwards the packet to the
- provider physical network interface (2).
-#. The provider physical network interface removes VLAN tag 101 and forwards
- the packet to the VLAN sub-interface on the provider bridge.
-#. The provider bridge forwards the packet to the self-service
- router gateway port on the provider network (5).
-
- * For IPv4, the router performs DNAT on the packet which changes the
- destination IP address to the instance IP address on the self-service
- network and sends it to the gateway IP address on the self-service
- network via the self-service interface (6).
- * For IPv6, the router sends the packet to the next-hop IP address,
- typically the gateway IP address on the self-service network, via
- the self-service interface (6).
-
-#. The router forwards the packet to the self-service bridge router
- port (7).
-#. The self-service bridge forwards the packet to the VXLAN interface (8)
- which wraps the packet using VNI 101.
-#. The underlying physical interface (9) for the VXLAN interface forwards
-   the packet to the compute node via the overlay network (10).
-
-The following steps involve the compute node:
-
-#. The underlying physical interface (11) for the VXLAN interface forwards
- the packet to the VXLAN interface (12) which unwraps the packet.
-#. Security group rules (13) on the self-service bridge handle firewalling
- and connection tracking for the packet.
-#. The self-service bridge instance port (14) forwards the packet to
- the instance interface (15) via ``veth`` pair.
-
-.. note::
-
- Egress instance traffic flows similar to north-south scenario 1, except SNAT
- changes the source IP address of the packet to the floating IPv4 address
- rather than the router IP address on the provider network.
-
-.. image:: figures/deploy-lb-selfservice-flowns2.png
- :alt: Self-service networks using Linux bridge - network traffic flow - north/south scenario 2
-
-East-west scenario 1: Instances on the same network
----------------------------------------------------
-
-Instances with a fixed IPv4/IPv6 or floating IPv4 address on the same network
-communicate directly between compute nodes containing those instances.
-
-By default, the VXLAN protocol lacks knowledge of target location
-and uses multicast to discover it. After discovery, it stores the
-location in the local forwarding database. In large deployments,
-the discovery process can generate a significant amount of network
-traffic that all nodes must process. To eliminate the latter and generally
-increase efficiency, the Networking service includes the layer-2
-population mechanism driver that automatically populates the
-forwarding database for VXLAN interfaces. The example configuration
-enables this driver. For more information, see :ref:`config-plugin-ml2`.
-
-* Instance 1 resides on compute node 1 and uses self-service network 1.
-* Instance 2 resides on compute node 2 and uses self-service network 1.
-* Instance 1 sends a packet to instance 2.
-
-The following steps involve compute node 1:
-
-#. The instance 1 interface (1) forwards the packet to the
- self-service bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the self-service bridge handle firewalling
- and connection tracking for the packet.
-#. The self-service bridge forwards the packet to the VXLAN interface (4)
- which wraps the packet using VNI 101.
-#. The underlying physical interface (5) for the VXLAN interface forwards
- the packet to compute node 2 via the overlay network (6).
-
-The following steps involve compute node 2:
-
-#. The underlying physical interface (7) for the VXLAN interface forwards
- the packet to the VXLAN interface (8) which unwraps the packet.
-#. Security group rules (9) on the self-service bridge handle firewalling
- and connection tracking for the packet.
-#. The self-service bridge instance port (10) forwards the packet to
-   the instance 2 interface (11) via ``veth`` pair.
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-.. image:: figures/deploy-lb-selfservice-flowew1.png
- :alt: Self-service networks using Linux bridge - network traffic flow - east/west scenario 1
-
-East-west scenario 2: Instances on different networks
------------------------------------------------------
-
-Instances using a fixed IPv4/IPv6 address or floating IPv4 address communicate
-via router on the network node. The self-service networks must reside on the
-same router.
-
-* Instance 1 resides on compute node 1 and uses self-service network 1.
-* Instance 2 resides on compute node 1 and uses self-service network 2.
-* Instance 1 sends a packet to instance 2.
-
-.. note::
-
- Both instances reside on the same compute node to illustrate how VXLAN
- enables multiple overlays to use the same layer-3 network.
-
-The following steps involve the compute node:
-
-#. The instance 1 interface (1) forwards the packet to the self-service
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the self-service bridge handle
- firewalling and connection tracking for the packet.
-#. The self-service bridge forwards the packet to the VXLAN interface (4)
- which wraps the packet using VNI 101.
-#. The underlying physical interface (5) for the VXLAN interface forwards
- the packet to the network node via the overlay network (6).
-
-The following steps involve the network node:
-
-#. The underlying physical interface (7) for the VXLAN interface forwards
- the packet to the VXLAN interface (8) which unwraps the packet.
-#. The self-service bridge router port (9) forwards the packet to the
- self-service network 1 interface (10) in the router namespace.
-#. The router sends the packet to the next-hop IP address, typically the
- gateway IP address on self-service network 2, via the self-service
- network 2 interface (11).
-#. The router forwards the packet to the self-service network 2 bridge router
- port (12).
-#. The self-service network 2 bridge forwards the packet to the VXLAN
- interface (13) which wraps the packet using VNI 102.
-#. The physical network interface (14) for the VXLAN interface sends the
- packet to the compute node via the overlay network (15).
-
-The following steps involve the compute node:
-
-#. The underlying physical interface (16) for the VXLAN interface sends
- the packet to the VXLAN interface (17) which unwraps the packet.
-#. Security group rules (18) on the self-service bridge handle firewalling
- and connection tracking for the packet.
-#. The self-service bridge instance port (19) forwards the packet to
- the instance 2 interface (20) via ``veth`` pair.
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-.. image:: figures/deploy-lb-selfservice-flowew2.png
- :alt: Self-service networks using Linux bridge - network traffic flow - east/west scenario 2
diff --git a/doc/networking-guide/source/deploy-lb.rst b/doc/networking-guide/source/deploy-lb.rst
deleted file mode 100644
index 6cb646f3d1..0000000000
--- a/doc/networking-guide/source/deploy-lb.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-.. _deploy-lb:
-
-=============================
-Linux bridge mechanism driver
-=============================
-
-The Linux bridge mechanism driver uses only Linux bridges and ``veth`` pairs
-as interconnection devices. A layer-2 agent manages Linux bridges on each
-compute node and any other node that provides layer-3 (routing), DHCP,
-metadata, or other network services.
-
-.. toctree::
- :maxdepth: 2
-
- deploy-lb-provider
- deploy-lb-selfservice
- deploy-lb-ha-vrrp
diff --git a/doc/networking-guide/source/deploy-ovs-ha-dvr.rst b/doc/networking-guide/source/deploy-ovs-ha-dvr.rst
deleted file mode 100644
index 1d9940eafb..0000000000
--- a/doc/networking-guide/source/deploy-ovs-ha-dvr.rst
+++ /dev/null
@@ -1,506 +0,0 @@
-.. _deploy-ovs-ha-dvr:
-
-=========================================
-Open vSwitch: High availability using DVR
-=========================================
-
-This architecture example augments the self-service deployment example
-with the Distributed Virtual Router (DVR) high-availability mechanism that
-provides connectivity between self-service and provider networks on compute
-nodes rather than network nodes for specific scenarios. For instances with a
-floating IPv4 address, routing between self-service and provider networks
-resides completely on the compute nodes to eliminate single point of
-failure and performance issues with network nodes. Routing also resides
-completely on the compute nodes for instances with a fixed or floating IPv4
-address using self-service networks on the same distributed virtual router.
-However, instances with a fixed IP address still rely on the network node for
-routing and SNAT services between self-service and provider networks.
-
-Consider the following attributes of this high-availability mechanism to
-determine practicality in your environment:
-
-* Only provides connectivity to an instance via the compute node on which
- the instance resides if the instance resides on a self-service network
- with a floating IPv4 address. Instances on self-service networks with
- only an IPv6 address or both IPv4 and IPv6 addresses rely on the network
- node for IPv6 connectivity.
-
-* The instance of a router on each compute node consumes an IPv4 address
- on the provider network on which it contains a gateway.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Modify the compute nodes with the following components:
-
-* Install the OpenStack Networking layer-3 agent.
-
-.. note::
-
-   Consider adding at least one additional network node to provide
-   high-availability for instances with a fixed IP address. See
-   :ref:`config-dvr-snat-ha-ovs` for more information.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-ovs-ha-dvr-overview.png
- :alt: High-availability using Open vSwitch with DVR - overview
-
-The following figure shows components and connectivity for one self-service
-network and one untagged (flat) network. In this particular case, the
-instance resides on the same compute node as the DHCP agent for the network.
-If the DHCP agent resides on another compute node, the latter only contains
-a DHCP namespace with a port on the OVS integration bridge.
-
-.. image:: figures/deploy-ovs-ha-dvr-compconn1.png
- :alt: High-availability using Open vSwitch with DVR - components and connectivity - one network
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to add support for
-high-availability using DVR to an existing operational environment that
-supports self-service networks.
-
-Controller node
----------------
-
-#. In the ``neutron.conf`` file:
-
- * Enable distributed routing by default for all routers.
-
- .. code-block:: ini
-
- [DEFAULT]
- router_distributed = True
-
-#. Restart the following services:
-
- * Server
-
-Network node
-------------
-
-#. In the ``openvswitch_agent.ini`` file, enable distributed routing.
-
- .. code-block:: ini
-
- [DEFAULT]
- enable_distributed_routing = True
-
-#. In the ``l3_agent.ini`` file, configure the layer-3 agent to provide
- SNAT services.
-
- .. code-block:: ini
-
- [DEFAULT]
- agent_mode = dvr_snat
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-#. Restart the following services:
-
- * Open vSwitch agent
- * Layer-3 agent
-
-Compute nodes
--------------
-
-#. Install the Networking service layer-3 agent.
-
-#. In the ``openvswitch_agent.ini`` file, enable distributed routing.
-
- .. code-block:: ini
-
- [DEFAULT]
- enable_distributed_routing = True
-
-#. In the ``l3_agent.ini`` file, configure the layer-3 agent.
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = openvswitch
- external_network_bridge =
- agent_mode = dvr
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-#. Restart the following services:
-
- * Open vSwitch agent
- * Layer-3 agent
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents.
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 05d980f2-a4fc-4815-91e7-a7f7e118c0db | L3 agent | compute1 | nova | True | UP | neutron-l3-agent |
- | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- | 2a2e9a90-51b8-4163-a7d6-3e199ba2374b | L3 agent | compute2 | nova | True | UP | neutron-l3-agent |
- | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | | True | UP | neutron-openvswitch-agent |
- | 513caa68-0391-4e53-a530-082e2c23e819 | Linux bridge agent | compute1 | | True | UP | neutron-linuxbridge-agent |
- | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | 8805b962-de95-4e40-bdc2-7a0add7521e8 | L3 agent | network1 | nova | True | UP | neutron-l3-agent |
- | a33cac5a-0266-48f6-9cac-4cef4f8b0358 | Open vSwitch agent | network1 | | True | UP | neutron-openvswitch-agent |
- | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | | True | UP | neutron-openvswitch-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-Similar to the self-service deployment example, this configuration supports
-multiple VXLAN self-service networks. After enabling high-availability, all
-additional routers use distributed routing. The following procedure creates
-an additional self-service network and router. The Networking service also
-supports adding distributed routing to existing routers.
-
-#. Source regular (non-administrative) project credentials.
-#. Create a self-service network.
-
- .. code-block:: console
-
- $ openstack network create selfservice2
- +-------------------------+--------------+
- | Field | Value |
- +-------------------------+--------------+
- | admin_state_up | UP |
- | mtu | 1450 |
- | name | selfservice2 |
- | port_security_enabled | True |
- | router:external | Internal |
- | shared | False |
- | status | ACTIVE |
- +-------------------------+--------------+
-
-#. Create an IPv4 subnet on the self-service network.
-
- .. code-block:: console
-
- $ openstack subnet create --subnet-range 192.0.2.0/24 \
- --network selfservice2 --dns-nameserver 8.8.4.4 selfservice2-v4
- +-------------------+---------------------------+
- | Field | Value |
- +-------------------+---------------------------+
- | allocation_pools | 192.0.2.2-192.0.2.254 |
- | cidr | 192.0.2.0/24 |
- | dns_nameservers | 8.8.4.4 |
- | enable_dhcp | True |
- | gateway_ip | 192.0.2.1 |
- | ip_version | 4 |
- | name | selfservice2-v4 |
- +-------------------+---------------------------+
-
-#. Create an IPv6 subnet on the self-service network.
-
- .. code-block:: console
-
- $ openstack subnet create --subnet-range fd00:192:0:2::/64 --ip-version 6 \
- --ipv6-ra-mode slaac --ipv6-address-mode slaac --network selfservice2 \
- --dns-nameserver 2001:4860:4860::8844 selfservice2-v6
- +-------------------+------------------------------------------------------+
- | Field | Value |
- +-------------------+------------------------------------------------------+
- | allocation_pools | fd00:192:0:2::2-fd00:192:0:2:ffff:ffff:ffff:ffff |
- | cidr | fd00:192:0:2::/64 |
- | dns_nameservers | 2001:4860:4860::8844 |
- | enable_dhcp | True |
- | gateway_ip | fd00:192:0:2::1 |
- | ip_version | 6 |
- | ipv6_address_mode | slaac |
- | ipv6_ra_mode | slaac |
- | name | selfservice2-v6 |
- +-------------------+------------------------------------------------------+
-
-#. Create a router.
-
- .. code-block:: console
-
- $ openstack router create router2
- +-----------------------+---------+
- | Field | Value |
- +-----------------------+---------+
- | admin_state_up | UP |
- | name | router2 |
- | status | ACTIVE |
- +-----------------------+---------+
-
-#. Add the IPv4 and IPv6 subnets as interfaces on the router.
-
- .. code-block:: console
-
- $ openstack router add subnet router2 selfservice2-v4
- $ openstack router add subnet router2 selfservice2-v6
-
- .. note::
-
- These commands provide no output.
-
-#. Add the provider network as a gateway on the router.
-
- .. code-block:: console
-
- $ openstack router set router2 --external-gateway provider1
-
-Verify network operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify distributed routing on the router.
-
- .. code-block:: console
-
- $ openstack router show router2
- +-------------------------+---------+
- | Field | Value |
- +-------------------------+---------+
- | admin_state_up | UP |
- | distributed | True |
- | ha | False |
- | name | router2 |
- | status | ACTIVE |
- +-------------------------+---------+
-
-#. On each compute node, verify creation of a ``qrouter`` namespace with
- the same ID.
-
- Compute node 1:
-
- .. code-block:: console
-
- # ip netns
- qrouter-78d2f628-137c-4f26-a257-25fc20f203c1
-
- Compute node 2:
-
- .. code-block:: console
-
- # ip netns
- qrouter-78d2f628-137c-4f26-a257-25fc20f203c1
-
-#. On the network node, verify creation of the ``snat`` and ``qrouter``
- namespaces with the same ID.
-
- .. code-block:: console
-
- # ip netns
- snat-78d2f628-137c-4f26-a257-25fc20f203c1
- qrouter-78d2f628-137c-4f26-a257-25fc20f203c1
-
- .. note::
-
- The namespace for router 1 from :ref:`deploy-ovs-selfservice` should
- also appear on network node 1 because of creation prior to enabling
- distributed routing.
-
-#. Launch an instance with an interface on the additional self-service network.
- For example, a CirrOS image using flavor ID 1.
-
- .. code-block:: console
-
- $ openstack server create --flavor 1 --image cirros --nic net-id=NETWORK_ID selfservice-instance2
-
- Replace ``NETWORK_ID`` with the ID of the additional self-service
- network.
-
-#. Determine the IPv4 and IPv6 addresses of the instance.
-
- .. code-block:: console
-
- $ openstack server list
- +--------------------------------------+-----------------------+--------+---------------------------------------------------------------------------+
- | ID | Name | Status | Networks |
- +--------------------------------------+-----------------------+--------+---------------------------------------------------------------------------+
- | bde64b00-77ae-41b9-b19a-cd8e378d9f8b | selfservice-instance2 | ACTIVE | selfservice2=fd00:192:0:2:f816:3eff:fe71:e93e, 192.0.2.4 |
- +--------------------------------------+-----------------------+--------+---------------------------------------------------------------------------+
-
-#. Create a floating IPv4 address on the provider network.
-
- .. code-block:: console
-
- $ openstack floating ip create provider1
- +-------------+--------------------------------------+
- | Field | Value |
- +-------------+--------------------------------------+
- | fixed_ip | None |
- | id | 0174056a-fa56-4403-b1ea-b5151a31191f |
- | instance_id | None |
- | ip | 203.0.113.17 |
- | pool | provider1 |
- +-------------+--------------------------------------+
-
-#. Associate the floating IPv4 address with the instance.
-
- .. code-block:: console
-
- $ openstack server add floating ip selfservice-instance2 203.0.113.17
-
- .. note::
-
- This command provides no output.
-
-#. On the compute node containing the instance, verify creation of the
- ``fip`` namespace with the same ID as the provider network.
-
- .. code-block:: console
-
- # ip netns
- fip-4bfa3075-b4b2-4f7d-b88e-df1113942d43
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-.. include:: shared/deploy-selfservice-networktrafficflow.txt
-
-This section only contains flow scenarios that benefit from distributed
-virtual routing or that differ from conventional operation. For other
-flow scenarios, see :ref:`deploy-ovs-selfservice-networktrafficflow`.
-
-North-south scenario 1: Instance with a fixed IP address
---------------------------------------------------------
-
-Similar to :ref:`deploy-ovs-selfservice-networktrafficflow-ns1`, except
-the router namespace on the network node becomes the SNAT namespace. The
-network node still contains the router namespace, but it serves no purpose
-in this case.
-
-.. image:: figures/deploy-ovs-ha-dvr-flowns1.png
- :alt: High-availability using Open vSwitch with DVR - network traffic flow - north/south scenario 1
-
-North-south scenario 2: Instance with a floating IPv4 address
--------------------------------------------------------------
-
-For instances with a floating IPv4 address using a self-service network
-on a distributed router, the compute node containing the instance performs
-SNAT on north-south traffic passing from the instance to external networks
-such as the Internet and DNAT on north-south traffic passing from external
-networks to the instance. Floating IP addresses and NAT do not apply to
-IPv6. Thus, the network node routes IPv6 north-south traffic passing
-between the instance and external networks such as the Internet.
-
-* Instance 1 resides on compute node 1 and uses self-service network 1.
-* A host on the Internet sends a packet to the instance.
-
-The following steps involve the compute node:
-
-#. The physical network infrastructure (1) forwards the packet to the
- provider physical network interface (2).
-#. The provider physical network interface forwards the packet to the
- OVS provider bridge provider network port (3).
-#. The OVS provider bridge swaps actual VLAN tag 101 with the internal
- VLAN tag.
-#. The OVS provider bridge ``phy-br-provider`` port (4) forwards the
- packet to the OVS integration bridge ``int-br-provider`` port (5).
-#. The OVS integration bridge port for the provider network (6) removes
- the internal VLAN tag and forwards the packet to the provider network
- interface (7) in the floating IP namespace. This interface responds
- to any ARP requests for the instance floating IPv4 address.
-#. The floating IP namespace routes the packet (8) to the distributed
- router namespace (9) using a pair of IP addresses on the DVR internal
- network. This namespace contains the instance floating IPv4 address.
-#. The router performs DNAT on the packet which changes the destination
- IP address to the instance IP address on the self-service network via
- the self-service network interface (10).
-#. The router forwards the packet to the OVS integration bridge port for
- the self-service network (11).
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge removes the internal VLAN tag from the packet.
-#. The OVS integration bridge security group port (12) forwards the packet
- to the security group bridge OVS port (13) via ``veth`` pair.
-#. Security group rules (14) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (15) forwards the packet to the
- instance interface (16) via ``veth`` pair.
-
-.. image:: figures/deploy-ovs-ha-dvr-flowns2.png
- :alt: High-availability using Open vSwitch with DVR - network traffic flow - north/south scenario 2
-
-.. note::
-
- Egress traffic follows similar steps in reverse, except SNAT changes
- the source IPv4 address of the packet to the floating IPv4 address.
-
-East-west scenario 1: Instances on different networks on the same router
-------------------------------------------------------------------------
-
-Instances with fixed IPv4/IPv6 address or floating IPv4 address on the
-same compute node communicate via router on the compute node. Instances
-on different compute nodes communicate via an instance of the router on
-each compute node.
-
-.. note::
-
- This scenario places the instances on different compute nodes to
- show the most complex situation.
-
-The following steps involve compute node 1:
-
-#. The instance interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge port for self-service network 1 (6) removes the
-   internal VLAN tag and forwards the packet to the self-service network 1
-   interface in the distributed router namespace (7).
-#. The distributed router namespace routes the packet to self-service network
- 2.
-#. The self-service network 2 interface in the distributed router namespace
- (8) forwards the packet to the OVS integration bridge port for
- self-service network 2 (9).
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge exchanges the internal VLAN tag for an
- internal tunnel ID.
-#. The OVS integration bridge ``patch-tun`` port (10) forwards the packet
- to the OVS tunnel bridge ``patch-int`` port (11).
-#. The OVS tunnel bridge (12) wraps the packet using VNI 101.
-#. The underlying physical interface (13) for overlay networks forwards
- the packet to compute node 2 via the overlay network (14).
-
-The following steps involve compute node 2:
-
-#. The underlying physical interface (15) for overlay networks forwards
- the packet to the OVS tunnel bridge (16).
-#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID
- to it.
-#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal
- VLAN tag.
-#. The OVS tunnel bridge ``patch-int`` patch port (17) forwards the packet
- to the OVS integration bridge ``patch-tun`` patch port (18).
-#. The OVS integration bridge removes the internal VLAN tag from the packet.
-#. The OVS integration bridge security group port (19) forwards the packet
- to the security group bridge OVS port (20) via ``veth`` pair.
-#. Security group rules (21) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (22) forwards the packet to the
- instance 2 interface (23) via ``veth`` pair.
-
-.. note::
-
- Routing between self-service networks occurs on the compute node containing
- the instance sending the packet. In this scenario, routing occurs on
- compute node 1 for packets from instance 1 to instance 2 and on compute
- node 2 for packets from instance 2 to instance 1.
-
-.. image:: figures/deploy-ovs-ha-dvr-flowew1.png
- :alt: High-availability using Open vSwitch with DVR - network traffic flow - east/west scenario 1
diff --git a/doc/networking-guide/source/deploy-ovs-ha-vrrp.rst b/doc/networking-guide/source/deploy-ovs-ha-vrrp.rst
deleted file mode 100644
index 878fb43b84..0000000000
--- a/doc/networking-guide/source/deploy-ovs-ha-vrrp.rst
+++ /dev/null
@@ -1,179 +0,0 @@
-.. _deploy-ovs-ha-vrrp:
-
-==========================================
-Open vSwitch: High availability using VRRP
-==========================================
-
-.. include:: shared/deploy-ha-vrrp.txt
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Add one network node with the following components:
-
-* Three network interfaces: management, provider, and overlay.
-* OpenStack Networking layer-2 agent, layer-3 agent, and any
- dependencies.
-
-.. note::
-
- You can keep the DHCP and metadata agents on each compute node or
- move them to the network nodes.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-ovs-ha-vrrp-overview.png
- :alt: High-availability using VRRP with Open vSwitch - overview
-
-The following figure shows components and connectivity for one self-service
-network and one untagged (flat) network. The master router resides on network
-node 1. In this particular case, the instance resides on the same compute
-node as the DHCP agent for the network. If the DHCP agent resides on another
-compute node, the latter only contains a DHCP namespace and Linux bridge
-with a port on the overlay physical network interface.
-
-.. image:: figures/deploy-ovs-ha-vrrp-compconn1.png
- :alt: High-availability using VRRP with Open vSwitch - components and connectivity - one network
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to add support for
-high-availability using VRRP to an existing operational environment that
-supports self-service networks.
-
-Controller node
----------------
-
-#. In the ``neutron.conf`` file:
-
- * Enable VRRP.
-
- .. code-block:: ini
-
- [DEFAULT]
- l3_ha = True
-
-#. Restart the following services:
-
- * Server
-
-Network node 1
---------------
-
-No changes.
-
-Network node 2
---------------
-
-#. Install the Networking service OVS layer-2 agent and layer-3 agent.
-
-#. Install OVS.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. Start the following services:
-
- * OVS
-
-#. Create the OVS provider bridge ``br-provider``:
-
- .. code-block:: console
-
- $ ovs-vsctl add-br br-provider
-
-#. In the ``openvswitch_agent.ini`` file, configure the layer-2 agent.
-
- .. code-block:: ini
-
- [ovs]
- bridge_mappings = provider:br-provider
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
-
- [agent]
- tunnel_types = vxlan
- l2_population = true
-
- [securitygroup]
- firewall_driver = iptables_hybrid
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- interface that handles VXLAN overlays for self-service networks.
-
-#. In the ``l3_agent.ini`` file, configure the layer-3 agent.
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = openvswitch
- external_network_bridge =
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-#. Start the following services:
-
- * Open vSwitch agent
- * Layer-3 agent
-
-Compute nodes
--------------
-
-No changes.
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents.
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | | True | UP | neutron-openvswitch-agent |
- | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | 8805b962-de95-4e40-bdc2-7a0add7521e8 | L3 agent | network1 | nova | True | UP | neutron-l3-agent |
- | a33cac5a-0266-48f6-9cac-4cef4f8b0358 | Open vSwitch agent | network1 | | True | UP | neutron-openvswitch-agent |
- | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | | True | UP | neutron-openvswitch-agent |
- | 7f00d759-f2c9-494a-9fbf-fd9118104d03 | Open vSwitch agent | network2 | | True | UP | neutron-openvswitch-agent |
- | b28d8818-9e32-4888-930b-29addbdd2ef9 | L3 agent | network2 | nova | True | UP | neutron-l3-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-.. include:: shared/deploy-ha-vrrp-initialnetworks.txt
-
-Verify network operation
-------------------------
-
-.. include:: shared/deploy-ha-vrrp-verifynetworkoperation.txt
-
-Verify failover operation
--------------------------
-
-.. include:: shared/deploy-ha-vrrp-verifyfailoveroperation.txt
-
-Keepalived VRRP health check
-----------------------------
-
-.. include:: shared/keepalived-vrrp-healthcheck.txt
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-This high-availability mechanism simply augments :ref:`deploy-ovs-selfservice`
-with failover of layer-3 services to another router if the master router
-fails. Thus, you can reference :ref:`Self-service network traffic flow
-` for normal operation.
diff --git a/doc/networking-guide/source/deploy-ovs-provider.rst b/doc/networking-guide/source/deploy-ovs-provider.rst
deleted file mode 100644
index 9fa4cc78df..0000000000
--- a/doc/networking-guide/source/deploy-ovs-provider.rst
+++ /dev/null
@@ -1,428 +0,0 @@
-.. _deploy-ovs-provider:
-
-===============================
-Open vSwitch: Provider networks
-===============================
-
-This architecture example provides layer-2 connectivity between instances
-and the physical network infrastructure using VLAN (802.1q) tagging. It
-supports one untagged (flat) network and up to 4095 tagged (VLAN) networks.
-The actual quantity of VLAN networks depends on the physical network
-infrastructure. For more information on provider networks, see
-:ref:`intro-os-networking-provider`.
-
-.. warning::
-
- Linux distributions often package older releases of Open vSwitch that can
- introduce issues during operation with the Networking service. We recommend
- using at least the latest long-term stable (LTS) release of Open vSwitch
- for the best experience and support from Open vSwitch. See
- ``__ for available releases and the
- `installation instructions
- `__ for instructions on building and installing Open vSwitch.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-One controller node with the following components:
-
-* Two network interfaces: management and provider.
-* OpenStack Networking server service and ML2 plug-in.
-
-Two compute nodes with the following components:
-
-* Two network interfaces: management and provider.
-* OpenStack Networking Open vSwitch (OVS) layer-2 agent, DHCP agent, metadata
- agent, and any dependencies including OVS.
-
-.. note::
-
- Larger deployments typically deploy the DHCP and metadata agents on a
- subset of compute nodes to increase performance and redundancy. However,
- too many agents can overwhelm the message bus. Also, to further simplify
- any deployment, you can omit the metadata agent and use a configuration
- drive to provide metadata to instances.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-ovs-provider-overview.png
- :alt: Provider networks using OVS - overview
-
-The following figure shows components and connectivity for one untagged
-(flat) network. In this particular case, the instance resides on the
-same compute node as the DHCP agent for the network. If the DHCP agent
-resides on another compute node, the latter only contains a DHCP namespace
-with a port on the OVS integration bridge.
-
-.. image:: figures/deploy-ovs-provider-compconn1.png
- :alt: Provider networks using OVS - components and connectivity - one network
-
-The following figure describes virtual connectivity among components for
-two tagged (VLAN) networks. Essentially, all networks use a single OVS
-integration bridge with different internal VLAN tags. The internal VLAN
-tags almost always differ from the network VLAN assignment in the Networking
-service. Similar to the untagged network case, the DHCP agent may reside on
-a different compute node.
-
-.. image:: figures/deploy-ovs-provider-compconn2.png
- :alt: Provider networks using OVS - components and connectivity - multiple networks
-
-.. note::
-
- These figures omit the controller node because it does not handle instance
- network traffic.
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to deploy provider
-networks in your environment.
-
-Controller node
----------------
-
-#. Install the Networking service components that provide the
- ``neutron-server`` service and ML2 plug-in.
-
-#. In the ``neutron.conf`` file:
-
- * Configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
- * Disable service plug-ins because provider networks do not require
- any. However, this breaks portions of the dashboard that manage
- the Networking service. See the
- `Ocata Install Tutorials and Guides
- `__ for more
- information.
-
- .. code-block:: ini
-
- [DEFAULT]
- service_plugins =
-
- * Enable two DHCP agents per network so both compute nodes can
- provide DHCP service for provider networks.
-
- .. code-block:: ini
-
- [DEFAULT]
- dhcp_agents_per_network = 2
-
- * If necessary, :ref:`configure MTU `.
-
-#. In the ``ml2_conf.ini`` file:
-
- * Configure drivers and network types:
-
- .. code-block:: ini
-
- [ml2]
- type_drivers = flat,vlan
- tenant_network_types =
- mechanism_drivers = openvswitch
- extension_drivers = port_security
-
- * Configure network mappings:
-
- .. code-block:: ini
-
- [ml2_type_flat]
- flat_networks = provider
-
- [ml2_type_vlan]
- network_vlan_ranges = provider
-
- .. note::
-
- The ``tenant_network_types`` option contains no value because the
- architecture does not support self-service networks.
-
- .. note::
-
- The ``provider`` value in the ``network_vlan_ranges`` option lacks VLAN
- ID ranges to support use of arbitrary VLAN IDs.
-
-#. Populate the database.
-
- .. code-block:: console
-
- # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
- --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
-
-#. Start the following services:
-
- * Server
-
-Compute nodes
--------------
-
-#. Install the Networking service OVS layer-2 agent, DHCP agent, and
- metadata agent.
-
-#. Install OVS.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. In the ``openvswitch_agent.ini`` file, configure the OVS agent:
-
- .. code-block:: ini
-
- [ovs]
- bridge_mappings = provider:br-provider
-
- [securitygroup]
- firewall_driver = iptables_hybrid
-
-#. In the ``dhcp_agent.ini`` file, configure the DHCP agent:
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = openvswitch
- enable_isolated_metadata = True
- force_metadata = True
-
- .. note::
-
- The ``force_metadata`` option forces the DHCP agent to provide
- a host route to the metadata service on ``169.254.169.254``
- regardless of whether the subnet contains an interface on a
- router, thus maintaining similar and predictable metadata behavior
- among subnets.
-
-#. In the ``metadata_agent.ini`` file, configure the metadata agent:
-
- .. code-block:: ini
-
- [DEFAULT]
- nova_metadata_ip = controller
- metadata_proxy_shared_secret = METADATA_SECRET
-
- The value of ``METADATA_SECRET`` must match the value of the same option
- in the ``[neutron]`` section of the ``nova.conf`` file.
-
-#. Start the following services:
-
- * OVS
-
-#. Create the OVS provider bridge ``br-provider``:
-
- .. code-block:: console
-
- $ ovs-vsctl add-br br-provider
-
-#. Add the provider network interface as a port on the OVS provider
- bridge ``br-provider``:
-
- .. code-block:: console
-
- $ ovs-vsctl add-port br-provider PROVIDER_INTERFACE
-
- Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface
- that handles provider networks. For example, ``eth1``.
-
-#. Start the following services:
-
- * OVS agent
- * DHCP agent
- * Metadata agent
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents:
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | | True | UP | neutron-openvswitch-agent |
- | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | | True | UP | neutron-openvswitch-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-.. include:: shared/deploy-provider-initialnetworks.txt
-
-Verify network operation
-------------------------
-
-.. include:: shared/deploy-provider-verifynetworkoperation.txt
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-.. include:: shared/deploy-provider-networktrafficflow.txt
-
-North-south
------------
-
-* The instance resides on compute node 1 and uses provider network 1.
-* The instance sends a packet to a host on the Internet.
-
-The following steps involve compute node 1.
-
-#. The instance interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge ``int-br-provider`` patch port (6) forwards
- the packet to the OVS provider bridge ``phy-br-provider`` patch port (7).
-#. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag
- 101.
-#. The OVS provider bridge provider network port (8) forwards the packet to
- the physical network interface (9).
-#. The physical network interface forwards the packet to the physical
- network infrastructure switch (10).
-
-The following steps involve the physical network infrastructure:
-
-#. The switch removes VLAN tag 101 from the packet and forwards it to the
- router (11).
-#. The router routes the packet from the provider network (12) to the
- external network (13) and forwards the packet to the switch (14).
-#. The switch forwards the packet to the external network (15).
-#. The external network (16) receives the packet.
-
-.. image:: figures/deploy-ovs-provider-flowns1.png
- :alt: Provider networks using Open vSwitch - network traffic flow - north/south
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-East-west scenario 1: Instances on the same network
----------------------------------------------------
-
-Instances on the same network communicate directly between compute nodes
-containing those instances.
-
-* Instance 1 resides on compute node 1 and uses provider network 1.
-* Instance 2 resides on compute node 2 and uses provider network 1.
-* Instance 1 sends a packet to instance 2.
-
-The following steps involve compute node 1:
-
-#. The instance 1 interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge ``int-br-provider`` patch port (6) forwards
- the packet to the OVS provider bridge ``phy-br-provider`` patch port (7).
-#. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag
- 101.
-#. The OVS provider bridge provider network port (8) forwards the packet to
- the physical network interface (9).
-#. The physical network interface forwards the packet to the physical
- network infrastructure switch (10).
-
-The following steps involve the physical network infrastructure:
-
-#. The switch forwards the packet from compute node 1 to compute node 2 (11).
-
-The following steps involve compute node 2:
-
-#. The physical network interface (12) forwards the packet to the OVS
- provider bridge provider network port (13).
-#. The OVS provider bridge ``phy-br-provider`` patch port (14) forwards the
- packet to the OVS integration bridge ``int-br-provider`` patch port (15).
-#. The OVS integration bridge swaps the actual VLAN tag 101 with the internal
- VLAN tag.
-#. The OVS integration bridge security group port (16) forwards the packet
- to the security group bridge OVS port (17).
-#. Security group rules (18) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (19) forwards the packet to the
- instance 2 interface (20) via ``veth`` pair.
-
-.. image:: figures/deploy-ovs-provider-flowew1.png
- :alt: Provider networks using Open vSwitch - network traffic flow - east/west scenario 1
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-East-west scenario 2: Instances on different networks
------------------------------------------------------
-
-Instances communicate via router on the physical network infrastructure.
-
-* Instance 1 resides on compute node 1 and uses provider network 1.
-* Instance 2 resides on compute node 1 and uses provider network 2.
-* Instance 1 sends a packet to instance 2.
-
-.. note::
-
- Both instances reside on the same compute node to illustrate how VLAN
- tagging enables multiple logical layer-2 networks to use the same
- physical layer-2 network.
-
-The following steps involve the compute node:
-
-#. The instance 1 interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge ``int-br-provider`` patch port (6) forwards
- the packet to the OVS provider bridge ``phy-br-provider`` patch port (7).
-#. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag
- 101.
-#. The OVS provider bridge provider network port (8) forwards the packet to
- the physical network interface (9).
-#. The physical network interface forwards the packet to the physical
- network infrastructure switch (10).
-
-The following steps involve the physical network infrastructure:
-
-#. The switch removes VLAN tag 101 from the packet and forwards it to the
- router (11).
-#. The router routes the packet from provider network 1 (12) to provider
- network 2 (13).
-#. The router forwards the packet to the switch (14).
-#. The switch adds VLAN tag 102 to the packet and forwards it to compute
- node 1 (15).
-
-The following steps involve the compute node:
-
-#. The physical network interface (16) forwards the packet to the OVS
- provider bridge provider network port (17).
-#. The OVS provider bridge ``phy-br-provider`` patch port (18) forwards the
- packet to the OVS integration bridge ``int-br-provider`` patch port (19).
-#. The OVS integration bridge swaps the actual VLAN tag 102 with the internal
- VLAN tag.
-#. The OVS integration bridge security group port (20) removes the internal
- VLAN tag and forwards the packet to the security group bridge OVS port
- (21).
-#. Security group rules (22) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (23) forwards the packet to the
- instance 2 interface (24) via ``veth`` pair.
-
-.. image:: figures/deploy-ovs-provider-flowew2.png
- :alt: Provider networks using Open vSwitch - network traffic flow - east/west scenario 2
-
-.. note::
-
- Return traffic follows similar steps in reverse.
diff --git a/doc/networking-guide/source/deploy-ovs-selfservice.rst b/doc/networking-guide/source/deploy-ovs-selfservice.rst
deleted file mode 100644
index c8f53d3517..0000000000
--- a/doc/networking-guide/source/deploy-ovs-selfservice.rst
+++ /dev/null
@@ -1,508 +0,0 @@
-.. _deploy-ovs-selfservice:
-
-===================================
-Open vSwitch: Self-service networks
-===================================
-
-This architecture example augments :ref:`deploy-ovs-provider` to support
-a nearly limitless quantity of entirely virtual networks. Although the
-Networking service supports VLAN self-service networks, this example
-focuses on VXLAN self-service networks. For more information on
-self-service networks, see :ref:`intro-os-networking-selfservice`.
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Add one network node with the following components:
-
-* Three network interfaces: management, provider, and overlay.
-* OpenStack Networking Open vSwitch (OVS) layer-2 agent, layer-3 agent, and
- any dependencies including OVS.
-
-Modify the compute nodes with the following components:
-
-* Add one network interface: overlay.
-
-.. note::
-
- You can keep the DHCP and metadata agents on each compute node or
- move them to the network node.
-
-Architecture
-~~~~~~~~~~~~
-
-.. image:: figures/deploy-ovs-selfservice-overview.png
- :alt: Self-service networks using OVS - overview
-
-The following figure shows components and connectivity for one self-service
-network and one untagged (flat) provider network. In this particular case, the
-instance resides on the same compute node as the DHCP agent for the network.
-If the DHCP agent resides on another compute node, the latter only contains
-a DHCP namespace with a port on the OVS integration bridge.
-
-.. image:: figures/deploy-ovs-selfservice-compconn1.png
- :alt: Self-service networks using OVS - components and connectivity - one network
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-Use the following example configuration as a template to add support for
-self-service networks to an existing operational environment that supports
-provider networks.
-
-Controller node
----------------
-
-#. In the ``neutron.conf`` file:
-
- * Enable routing and allow overlapping IP address ranges.
-
- .. code-block:: ini
-
- [DEFAULT]
- service_plugins = router
- allow_overlapping_ips = True
-
-#. In the ``ml2_conf.ini`` file:
-
- * Add ``vxlan`` to type drivers and project network types.
-
- .. code-block:: ini
-
- [ml2]
- type_drivers = flat,vlan,vxlan
- tenant_network_types = vxlan
-
- * Enable the layer-2 population mechanism driver.
-
- .. code-block:: ini
-
- [ml2]
- mechanism_drivers = openvswitch,l2population
-
- * Configure the VXLAN network ID (VNI) range.
-
- .. code-block:: ini
-
- [ml2_type_vxlan]
- vni_ranges = VNI_START:VNI_END
-
- Replace ``VNI_START`` and ``VNI_END`` with appropriate numerical
- values.
-
-#. Restart the following services:
-
- * Neutron Server
- * Open vSwitch agent
-
-Network node
-------------
-
-#. Install the Networking service OVS layer-2 agent and layer-3 agent.
-
-#. Install OVS.
-
-#. In the ``neutron.conf`` file, configure common options:
-
- .. include:: shared/deploy-config-neutron-common.txt
-
-#. Start the following services:
-
- * OVS
-
-#. Create the OVS provider bridge ``br-provider``:
-
- .. code-block:: console
-
- $ ovs-vsctl add-br br-provider
-
-#. In the ``openvswitch_agent.ini`` file, configure the layer-2 agent.
-
- .. code-block:: ini
-
- [ovs]
- bridge_mappings = provider:br-provider
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
-
- [agent]
- tunnel_types = vxlan
- l2_population = True
-
- [securitygroup]
- firewall_driver = iptables_hybrid
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- interface that handles VXLAN overlays for self-service networks.
-
-#. In the ``l3_agent.ini`` file, configure the layer-3 agent.
-
- .. code-block:: ini
-
- [DEFAULT]
- interface_driver = openvswitch
- external_network_bridge =
-
- .. note::
-
- The ``external_network_bridge`` option intentionally contains
- no value.
-
-#. Start the following services:
-
- * Open vSwitch agent
- * Layer-3 agent
-
-Compute nodes
--------------
-
-#. In the ``openvswitch_agent.ini`` file, enable VXLAN support including
- layer-2 population.
-
- .. code-block:: ini
-
- [ovs]
- local_ip = OVERLAY_INTERFACE_IP_ADDRESS
-
- [agent]
- tunnel_types = vxlan
- l2_population = True
-
- Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
- interface that handles VXLAN overlays for self-service networks.
-
-#. Restart the following services:
-
- * Open vSwitch agent
-
-Verify service operation
-------------------------
-
-#. Source the administrative project credentials.
-#. Verify presence and operation of the agents.
-
- .. code-block:: console
-
- $ openstack network agent list
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
- | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | | True | UP | neutron-metadata-agent |
- | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | | True | UP | neutron-openvswitch-agent |
- | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent |
- | 8805b962-de95-4e40-bdc2-7a0add7521e8 | L3 agent | network1 | nova | True | UP | neutron-l3-agent |
- | a33cac5a-0266-48f6-9cac-4cef4f8b0358 | Open vSwitch agent | network1 | | True | UP | neutron-openvswitch-agent |
- | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | | True | UP | neutron-metadata-agent |
- | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent |
- | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | | True | UP | neutron-openvswitch-agent |
- +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
-
-Create initial networks
------------------------
-
-.. include:: shared/deploy-selfservice-initialnetworks.txt
-
-Verify network operation
-------------------------
-
-.. include:: shared/deploy-selfservice-verifynetworkoperation.txt
-
-.. _deploy-ovs-selfservice-networktrafficflow:
-
-Network traffic flow
-~~~~~~~~~~~~~~~~~~~~
-
-.. include:: shared/deploy-selfservice-networktrafficflow.txt
-
-.. _deploy-ovs-selfservice-networktrafficflow-ns1:
-
-North-south scenario 1: Instance with a fixed IP address
---------------------------------------------------------
-
-For instances with a fixed IPv4 address, the network node performs SNAT
-on north-south traffic passing from self-service to external networks
-such as the Internet. For instances with a fixed IPv6 address, the network
-node performs conventional routing of traffic between self-service and
-external networks.
-
-* The instance resides on compute node 1 and uses self-service network 1.
-* The instance sends a packet to a host on the Internet.
-
-The following steps involve compute node 1:
-
-#. The instance interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge exchanges the internal VLAN tag for an internal
- tunnel ID.
-#. The OVS integration bridge patch port (6) forwards the packet to the
- OVS tunnel bridge patch port (7).
-#. The OVS tunnel bridge (8) wraps the packet using VNI 101.
-#. The underlying physical interface (9) for overlay networks forwards
- the packet to the network node via the overlay network (10).
-
-The following steps involve the network node:
-
-#. The underlying physical interface (11) for overlay networks forwards
- the packet to the OVS tunnel bridge (12).
-#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID
- to it.
-#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal
- VLAN tag.
-#. The OVS tunnel bridge patch port (13) forwards the packet to the OVS
- integration bridge patch port (14).
-#. The OVS integration bridge port for the self-service network (15)
- removes the internal VLAN tag and forwards the packet to the self-service
- network interface (16) in the router namespace.
-
- * For IPv4, the router performs SNAT on the packet which changes the
- source IP address to the router IP address on the provider network
- and sends it to the gateway IP address on the provider network via
- the gateway interface on the provider network (17).
- * For IPv6, the router sends the packet to the next-hop IP address,
- typically the gateway IP address on the provider network, via the
- provider gateway interface (17).
-
-#. The router forwards the packet to the OVS integration bridge port for
- the provider network (18).
-#. The OVS integration bridge adds the internal VLAN tag to the packet.
-#. The OVS integration bridge ``int-br-provider`` patch port (19) forwards
- the packet to the OVS provider bridge ``phy-br-provider`` patch port (20).
-#. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag
- 101.
-#. The OVS provider bridge provider network port (21) forwards the packet to
- the physical network interface (22).
-#. The physical network interface forwards the packet to the Internet via
- physical network infrastructure (23).
-
-.. note::
-
- Return traffic follows similar steps in reverse. However, without a
- floating IPv4 address, hosts on the provider or external networks cannot
- originate connections to instances on the self-service network.
-
-.. image:: figures/deploy-ovs-selfservice-flowns1.png
- :alt: Self-service networks using Open vSwitch - network traffic flow - north/south scenario 1
-
-North-south scenario 2: Instance with a floating IPv4 address
--------------------------------------------------------------
-
-For instances with a floating IPv4 address, the network node performs SNAT
-on north-south traffic passing from the instance to external networks
-such as the Internet and DNAT on north-south traffic passing from external
-networks to the instance. Floating IP addresses and NAT do not apply to IPv6.
-Thus, the network node routes IPv6 traffic in this scenario.
-
-* The instance resides on compute node 1 and uses self-service network 1.
-* A host on the Internet sends a packet to the instance.
-
-The following steps involve the network node:
-
-#. The physical network infrastructure (1) forwards the packet to the
- provider physical network interface (2).
-#. The provider physical network interface forwards the packet to the
- OVS provider bridge provider network port (3).
-#. The OVS provider bridge swaps actual VLAN tag 101 with the internal
- VLAN tag.
-#. The OVS provider bridge ``phy-br-provider`` port (4) forwards the
- packet to the OVS integration bridge ``int-br-provider`` port (5).
-#. The OVS integration bridge port for the provider network (6) removes
- the internal VLAN tag and forwards the packet to the provider network
- interface (6) in the router namespace.
-
- * For IPv4, the router performs DNAT on the packet which changes the
- destination IP address to the instance IP address on the self-service
- network and sends it to the gateway IP address on the self-service
- network via the self-service interface (7).
- * For IPv6, the router sends the packet to the next-hop IP address,
- typically the gateway IP address on the self-service network, via
- the self-service interface (8).
-
-#. The router forwards the packet to the OVS integration bridge port for
- the self-service network (9).
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge exchanges the internal VLAN tag for an internal
- tunnel ID.
-#. The OVS integration bridge ``patch-tun`` patch port (10) forwards the
- packet to the OVS tunnel bridge ``patch-int`` patch port (11).
-#. The OVS tunnel bridge (12) wraps the packet using VNI 101.
-#. The underlying physical interface (13) for overlay networks forwards
- the packet to the network node via the overlay network (14).
-
-The following steps involve the compute node:
-
-#. The underlying physical interface (15) for overlay networks forwards
- the packet to the OVS tunnel bridge (16).
-#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID
- to it.
-#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal
- VLAN tag.
-#. The OVS tunnel bridge ``patch-int`` patch port (17) forwards the packet
- to the OVS integration bridge ``patch-tun`` patch port (18).
-#. The OVS integration bridge removes the internal VLAN tag from the packet.
-#. The OVS integration bridge security group port (19) forwards the packet
- to the security group bridge OVS port (20) via ``veth`` pair.
-#. Security group rules (21) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (22) forwards the packet to the
- instance interface (23) via ``veth`` pair.
-
-.. image:: figures/deploy-ovs-selfservice-flowns2.png
- :alt: Self-service networks using Open vSwitch - network traffic flow - north/south scenario 2
-
-.. note::
-
- Egress instance traffic flows similar to north-south scenario 1, except SNAT
- changes the source IP address of the packet to the floating IPv4 address
- rather than the router IP address on the provider network.
-
-East-west scenario 1: Instances on the same network
----------------------------------------------------
-
-Instances with a fixed IPv4/IPv6 address or floating IPv4 address on the
-same network communicate directly between compute nodes containing those
-instances.
-
-By default, the VXLAN protocol lacks knowledge of target location
-and uses multicast to discover it. After discovery, it stores the
-location in the local forwarding database. In large deployments,
-the discovery process can generate a significant amount of network
-that all nodes must process. To eliminate the latter and generally
-increase efficiency, the Networking service includes the layer-2
-population mechanism driver that automatically populates the
-forwarding database for VXLAN interfaces. The example configuration
-enables this driver. For more information, see :ref:`config-plugin-ml2`.
-
-* Instance 1 resides on compute node 1 and uses self-service network 1.
-* Instance 2 resides on compute node 2 and uses self-service network 1.
-* Instance 1 sends a packet to instance 2.
-
-The following steps involve compute node 1:
-
-#. The instance 1 interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge exchanges the internal VLAN tag for an internal
- tunnel ID.
-#. The OVS integration bridge patch port (6) forwards the packet to the
- OVS tunnel bridge patch port (7).
-#. The OVS tunnel bridge (8) wraps the packet using VNI 101.
-#. The underlying physical interface (9) for overlay networks forwards
- the packet to compute node 2 via the overlay network (10).
-
-The following steps involve compute node 2:
-
-#. The underlying physical interface (11) for overlay networks forwards
- the packet to the OVS tunnel bridge (12).
-#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID
- to it.
-#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal
- VLAN tag.
-#. The OVS tunnel bridge ``patch-int`` patch port (13) forwards the packet
- to the OVS integration bridge ``patch-tun`` patch port (14).
-#. The OVS integration bridge removes the internal VLAN tag from the packet.
-#. The OVS integration bridge security group port (15) forwards the packet
- to the security group bridge OVS port (16) via ``veth`` pair.
-#. Security group rules (17) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (18) forwards the packet to the
- instance 2 interface (19) via ``veth`` pair.
-
-.. image:: figures/deploy-ovs-selfservice-flowew1.png
- :alt: Self-service networks using Open vSwitch - network traffic flow - east/west scenario 1
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-East-west scenario 2: Instances on different networks
------------------------------------------------------
-
-Instances using a fixed IPv4/IPv6 address or floating IPv4 address communicate
-via router on the network node. The self-service networks must reside on the
-same router.
-
-* Instance 1 resides on compute node 1 and uses self-service network 1.
-* Instance 2 resides on compute node 1 and uses self-service network 2.
-* Instance 1 sends a packet to instance 2.
-
-.. note::
-
- Both instances reside on the same compute node to illustrate how VXLAN
- enables multiple overlays to use the same layer-3 network.
-
-The following steps involve the compute node:
-
-#. The instance interface (1) forwards the packet to the security group
- bridge instance port (2) via ``veth`` pair.
-#. Security group rules (3) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge OVS port (4) forwards the packet to the OVS
- integration bridge security group port (5) via ``veth`` pair.
-#. The OVS integration bridge adds an internal VLAN tag to the packet.
-#. The OVS integration bridge exchanges the internal VLAN tag for an internal
- tunnel ID.
-#. The OVS integration bridge ``patch-tun`` patch port (6) forwards the
- packet to the OVS tunnel bridge ``patch-int`` patch port (7).
-#. The OVS tunnel bridge (8) wraps the packet using VNI 101.
-#. The underlying physical interface (9) for overlay networks forwards
- the packet to the network node via the overlay network (10).
-
-The following steps involve the network node:
-
-#. The underlying physical interface (11) for overlay networks forwards
- the packet to the OVS tunnel bridge (12).
-#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID
- to it.
-#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal
- VLAN tag.
-#. The OVS tunnel bridge ``patch-int`` patch port (13) forwards the packet to
- the OVS integration bridge ``patch-tun`` patch port (14).
-#. The OVS integration bridge port for self-service network 1 (15)
- removes the internal VLAN tag and forwards the packet to the self-service
- network 1 interface (16) in the router namespace.
-#. The router sends the packet to the next-hop IP address, typically the
- gateway IP address on self-service network 2, via the self-service
- network 2 interface (17).
-#. The router forwards the packet to the OVS integration bridge port for
- self-service network 2 (18).
-#. The OVS integration bridge adds the internal VLAN tag to the packet.
-#. The OVS integration bridge exchanges the internal VLAN tag for an internal
- tunnel ID.
-#. The OVS integration bridge ``patch-tun`` patch port (19) forwards the
- packet to the OVS tunnel bridge ``patch-int`` patch port (20).
-#. The OVS tunnel bridge (21) wraps the packet using VNI 102.
-#. The underlying physical interface (22) for overlay networks forwards
- the packet to the compute node via the overlay network (23).
-
-The following steps involve the compute node:
-
-#. The underlying physical interface (24) for overlay networks forwards
- the packet to the OVS tunnel bridge (25).
-#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel
- ID to it.
-#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal
- VLAN tag.
-#. The OVS tunnel bridge ``patch-int`` patch port (26) forwards the packet
- to the OVS integration bridge ``patch-tun`` patch port (27).
-#. The OVS integration bridge removes the internal VLAN tag from the packet.
-#. The OVS integration bridge security group port (28) forwards the packet
- to the security group bridge OVS port (29) via ``veth`` pair.
-#. Security group rules (30) on the security group bridge handle firewalling
- and connection tracking for the packet.
-#. The security group bridge instance port (31) forwards the packet to the
- instance interface (32) via ``veth`` pair.
-
-.. note::
-
- Return traffic follows similar steps in reverse.
-
-.. image:: figures/deploy-ovs-selfservice-flowew2.png
- :alt: Self-service networks using Open vSwitch - network traffic flow - east/west scenario 2
diff --git a/doc/networking-guide/source/deploy-ovs.rst b/doc/networking-guide/source/deploy-ovs.rst
deleted file mode 100644
index 00677e606c..0000000000
--- a/doc/networking-guide/source/deploy-ovs.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-.. _deploy-ovs:
-
-=============================
-Open vSwitch mechanism driver
-=============================
-
-The Open vSwitch (OVS) mechanism driver uses a combination of OVS and Linux
-bridges as interconnection devices. However, optionally enabling the OVS
-native implementation of security groups removes the dependency on Linux
-bridges.
-
-We recommend using Open vSwitch version 2.4 or higher. Optional features
-may require a higher minimum version.
-
-.. toctree::
- :maxdepth: 2
-
- deploy-ovs-provider
- deploy-ovs-selfservice
- deploy-ovs-ha-vrrp
- deploy-ovs-ha-dvr
diff --git a/doc/networking-guide/source/deploy.rst b/doc/networking-guide/source/deploy.rst
deleted file mode 100644
index 06640a61ab..0000000000
--- a/doc/networking-guide/source/deploy.rst
+++ /dev/null
@@ -1,140 +0,0 @@
-.. _deploy:
-
-===================
-Deployment examples
-===================
-
-The following deployment examples provide building blocks of increasing
-architectural complexity using the Networking service reference architecture
-which implements the Modular Layer 2 (ML2) plug-in and either the Open
-vSwitch (OVS) or Linux bridge mechanism drivers. Both mechanism drivers support
-the same basic features such as provider networks, self-service networks,
-and routers. However, more complex features often require a particular
-mechanism driver. Thus, you should consider the requirements (or goals) of
-your cloud before choosing a mechanism driver.
-
-After choosing a :ref:`mechanism driver `, the
-deployment examples generally include the following building blocks:
-
-#. Provider (public/external) networks using IPv4 and IPv6
-
-#. Self-service (project/private/internal) networks including routers using
- IPv4 and IPv6
-
-#. High-availability features
-
-#. Other features such as BGP dynamic routing
-
-Prerequisites
-~~~~~~~~~~~~~
-
-Prerequisites, typically hardware requirements, generally increase with each
-building block. Each building block depends on proper deployment and operation
-of prior building blocks. For example, the first building block (provider
-networks) only requires one controller and two compute nodes, the second
-building block (self-service networks) adds a network node, and the
-high-availability building blocks typically add a second network node for a
-total of five nodes. Each building block could also require additional
-infrastructure or changes to existing infrastructure such as networks.
-
-For basic configuration of prerequisites, see the
-`Ocata Install Tutorials and Guides `__.
-
-.. note::
-
- Example commands using the ``openstack`` client assume version 3.2.0 or
- higher.
-
-Nodes
------
-
-The deployment examples refer one or more of the following nodes:
-
-* Controller: Contains control plane components of OpenStack services
- and their dependencies.
-
- * Two network interfaces: management and provider.
- * Operational SQL server with databases necessary for each OpenStack
- service.
- * Operational message queue service.
- * Operational OpenStack Identity (keystone) service.
- * Operational OpenStack Image Service (glance).
- * Operational management components of the OpenStack Compute (nova) service
- with appropriate configuration to use the Networking service.
- * OpenStack Networking (neutron) server service and ML2 plug-in.
-
-* Network: Contains the OpenStack Networking service layer-3 (routing)
- component. High availability options may include additional components.
-
- * Three network interfaces: management, overlay, and provider.
- * OpenStack Networking layer-2 (switching) agent, layer-3 agent, and any
- dependencies.
-
-* Compute: Contains the hypervisor component of the OpenStack Compute service
- and the OpenStack Networking layer-2, DHCP, and metadata components.
- High-availability options may include additional components.
-
- * Two network interfaces: management and provider.
- * Operational hypervisor components of the OpenStack Compute (nova) service
- with appropriate configuration to use the Networking service.
- * OpenStack Networking layer-2 agent, DHCP agent, metadata agent, and any
- dependencies.
-
-Each building block defines the quantity and types of nodes including the
-components on each node.
-
-.. note::
-
- You can virtualize these nodes for demonstration, training, or
- proof-of-concept purposes. However, you must use physical hosts for
- evaluation of performance or scaling.
-
-Networks and network interfaces
--------------------------------
-
-The deployment examples refer to one or more of the following networks
-and network interfaces:
-
-* Management: Handles API requests from clients and control plane traffic for
- OpenStack services including their dependencies.
-* Overlay: Handles self-service networks using an overlay protocol such as
- VXLAN or GRE.
-* Provider: Connects virtual and physical networks at layer-2. Typically
- uses physical network infrastructure for switching/routing traffic to
- external networks such as the Internet.
-
-.. note::
-
- For best performance, 10+ Gbps physical network infrastructure should
- support jumbo frames.
-
-For illustration purposes, the configuration examples typically reference
-the following IP address ranges:
-
-* Provider network 1:
-
- * IPv4: 203.0.113.0/24
- * IPv6: fd00:203:0:113::/64
-
-* Provider network 2:
-
- * IPv4: 192.0.2.0/24
- * IPv6: fd00:192:0:2::/64
-
-* Self-service networks:
-
- * IPv4: 198.51.100.0/24 in /24 segments
- * IPv6: fd00:198:51::/48 in /64 segments
-
-You may change them to work with your particular network infrastructure.
-
-.. _deploy-mechanism-drivers:
-
-Mechanism drivers
-~~~~~~~~~~~~~~~~~
-
-.. toctree::
- :maxdepth: 1
-
- deploy-lb
- deploy-ovs
diff --git a/doc/networking-guide/source/figures/NetworkTypes.png b/doc/networking-guide/source/figures/NetworkTypes.png
deleted file mode 100644
index 7fc50e1383..0000000000
Binary files a/doc/networking-guide/source/figures/NetworkTypes.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/NetworkTypes.svg b/doc/networking-guide/source/figures/NetworkTypes.svg
deleted file mode 100644
index 10e1125ddc..0000000000
--- a/doc/networking-guide/source/figures/NetworkTypes.svg
+++ /dev/null
@@ -1,20392 +0,0 @@
-
-
-
-
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.graffle b/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.graffle
deleted file mode 100644
index 6ef656b452..0000000000
Binary files a/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.png b/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.png
deleted file mode 100644
index a4bd88aa2f..0000000000
Binary files a/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.svg b/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.svg
deleted file mode 100644
index 04c76457b1..0000000000
--- a/doc/networking-guide/source/figures/bgp-dynamic-routing-example1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.graffle b/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.graffle
deleted file mode 100644
index 83d5d4b229..0000000000
Binary files a/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.png b/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.png
deleted file mode 100644
index e4a266ee15..0000000000
Binary files a/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.svg b/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.svg
deleted file mode 100644
index 4ed8e6a1dc..0000000000
--- a/doc/networking-guide/source/figures/bgp-dynamic-routing-example2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.graffle b/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.graffle
deleted file mode 100644
index 04741e8592..0000000000
Binary files a/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.png b/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.png
deleted file mode 100644
index 2a6c802d6d..0000000000
Binary files a/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.svg b/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.svg
deleted file mode 100644
index 2efd86ebdc..0000000000
--- a/doc/networking-guide/source/figures/bgp-dynamic-routing-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/config-macvtap-compute1.png b/doc/networking-guide/source/figures/config-macvtap-compute1.png
deleted file mode 100644
index f675f79b76..0000000000
Binary files a/doc/networking-guide/source/figures/config-macvtap-compute1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/config-macvtap-compute2.png b/doc/networking-guide/source/figures/config-macvtap-compute2.png
deleted file mode 100644
index d110379ece..0000000000
Binary files a/doc/networking-guide/source/figures/config-macvtap-compute2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/demo_multiple_dhcp_agents.png b/doc/networking-guide/source/figures/demo_multiple_dhcp_agents.png
deleted file mode 100644
index 3867039563..0000000000
Binary files a/doc/networking-guide/source/figures/demo_multiple_dhcp_agents.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.graffle b/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.graffle
deleted file mode 100644
index 78f379cb0f..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.png b/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.png
deleted file mode 100644
index d2e9a84565..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.svg b/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.svg
deleted file mode 100644
index ddfa554672..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.graffle b/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.graffle
deleted file mode 100644
index 3e7896f02c..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.png b/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.png
deleted file mode 100644
index ab60fb3f78..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.svg b/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.svg
deleted file mode 100644
index 3c98f805fd..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-ha-vrrp-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.graffle b/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.graffle
deleted file mode 100644
index e5abe24509..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.png b/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.png
deleted file mode 100644
index 919b8eec8d..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.svg b/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.svg
deleted file mode 100644
index ac2b83e4fd..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-provider-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.graffle b/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.graffle
deleted file mode 100644
index ff024f0042..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.png b/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.png
deleted file mode 100644
index 1038a20289..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.svg b/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.svg
deleted file mode 100644
index 5c982d2a1a..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-provider-compconn2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.graffle b/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.graffle
deleted file mode 100644
index b44116bb5d..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.png b/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.png
deleted file mode 100644
index 3eb9d184ac..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.svg b/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.svg
deleted file mode 100644
index f6a7569e1e..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-provider-flowew1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.graffle b/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.graffle
deleted file mode 100644
index ab875018a1..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.png b/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.png
deleted file mode 100644
index c7d31e5e74..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.svg b/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.svg
deleted file mode 100644
index 0ec8796130..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-provider-flowew2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.graffle b/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.graffle
deleted file mode 100644
index c6c9eaacfa..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.png b/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.png
deleted file mode 100644
index 82e03cb0ba..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.svg b/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.svg
deleted file mode 100644
index c7e1aab672..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-provider-flowns1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-overview.graffle b/doc/networking-guide/source/figures/deploy-lb-provider-overview.graffle
deleted file mode 100644
index d1e8e39dab..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-overview.png b/doc/networking-guide/source/figures/deploy-lb-provider-overview.png
deleted file mode 100644
index 8d1a0cc106..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-provider-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-provider-overview.svg b/doc/networking-guide/source/figures/deploy-lb-provider-overview.svg
deleted file mode 100644
index faa93c0762..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-provider-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.graffle b/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.graffle
deleted file mode 100644
index afd7f1393f..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.png b/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.png
deleted file mode 100644
index db0e9b3b89..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.svg b/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.svg
deleted file mode 100644
index cc32b58fc2..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-selfservice-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.graffle b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.graffle
deleted file mode 100644
index c153ae8d73..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.png b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.png
deleted file mode 100644
index a5d9445a7e..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.svg b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.svg
deleted file mode 100644
index d47cfe974c..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.graffle b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.graffle
deleted file mode 100644
index 290912bc38..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.png b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.png
deleted file mode 100644
index 30980a333d..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.svg b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.svg
deleted file mode 100644
index 85d4863554..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowew2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.graffle b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.graffle
deleted file mode 100644
index c6eac60a73..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.png b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.png
deleted file mode 100644
index 806e0a412d..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.svg b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.svg
deleted file mode 100644
index 8b437c68cf..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.graffle b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.graffle
deleted file mode 100644
index c9071181ac..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.png b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.png
deleted file mode 100644
index 3d3d64b796..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.svg b/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.svg
deleted file mode 100644
index cd64cd86b8..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-selfservice-flowns2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.graffle b/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.graffle
deleted file mode 100644
index 9859ee24a0..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.png b/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.png
deleted file mode 100644
index bce458eec7..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.svg b/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.svg
deleted file mode 100644
index 81e524e5fb..0000000000
--- a/doc/networking-guide/source/figures/deploy-lb-selfservice-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.graffle
deleted file mode 100644
index 94fd9983ea..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.png b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.png
deleted file mode 100644
index d1bb34c4b3..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.svg
deleted file mode 100644
index d2ec7345e3..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.graffle
deleted file mode 100644
index 1bd2dbba01..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.png b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.png
deleted file mode 100644
index 8f63e417a8..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.svg
deleted file mode 100644
index 9f849e636a..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowew1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.graffle
deleted file mode 100644
index e007d507f9..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.png b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.png
deleted file mode 100644
index bf7af55c86..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.svg
deleted file mode 100644
index 9270e15dab..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.graffle
deleted file mode 100644
index 27fd411700..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.png b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.png
deleted file mode 100644
index c1c67085e4..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.svg
deleted file mode 100644
index e72bc61010..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-flowns2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.graffle
deleted file mode 100644
index 69ed88d35e..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.png b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.png
deleted file mode 100644
index 1b12b3e0fc..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.svg
deleted file mode 100644
index 09560e94a0..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-dvr-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.graffle
deleted file mode 100644
index 5675be93b4..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.png b/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.png
deleted file mode 100644
index cafc1ba663..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.svg
deleted file mode 100644
index 1685a0f340..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.graffle b/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.graffle
deleted file mode 100644
index b5b8fbdab8..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.png b/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.png
deleted file mode 100644
index 9f1e69b93f..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.svg b/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.svg
deleted file mode 100644
index 0833cb2670..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-ha-vrrp-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.graffle b/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.graffle
deleted file mode 100644
index b41509c4d1..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.png b/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.png
deleted file mode 100644
index dce7d3899a..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.svg b/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.svg
deleted file mode 100644
index 4ec9028d9a..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.graffle b/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.graffle
deleted file mode 100644
index f01b00ad98..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.png b/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.png
deleted file mode 100644
index ce72ca1a2a..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.svg b/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.svg
deleted file mode 100644
index 481da72132..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-provider-compconn2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.graffle b/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.graffle
deleted file mode 100644
index 37ed2786cd..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.png b/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.png
deleted file mode 100644
index d6bd50a4b8..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.svg b/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.svg
deleted file mode 100644
index 7b0b24efff..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.graffle b/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.graffle
deleted file mode 100644
index d6e37f334b..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.png b/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.png
deleted file mode 100644
index cc2723d9da..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.svg b/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.svg
deleted file mode 100644
index a67769522a..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-provider-flowew2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.graffle b/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.graffle
deleted file mode 100644
index 7eb3edd254..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.png b/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.png
deleted file mode 100644
index ae00b628e1..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.svg b/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.svg
deleted file mode 100644
index 171fc0f6e8..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-provider-flowns1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-overview.graffle b/doc/networking-guide/source/figures/deploy-ovs-provider-overview.graffle
deleted file mode 100644
index 9d5abfa6aa..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-overview.png b/doc/networking-guide/source/figures/deploy-ovs-provider-overview.png
deleted file mode 100644
index 678828d24a..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-provider-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-provider-overview.svg b/doc/networking-guide/source/figures/deploy-ovs-provider-overview.svg
deleted file mode 100644
index f09c846996..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-provider-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.graffle b/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.graffle
deleted file mode 100644
index 21cae57e03..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.png b/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.png
deleted file mode 100644
index 24af2c4e12..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.svg b/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.svg
deleted file mode 100644
index 1e7f11528f..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-selfservice-compconn1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.graffle b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.graffle
deleted file mode 100644
index 09466c6322..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.png b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.png
deleted file mode 100644
index c92b3a67a7..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.svg b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.svg
deleted file mode 100644
index 8d3257a5b8..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.graffle b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.graffle
deleted file mode 100644
index ec61be50f4..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.png b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.png
deleted file mode 100644
index c29a7c9d05..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.svg b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.svg
deleted file mode 100644
index 451cec0e1e..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowew2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.graffle b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.graffle
deleted file mode 100644
index 005c9be0ea..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.png b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.png
deleted file mode 100644
index f1d83ce183..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.svg b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.svg
deleted file mode 100644
index c48b5ee898..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns1.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.graffle b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.graffle
deleted file mode 100644
index 6eced6be25..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.png b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.png
deleted file mode 100644
index a4b4b2506f..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.svg b/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.svg
deleted file mode 100644
index a87277a429..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-selfservice-flowns2.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.graffle b/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.graffle
deleted file mode 100644
index 93ad8d94ee..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.graffle and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.png b/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.png
deleted file mode 100644
index 623793e2db..0000000000
Binary files a/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.svg b/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.svg
deleted file mode 100644
index febd75e441..0000000000
--- a/doc/networking-guide/source/figures/deploy-ovs-selfservice-overview.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/doc/networking-guide/source/figures/fwaas.png b/doc/networking-guide/source/figures/fwaas.png
deleted file mode 100644
index a0b6d5456f..0000000000
Binary files a/doc/networking-guide/source/figures/fwaas.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/fwaas.svg b/doc/networking-guide/source/figures/fwaas.svg
deleted file mode 100644
index 8789f94ca1..0000000000
--- a/doc/networking-guide/source/figures/fwaas.svg
+++ /dev/null
@@ -1,33668 +0,0 @@
-
-
-
-
diff --git a/doc/networking-guide/source/figures/lbaasv2-diagram.png b/doc/networking-guide/source/figures/lbaasv2-diagram.png
deleted file mode 100644
index f77453b01b..0000000000
Binary files a/doc/networking-guide/source/figures/lbaasv2-diagram.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/lbaasv2-diagram.svg b/doc/networking-guide/source/figures/lbaasv2-diagram.svg
deleted file mode 100644
index c431292b37..0000000000
--- a/doc/networking-guide/source/figures/lbaasv2-diagram.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/port-chain-architecture-diagram.png b/doc/networking-guide/source/figures/port-chain-architecture-diagram.png
deleted file mode 100644
index 1262b28005..0000000000
Binary files a/doc/networking-guide/source/figures/port-chain-architecture-diagram.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/port-chain-diagram.png b/doc/networking-guide/source/figures/port-chain-diagram.png
deleted file mode 100644
index 6ac6b81312..0000000000
Binary files a/doc/networking-guide/source/figures/port-chain-diagram.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-compute1.svg b/doc/networking-guide/source/figures/scenario-classic-mt-compute1.svg
deleted file mode 100644
index 44c24c6d14..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-compute1.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-compute2.svg b/doc/networking-guide/source/figures/scenario-classic-mt-compute2.svg
deleted file mode 100644
index d6fedf338a..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-compute2.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-flowew1.png b/doc/networking-guide/source/figures/scenario-classic-mt-flowew1.png
deleted file mode 100644
index 14cedbaf59..0000000000
Binary files a/doc/networking-guide/source/figures/scenario-classic-mt-flowew1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-flowew1.svg b/doc/networking-guide/source/figures/scenario-classic-mt-flowew1.svg
deleted file mode 100644
index b74823afc5..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-flowew1.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-flowew2.png b/doc/networking-guide/source/figures/scenario-classic-mt-flowew2.png
deleted file mode 100644
index fd34320f21..0000000000
Binary files a/doc/networking-guide/source/figures/scenario-classic-mt-flowew2.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-flowew2.svg b/doc/networking-guide/source/figures/scenario-classic-mt-flowew2.svg
deleted file mode 100644
index 1a0bc42c68..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-flowew2.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-flowns1.png b/doc/networking-guide/source/figures/scenario-classic-mt-flowns1.png
deleted file mode 100644
index 0c56f590fa..0000000000
Binary files a/doc/networking-guide/source/figures/scenario-classic-mt-flowns1.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-flowns1.svg b/doc/networking-guide/source/figures/scenario-classic-mt-flowns1.svg
deleted file mode 100644
index 40de69f0d3..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-flowns1.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-networks.png b/doc/networking-guide/source/figures/scenario-classic-mt-networks.png
deleted file mode 100644
index f046929d2e..0000000000
Binary files a/doc/networking-guide/source/figures/scenario-classic-mt-networks.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-networks.svg b/doc/networking-guide/source/figures/scenario-classic-mt-networks.svg
deleted file mode 100644
index 40b1421984..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-networks.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-services.png b/doc/networking-guide/source/figures/scenario-classic-mt-services.png
deleted file mode 100644
index 22eeb112d9..0000000000
Binary files a/doc/networking-guide/source/figures/scenario-classic-mt-services.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt-services.svg b/doc/networking-guide/source/figures/scenario-classic-mt-services.svg
deleted file mode 100644
index 0ef86831af..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt-services.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt.png b/doc/networking-guide/source/figures/scenario-classic-mt.png
deleted file mode 100644
index 9192b9db53..0000000000
Binary files a/doc/networking-guide/source/figures/scenario-classic-mt.png and /dev/null differ
diff --git a/doc/networking-guide/source/figures/scenario-classic-mt.svg b/doc/networking-guide/source/figures/scenario-classic-mt.svg
deleted file mode 100644
index 43e8b4f546..0000000000
--- a/doc/networking-guide/source/figures/scenario-classic-mt.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
\ No newline at end of file
diff --git a/doc/networking-guide/source/fwaas-v1-scenario.rst b/doc/networking-guide/source/fwaas-v1-scenario.rst
deleted file mode 100644
index ae76b89efc..0000000000
--- a/doc/networking-guide/source/fwaas-v1-scenario.rst
+++ /dev/null
@@ -1,137 +0,0 @@
-Firewall-as-a-Service (FWaaS) v1 scenario
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Enable FWaaS v1
----------------
-
-FWaaS management options are also available in the Dashboard.
-
-#. Enable the FWaaS plug-in in the ``/etc/neutron/neutron.conf`` file:
-
- .. code-block:: ini
-
- service_plugins = firewall
-
- [service_providers]
- # ...
- service_provider = FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-
- [fwaas]
- driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
- enabled = True
-
- .. note::
-
- On Ubuntu, modify the ``[fwaas]`` section in the
- ``/etc/neutron/fwaas_driver.ini`` file instead of
- ``/etc/neutron/neutron.conf``.
-
-#. Configure the FWaaS plugin for the L3 agent.
-
- In the ``AGENT`` section of ``l3_agent.ini``, make sure the FWaaS extension
- is loaded:
-
- .. code-block:: ini
-
- [AGENT]
- extensions = fwaas
-
- Edit the FWaaS section in the ``/etc/neutron/neutron.conf`` file to indicate
- the agent version and driver:
-
- .. code-block:: ini
-
- [fwaas]
- agent_version = v1
- driver = iptables
- enabled = True
- conntrack_driver = conntrack
-
-#. Create the required tables in the database:
-
- .. code-block:: console
-
- # neutron-db-manage --subproject neutron-fwaas upgrade head
-
-#. Enable the option in the ``local_settings.py`` file,
- which is typically located on the controller node:
-
- .. code-block:: python
-
- OPENSTACK_NEUTRON_NETWORK = {
- # ...
- 'enable_firewall' = True,
- # ...
- }
-
- .. note::
-
- By default, ``enable_firewall`` option value is ``True`` in
- ``local_settings.py`` file.
-
- Apply the settings by restarting the web server.
-
-#. Restart the ``neutron-l3-agent`` and ``neutron-server`` services
- to apply the settings.
-
-Configure Firewall-as-a-Service v1
-----------------------------------
-
-Create the firewall rules and create a policy that contains them.
-Then, create a firewall that applies the policy.
-
-#. Create a firewall rule:
-
- .. code-block:: console
-
- $ neutron firewall-rule-create --protocol {tcp,udp,icmp,any} \
- --source-ip-address SOURCE_IP_ADDRESS \
- --destination-ip-address DESTINATION_IP_ADDRESS \
- --source-port SOURCE_PORT_RANGE --destination-port DEST_PORT_RANGE \
- --action {allow,deny,reject}
-
- The Networking client requires a protocol value. If the rule is protocol
- agnostic, you can use the ``any`` value.
-
- .. note::
-
- When the source or destination IP address are not of the same IP
- version (for example, IPv6), the command returns an error.
-
-#. Create a firewall policy:
-
- .. code-block:: console
-
- $ neutron firewall-policy-create --firewall-rules \
- "FIREWALL_RULE_IDS_OR_NAMES" myfirewallpolicy
-
- Separate firewall rule IDs or names with spaces. The order in which you
- specify the rules is important.
-
- You can create a firewall policy without any rules and add rules later,
- as follows:
-
- * To add multiple rules, use the update operation.
-
- * To add a single rule, use the insert-rule operation.
-
- For more details, see `Networking command-line client
- `_
- in the OpenStack Command-Line Interface Reference.
-
- .. note::
-
- FWaaS always adds a default ``deny all`` rule at the lowest precedence
- of each policy. Consequently, a firewall policy with no rules blocks
- all traffic by default.
-
-#. Create a firewall:
-
- .. code-block:: console
-
- $ neutron firewall-create FIREWALL_POLICY_UUID
-
- .. note::
-
- The firewall remains in PENDING\_CREATE state until you create a
- Networking router and attach an interface to it.
diff --git a/doc/networking-guide/source/fwaas-v2-scenario.rst b/doc/networking-guide/source/fwaas-v2-scenario.rst
deleted file mode 100644
index e784da3be0..0000000000
--- a/doc/networking-guide/source/fwaas-v2-scenario.rst
+++ /dev/null
@@ -1,111 +0,0 @@
-Firewall-as-a-Service (FWaaS) v2 scenario
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Enable FWaaS v2
----------------
-
-#. Enable the FWaaS plug-in in the ``/etc/neutron/neutron.conf`` file:
-
- .. code-block:: ini
-
- service_plugins = firewall_v2
-
- [service_providers]
- # ...
- service_provider = FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-
- [fwaas]
- agent_version = v2
- driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas_v2.IptablesFwaasDriver
- enabled = True
-
- .. note::
-
- On Ubuntu and Centos, modify the ``[fwaas]`` section in the
- ``/etc/neutron/fwaas_driver.ini`` file instead of
- ``/etc/neutron/neutron.conf``.
-
-#. Configure the FWaaS plugin for the L3 agent.
-
- In the ``AGENT`` section of ``l3_agent.ini``, make sure the FWaaS extension
- is loaded:
-
- .. code-block:: ini
-
- [AGENT]
- extensions = fwaas
-
-#. Create the required tables in the database:
-
- .. code-block:: console
-
- # neutron-db-manage --subproject neutron-fwaas upgrade head
-
-#. Restart the ``neutron-l3-agent`` and ``neutron-server`` services
- to apply the settings.
-
- .. note::
-
- Firewall v2 is not supported by horizon yet.
-
-Configure Firewall-as-a-Service v2
-----------------------------------
-
-Create the firewall rules and create a policy that contains them.
-Then, create a firewall that applies the policy.
-
-#. Create a firewall rule:
-
- .. code-block:: console
-
- $ neutron firewall-rule-create --protocol {tcp,udp,icmp,any} \
- --source-ip-address SOURCE_IP_ADDRESS \
- --destination-ip-address DESTINATION_IP_ADDRESS \
- --source-port SOURCE_PORT_RANGE --destination-port DEST_PORT_RANGE \
- --action {allow,deny,reject}
-
- The Networking client requires a protocol value. If the rule is protocol
- agnostic, you can use the ``any`` value.
-
- .. note::
-
- When the source or destination IP address are not of the same IP
- version (for example, IPv6), the command returns an error.
-
-#. Create a firewall policy:
-
- .. code-block:: console
-
- $ neutron firewall-policy-create --firewall-rules \
- "FIREWALL_RULE_IDS_OR_NAMES" myfirewallpolicy
-
- Separate firewall rule IDs or names with spaces. The order in which you
- specify the rules is important.
-
- You can create a firewall policy without any rules and add rules later,
- as follows:
-
- * To add multiple rules, use the update operation.
-
- * To add a single rule, use the insert-rule operation.
-
- For more details, see `Networking command-line client
- `_
- in the OpenStack Command-Line Interface Reference.
-
- .. note::
-
- FWaaS always adds a default ``deny all`` rule at the lowest precedence
- of each policy. Consequently, a firewall policy with no rules blocks
- all traffic by default.
-
-#. Create a firewall:
-
- .. code-block:: console
-
- $ neutron firewall-create FIREWALL_POLICY_UUID
-
- .. note::
-
- The firewall remains in PENDING\_CREATE state until you create a
- Networking router and attach an interface to it.
diff --git a/doc/networking-guide/source/fwaas.rst b/doc/networking-guide/source/fwaas.rst
deleted file mode 100644
index 9ece4849ae..0000000000
--- a/doc/networking-guide/source/fwaas.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-Firewall-as-a-Service (FWaaS)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Firewall-as-a-Service (FWaaS) plug-in applies firewalls to
-OpenStack objects such as projects, routers, and router ports.
-
-.. note::
-
- We anticipate this to expand to VM ports in the Ocata cycle.
-
-The central concepts with OpenStack firewalls are the notions of a firewall
-policy and a firewall rule. A policy is an ordered collection of rules. A rule
-specifies a collection of attributes (such as port ranges, protocol, and IP
-addresses) that constitute match criteria and an action to take (allow or deny)
-on matched traffic. A policy can be made public, so it can be shared across
-projects.
-
-Firewalls are implemented in various ways, depending on the driver used. For
-example, an iptables driver implements firewalls using iptable rules. An
-OpenVSwitch driver implements firewall rules using flow entries in flow tables.
-A Cisco firewall driver manipulates NSX devices.
-
-FWaaS v1
---------
-
-The original FWaaS implementation, v1, provides protection for routers. When
-a firewall is applied to a router, all internal ports are protected.
-
-The following diagram depicts FWaaS v1 protection. It illustrates the flow of
-ingress and egress traffic for the VM2 instance:
-
-.. figure:: figures/fwaas.png
-
-FWaaS v2
---------
-
-The newer FWaaS implementation, v2, provides a much more granular service.
-The notion of a firewall has been replaced with firewall group to indicate
-that a firewall consists of two policies: an ingress policy and an egress
-policy. A firewall group is applied not at the router level (all ports on a
-router) but at the port level. Currently, router ports can be specified. For
-Ocata, VM ports can also be specified.
-
-FWaaS v1 versus v2
-------------------
-
-The following table compares v1 and v2 features.
-
-+------------------------------------------+-----+------+
-| Feature | v1 | v2 |
-+==========================================+=====+======+
-| Supports L3 firewalling for routers | YES | NO* |
-+------------------------------------------+-----+------+
-| Supports L3 firewalling for router ports | NO | YES |
-+------------------------------------------+-----+------+
-| Supports L2 firewalling (VM ports) | NO | NO** |
-+------------------------------------------+-----+------+
-| CLI support | YES | YES |
-+------------------------------------------+-----+------+
-| Horizon support | YES | NO |
-+------------------------------------------+-----+------+
-
-\* A firewall group can be applied to all ports on a given router in order to
-effect this.
-
-\*\* This feature is planned for Ocata.
-
-For further information, see `v1 configuration guide
-`_ or
-`v2 configuration guide
-`_.
diff --git a/doc/networking-guide/source/index.rst b/doc/networking-guide/source/index.rst
deleted file mode 100644
index 8045667063..0000000000
--- a/doc/networking-guide/source/index.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. meta::
- :description: This guide targets OpenStack administrators seeking
- to deploy and manage OpenStack Networking (neutron).
- :keywords: neutron, networking, OpenStack
-
-==========================
-OpenStack Networking Guide
-==========================
-
-Abstract
-~~~~~~~~
-
-This guide targets OpenStack administrators seeking to deploy and
-manage OpenStack Networking (neutron).
-
-This guide documents the OpenStack Ocata release.
-
-Contents
-~~~~~~~~
-
-.. toctree::
- :maxdepth: 2
-
- common/conventions
- intro
- config
- deploy
- ops
- migration
- misc
- common/appendix.rst
diff --git a/doc/networking-guide/source/intro-basic-networking.rst b/doc/networking-guide/source/intro-basic-networking.rst
deleted file mode 100644
index 8f645fd079..0000000000
--- a/doc/networking-guide/source/intro-basic-networking.rst
+++ /dev/null
@@ -1,481 +0,0 @@
-.. _intro-basic-networking:
-
-================
-Basic networking
-================
-
-Ethernet
-~~~~~~~~
-
-Ethernet is a networking protocol, specified by the IEEE 802.3 standard. Most
-wired network interface cards (NICs) communicate using Ethernet.
-
-In the `OSI model `_ of networking
-protocols, Ethernet occupies the second layer, which is known as the data
-link layer. When discussing Ethernet, you will often hear terms such as
-*local network*, *layer 2*, *L2*, *link layer* and *data link layer*.
-
-In an Ethernet network, the hosts connected to the network communicate
-by exchanging *frames*. Every host on an Ethernet network is uniquely
-identified by an address called the media access control (MAC) address.
-In particular, every virtual machine instance in an OpenStack environment
-has a unique MAC address, which is different from the MAC address of the
-compute host. A MAC address has 48 bits and is typically represented as a
-hexadecimal string, such as ``08:00:27:b9:88:74``. The MAC address is
-hard-coded into the NIC by the manufacturer, although modern NICs
-allow you to change the MAC address programmatically. In Linux, you can
-retrieve the MAC address of a NIC using the :command:`ip` command:
-
-.. code-block:: console
-
- $ ip link show eth0
- 2: eth0: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
- link/ether 08:00:27:b9:88:74 brd ff:ff:ff:ff:ff:ff
-
-Conceptually, you can think of an Ethernet network as a single bus
-that each of the network hosts connects to. In early implementations,
-an Ethernet network consisted of a single coaxial cable that hosts
-would tap into to connect to the network. However, network hosts in
-modern Ethernet networks connect directly to a network device called a
-*switch*. Still, this conceptual model is useful, and in network diagrams
-(including those generated by the OpenStack dashboard) an Ethernet network
-is often depicted as if it was a single bus. You'll sometimes hear an
-Ethernet network referred to as a *layer 2 segment*.
-
-In an Ethernet network, every host on the network can send a frame directly to
-every other host. An Ethernet network also supports broadcasts so
-that one host can send a frame to every host on the network by sending to the
-special MAC address ``ff:ff:ff:ff:ff:ff``. ARP_ and DHCP_
-are two notable protocols that use Ethernet broadcasts. Because Ethernet
-networks support broadcasts, you will sometimes hear an Ethernet network
-referred to as a *broadcast domain*.
-
-When a NIC receives an Ethernet frame, by default the NIC checks to
-see if the destination MAC address matches the address of the NIC (or
-the broadcast address), and the Ethernet frame is discarded if the MAC
-address does not match. For a compute host, this behavior is
-undesirable because the frame may be intended for one of the
-instances. NICs can be configured for *promiscuous mode*, where they
-pass all Ethernet frames to the operating system, even if the MAC
-address does not match. Compute hosts should always have the
-appropriate NICs configured for promiscuous mode.
-
-As mentioned earlier, modern Ethernet networks use switches to
-interconnect the network hosts. A switch is a box of networking
-hardware with a large number of ports that forward Ethernet frames
-from one connected host to another. When hosts first send frames over
-the switch, the switch doesn’t know which MAC address is associated
-with which port. If an Ethernet frame is destined for an unknown MAC
-address, the switch broadcasts the frame to all ports. The switch learns
-which MAC addresses are at which ports by observing the traffic. Once
-it knows which MAC address is associated with a port, it can send
-Ethernet frames to the correct port instead of broadcasting. The
-switch maintains the mappings of MAC addresses to switch ports in a
-table called a *forwarding table* or *forwarding information base*
-(FIB). Switches can be daisy-chained together, and the resulting
-connection of switches and hosts behaves like a single network.
-
-VLANs
-~~~~~
-
-VLAN is a networking technology that enables a single switch to act as
-if it was multiple independent switches. Specifically, two hosts that
-are connected to the same switch but on different VLANs do not see
-each other's traffic. OpenStack is able to take advantage of VLANs to
-isolate the traffic of different projects, even if the projects happen
-to have instances running on the same compute host. Each VLAN has an
-associated numerical ID, between 1 and 4095. We say "VLAN 15" to refer
-to the VLAN with a numerical ID of 15.
-
-To understand how VLANs work, let's consider VLAN applications in a
-traditional IT environment, where physical hosts are attached to a
-physical switch, and no virtualization is involved. Imagine a scenario
-where you want three isolated networks but you only have a single
-physical switch. The network administrator would choose three VLAN
-IDs, for example, 10, 11, and 12, and would configure the switch to
-associate switchports with VLAN IDs. For example, switchport 2 might be
-associated with VLAN 10, switchport 3 might be associated with VLAN
-11, and so forth. When a switchport is configured for a specific VLAN,
-it is called an *access port*. The switch is responsible for ensuring
-that the network traffic is isolated across the VLANs.
-
-Now consider the scenario that all of the switchports in the first
-switch become occupied, and so the organization buys a second switch
-and connects it to the first switch to expand the available number of
-switchports. The second switch is also configured to support VLAN IDs
-10, 11, and 12. Now imagine host A connected to switch 1 on a port
-configured for VLAN ID 10 sends an Ethernet frame intended for host B
-connected to switch 2 on a port configured for VLAN ID 10. When switch
-1 forwards the Ethernet frame to switch 2, it must communicate that
-the frame is associated with VLAN ID 10.
-
-If two switches are to be connected together, and the switches are configured
-for VLANs, then the switchports used for cross-connecting the switches must be
-configured to allow Ethernet frames from any VLAN to be
-forwarded to the other switch. In addition, the sending switch must tag each
-Ethernet frame with the VLAN ID so that the receiving switch can ensure that
-only hosts on the matching VLAN are eligible to receive the frame.
-
-A switchport that is configured to pass frames from all VLANs and tag them with
-the VLAN IDs is called a *trunk port*. IEEE 802.1Q is the network standard
-that describes how VLAN tags are encoded in Ethernet frames when trunking is
-being used.
-
-Note that if you are using VLANs on your physical switches to implement project
-isolation in your OpenStack cloud, you must ensure that all of your
-switchports are configured as trunk ports.
-
-It is important that you select a VLAN range not being used by your current
-network infrastructure. For example, if you estimate that your cloud must
-support a maximum of 100 projects, pick a VLAN range outside of that value,
-such as VLAN 200–299. OpenStack, and all physical network infrastructure that
-handles project networks, must then support this VLAN range.
-
-Trunking is used to connect between different switches. Each trunk uses a tag
-to identify which VLAN is in use. This ensures that switches on the same VLAN
-can communicate.
-
-
-.. _ARP:
-
-Subnets and ARP
-~~~~~~~~~~~~~~~
-
-While NICs use MAC addresses to address network hosts, TCP/IP applications use
-IP addresses. The Address Resolution Protocol (ARP) bridges the gap between
-Ethernet and IP by translating IP addresses into MAC addresses.
-
-IP addresses are broken up into two parts: a *network number* and a *host
-identifier*. Two hosts are on the same *subnet* if they have the same network
-number. Recall that two hosts can only communicate directly over Ethernet if
-they are on the same local network. ARP assumes that all machines that are in
-the same subnet are on the same local network. Network administrators must
-take care when assigning IP addresses and netmasks to hosts so that any two
-hosts that are in the same subnet are on the same local network, otherwise ARP
-does not work properly.
-
-To calculate the network number of an IP address, you must know the *netmask*
-associated with the address. A netmask indicates how many of the bits in
-the 32-bit IP address make up the network number.
-
-There are two syntaxes for expressing a netmask:
-
-* dotted quad
-* classless inter-domain routing (CIDR)
-
-Consider an IP address of 192.0.2.5, where the first 24 bits of the
-address are the network number. In dotted quad notation, the netmask
-would be written as ``255.255.255.0``. CIDR notation includes both the
-IP address and netmask, and this example would be written as
-``192.0.2.5/24``.
-
-.. note::
-
- Creating CIDR subnets including a multicast address or a loopback address
- cannot be used in an OpenStack environment. For example, creating a subnet
- using ``224.0.0.0/16`` or ``127.0.1.0/24`` is not supported.
-
-Sometimes we want to refer to a subnet, but not any particular IP
-address on the subnet. A common convention is to set the host
-identifier to all zeros to make reference to a subnet. For example, if
-a host's IP address is ``192.0.2.24/24``, then we would say the
-subnet is ``192.0.2.0/24``.
-
-To understand how ARP translates IP addresses to MAC addresses,
-consider the following example. Assume host *A* has an IP address of
-``192.0.2.5/24`` and a MAC address of ``fc:99:47:49:d4:a0``, and
-wants to send a packet to host *B* with an IP address of
-``192.0.2.7``. Note that the network number is the same for both
-hosts, so host *A* is able to send frames directly to host *B*.
-
-The first time host *A* attempts to communicate with host *B*, the
-destination MAC address is not known. Host *A* makes an ARP request to
-the local network. The request is a broadcast with a message like
-this:
-
-*To: everybody (ff:ff:ff:ff:ff:ff). I am looking for the computer who
-has IP address 192.0.2.7. Signed: MAC address fc:99:47:49:d4:a0*.
-
-Host *B* responds with a response like this:
-
-*To: fc:99:47:49:d4:a0. I have IP address 192.0.2.7. Signed: MAC
-address 54:78:1a:86:00:a5.*
-
-Host *A* then sends Ethernet frames to host *B*.
-
-You can initiate an ARP request manually using the :command:`arping` command.
-For example, to send an ARP request to IP address ``192.0.2.132``:
-
-.. code-block:: console
-
- $ arping -I eth0 192.0.2.132
- ARPING 192.0.2.132 from 192.0.2.131 eth0
- Unicast reply from 192.0.2.132 [54:78:1A:86:1C:0B] 0.670ms
- Unicast reply from 192.0.2.132 [54:78:1A:86:1C:0B] 0.722ms
- Unicast reply from 192.0.2.132 [54:78:1A:86:1C:0B] 0.723ms
- Sent 3 probes (1 broadcast(s))
- Received 3 response(s)
-
-To reduce the number of ARP requests, operating systems maintain an ARP cache
-that contains the mappings of IP addresses to MAC address. On a Linux machine,
-you can view the contents of the ARP cache by using the :command:`arp`
-command:
-
-.. code-block:: console
-
- $ arp -n
- Address HWtype HWaddress Flags Mask Iface
- 192.0.2.3 ether 52:54:00:12:35:03 C eth0
- 192.0.2.2 ether 52:54:00:12:35:02 C eth0
-
-.. _DHCP:
-
-DHCP
-~~~~
-
-Hosts connected to a network use the :term:`Dynamic Host Configuration
-Protocol (DHCP)` to dynamically obtain IP addresses. A DHCP
-server hands out the IP addresses to network hosts, which are the DHCP
-clients.
-
-DHCP clients locate the DHCP server by sending a UDP_ packet from port
-68 to address ``255.255.255.255`` on port 67. Address
-``255.255.255.255`` is the local network broadcast address: all hosts
-on the local network see the UDP packets sent to this address.
-However, such packets are not forwarded to other networks.
-Consequently, the DHCP server must be on the same local network as the
-client, or the server will not receive the broadcast. The DHCP server
-responds by sending a UDP packet from port 67 to port 68 on the
-client. The exchange looks like this:
-
-1. The client sends a discover ("I’m a client at MAC address
- ``08:00:27:b9:88:74``, I need an IP address")
-2. The server sends an offer ("OK ``08:00:27:b9:88:74``, I’m offering
- IP address ``192.0.2.112``")
-3. The client sends a request ("Server ``192.0.2.131``, I would like
- to have IP ``192.0.2.112``")
-4. The server sends an acknowledgement ("OK ``08:00:27:b9:88:74``, IP
- ``192.0.2.112`` is yours")
-
-
-OpenStack uses a third-party program called
-`dnsmasq `_
-to implement the DHCP server.
-Dnsmasq writes to the syslog, where you can observe the DHCP request
-and replies::
-
- Apr 23 15:53:46 c100-1 dhcpd: DHCPDISCOVER from 08:00:27:b9:88:74 via eth2
- Apr 23 15:53:46 c100-1 dhcpd: DHCPOFFER on 192.0.2.112 to 08:00:27:b9:88:74 via eth2
- Apr 23 15:53:48 c100-1 dhcpd: DHCPREQUEST for 192.0.2.112 (192.0.2.131) from 08:00:27:b9:88:74 via eth2
- Apr 23 15:53:48 c100-1 dhcpd: DHCPACK on 192.0.2.112 to 08:00:27:b9:88:74 via eth2
-
-When troubleshooting an instance that is not reachable over the network, it can
-be helpful to examine this log to verify that all four steps of the DHCP
-protocol were carried out for the instance in question.
-
-
-IP
-~~
-
-The Internet Protocol (IP) specifies how to route packets between
-hosts that are connected to different local networks. IP relies on
-special network hosts called *routers* or *gateways*. A router is a
-host that is connected to at least two local networks and can forward
-IP packets from one local network to another. A router has multiple IP
-addresses: one for each of the networks it is connected to.
-
-In the OSI model of networking protocols IP occupies the third layer,
-known as the network layer. When discussing IP, you will often hear terms
-such as *layer 3*, *L3*, and *network layer*.
-
-A host sending a packet to an IP address consults its *routing table*
-to determine which machine on the local network(s) the packet should
-be sent to. The routing table maintains a list of the subnets
-associated with each local network that the host is directly connected
-to, as well as a list of routers that are on these local networks.
-
-On a Linux machine, any of the following commands displays the routing table:
-
-.. code-block:: console
-
- $ ip route show
- $ route -n
- $ netstat -rn
-
-Here is an example of output from :command:`ip route show`:
-
-.. code-block:: console
-
- $ ip route show
- default via 192.0.2.2 dev eth0
- 192.0.2.0/24 dev eth0 proto kernel scope link src 192.0.2.15
- 198.51.100.0/25 dev eth1 proto kernel scope link src 198.51.100.100
- 198.51.100.192/26 dev virbr0 proto kernel scope link src 198.51.100.193
-
-Line 1 of the output specifies the location of the default route,
-which is the effective routing rule if none of the other rules match.
-The router associated with the default route (``192.0.2.2`` in the
-example above) is sometimes referred to as the *default gateway*. A
-DHCP_ server typically transmits the IP address of the default gateway
-to the DHCP client along with the client's IP address and a netmask.
-
-Line 2 of the output specifies that IPs in the ``192.0.2.0/24`` subnet are on
-the local network associated with the network interface eth0.
-
-Line 3 of the output specifies that IPs in the ``198.51.100.0/25`` subnet
-are on the local network associated with the network interface eth1.
-
-Line 4 of the output specifies that IPs in the ``198.51.100.192/26`` subnet are
-on the local network associated with the network interface virbr0.
-
-The output of the :command:`route -n` and :command:`netstat -rn` commands are
-formatted in a slightly different way. This example shows how the same
-routes would be formatted using these commands:
-
-.. code-block:: console
-
- $ route -n
- Kernel IP routing table
- Destination Gateway Genmask Flags MSS Window irtt Iface
- 0.0.0.0 192.0.2.2 0.0.0.0 UG 0 0 0 eth0
- 192.0.2.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
- 198.51.100.0 0.0.0.0 255.255.255.128 U 0 0 0 eth1
- 198.51.100.192 0.0.0.0 255.255.255.192 U 0 0 0 virbr0
-
-The :command:`ip route get` command outputs the route for a destination
-IP address. From the below example, destination IP address ``192.0.2.14`` is on
-the local network of eth0 and would be sent directly:
-
-.. code-block:: console
-
- $ ip route get 192.0.2.14
- 192.0.2.14 dev eth0 src 192.0.2.15
-
-The destination IP address ``203.0.113.34`` is not on any of the connected
-local networks and would be forwarded to the default gateway at ``192.0.2.2``:
-
-.. code-block:: console
-
- $ ip route get 203.0.113.34
- 203.0.113.34 via 192.0.2.2 dev eth0 src 192.0.2.15
-
-It is common for a packet to hop across multiple routers to reach its final
-destination. On a Linux machine, the ``traceroute`` and more recent ``mtr``
-programs prints out the IP address of each router that an IP packet
-traverses along its path to its destination.
-
-.. _UDP:
-
-TCP/UDP/ICMP
-~~~~~~~~~~~~
-
-For networked software applications to communicate over an IP network, they
-must use a protocol layered atop IP. These protocols occupy the fourth
-layer of the OSI model known as the *transport layer* or *layer 4*. See the
-`Protocol Numbers `_
-web page maintained by the Internet Assigned Numbers
-Authority (IANA) for a list of protocols that layer atop IP and their
-associated numbers.
-
-The *Transmission Control Protocol* (TCP) is the most
-commonly used layer 4 protocol in networked applications. TCP is a
-*connection-oriented* protocol: it uses a client-server model where a client
-connects to a server, where *server* refers to the application that receives
-connections. The typical interaction in a TCP-based application proceeds as
-follows:
-
-
-1. Client connects to server.
-2. Client and server exchange data.
-3. Client or server disconnects.
-
-Because a network host may have multiple TCP-based applications running, TCP
-uses an addressing scheme called *ports* to uniquely identify TCP-based
-applications. A TCP port is associated with a number in the range 1-65535, and
-only one application on a host can be associated with a TCP port at a time, a
-restriction that is enforced by the operating system.
-
-A TCP server is said to *listen* on a port. For example, an SSH server
-typically listens on port 22. For a client to connect to a server
-using TCP, the client must know both the IP address of a server's host
-and the server's TCP port.
-
-The operating system of the TCP client application automatically
-assigns a port number to the client. The client owns this port number
-until the TCP connection is terminated, after which the operating
-system reclaims the port number. These types of ports are referred to
-as *ephemeral ports*.
-
-IANA maintains a `registry of port numbers
-`_
-for many TCP-based services, as well as services that use other layer 4
-protocols that employ ports. Registering a TCP port number is not required, but
-registering a port number is helpful to avoid collisions with other
-services. See `firewalls and default ports
-`_
-in OpenStack Administrator Guide for the default TCP ports used by
-various services involved in an OpenStack deployment.
-
-
-The most common application programming interface (API) for writing TCP-based
-applications is called *Berkeley sockets*, also known as *BSD sockets* or,
-simply, *sockets*. The sockets API exposes a *stream oriented* interface for
-writing TCP applications. From the perspective of a programmer, sending data
-over a TCP connection is similar to writing a stream of bytes to a file. It is
-the responsibility of the operating system's TCP/IP implementation to break up
-the stream of data into IP packets. The operating system is also
-responsible for automatically retransmitting dropped packets, and for
-handling flow control to ensure that transmitted data does not overrun
-the sender's data buffers, receiver's data buffers, and network
-capacity. Finally, the operating system is responsible for
-re-assembling the packets in the correct order into a stream of data
-on the receiver's side. Because TCP detects and retransmits lost
-packets, it is said to be a *reliable* protocol.
-
-The *User Datagram Protocol* (UDP) is another layer 4 protocol that is
-the basis of several well-known networking protocols. UDP is a
-*connectionless* protocol: two applications that communicate over UDP
-do not need to establish a connection before exchanging data. UDP is
-also an *unreliable* protocol. The operating system does not attempt
-to retransmit or even detect lost UDP packets. The operating system
-also does not provide any guarantee that the receiving application
-sees the UDP packets in the same order that they were sent in.
-
-UDP, like TCP, uses the notion of ports to distinguish between different
-applications running on the same system. Note, however, that operating systems
-treat UDP ports separately from TCP ports. For example, it is possible for one
-application to be associated with TCP port 16543 and a separate application to
-be associated with UDP port 16543.
-
-Like TCP, the sockets API is the most common API for writing UDP-based
-applications. The sockets API provides a *message-oriented* interface for
-writing UDP applications: a programmer sends data over UDP by transmitting a
-fixed-sized message. If an application requires retransmissions of lost packets
-or a well-defined ordering of received packets, the programmer is responsible
-for implementing this functionality in the application code.
-
-DHCP_, the Domain Name System (DNS), the Network Time Protocol (NTP), and
-:ref:`VXLAN` are examples of UDP-based protocols used in OpenStack deployments.
-
-UDP has support for one-to-many communication: sending a single packet
-to multiple hosts. An application can broadcast a UDP packet to all of
-the network hosts on a local network by setting the receiver IP
-address as the special IP broadcast address ``255.255.255.255``. An
-application can also send a UDP packet to a set of receivers using *IP
-multicast*. The intended receiver applications join a multicast group
-by binding a UDP socket to a special IP address that is one of the
-valid multicast group addresses. The receiving hosts do not have to be
-on the same local network as the sender, but the intervening routers
-must be configured to support IP multicast routing. VXLAN is an
-example of a UDP-based protocol that uses IP multicast.
-
-The *Internet Control Message Protocol* (ICMP) is a protocol used for sending
-control messages over an IP network. For example, a router that receives an IP
-packet may send an ICMP packet back to the source if there is no route in the
-router's routing table that corresponds to the destination address
-(ICMP code 1, destination host unreachable) or if the IP packet is too
-large for the router to handle (ICMP code 4, fragmentation required
-and "don't fragment" flag is set).
-
-The :command:`ping` and :command:`mtr` Linux command-line tools are two
-examples of network utilities that use ICMP.
diff --git a/doc/networking-guide/source/intro-nat.rst b/doc/networking-guide/source/intro-nat.rst
deleted file mode 100644
index d700a1d746..0000000000
--- a/doc/networking-guide/source/intro-nat.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-.. _intro-nat:
-
-===========================
-Network address translation
-===========================
-
-*Network Address Translation* (NAT) is a process for modifying the source or
-destination addresses in the headers of an IP packet while the packet is
-in transit. In general, the sender and receiver applications are not aware that
-the IP packets are being manipulated.
-
-NAT is often implemented by routers, and so we will refer to the host
-performing NAT as a *NAT router*. However, in OpenStack deployments it
-is typically Linux servers that implement the NAT functionality, not
-hardware routers. These servers use the
-`iptables `_
-software package to implement the NAT functionality.
-
-There are multiple variations of NAT, and here we describe three kinds
-commonly found in OpenStack deployments.
-
-SNAT
-~~~~
-
-In *Source Network Address Translation* (SNAT), the NAT router modifies the IP
-address of the sender in IP packets. SNAT is commonly used to enable
-hosts with *private addresses* to communicate with servers on the
-public Internet.
-
-`RFC 5737 `_
-reserves the following three subnets as private addresses:
-
-* ``192.0.2.0/24``
-* ``198.51.100.0/24``
-* ``203.0.113.0/24``
-
-These IP addresses are not publicly routable, meaning that a host on the public
-Internet can not send an IP packet to any of these addresses. Private IP
-addresses are widely used in both residential and corporate environments.
-
-Often, an application running on a host with a private IP address will need to
-connect to a server on the public Internet. An example is a user
-who wants to access a public website such as www.openstack.org. If the IP
-packets reach the web server at www.openstack.org with a private IP address as
-the source, then the web server cannot send packets back to the sender.
-
-SNAT solves this problem by modifying the source IP address to an IP address
-that is routable on the public Internet. There are different variations of
-SNAT; in the form that OpenStack deployments use, a NAT router on the path
-between the sender and receiver replaces the packet's source IP
-address with the router's public IP address. The router also modifies
-the source TCP or UDP port to another value, and the router maintains
-a record of the sender's true IP address and port, as well as the
-modified IP address and port.
-
-When the router receives a packet with the matching IP address and port, it
-translates these back to the private IP address and port, and forwards the
-packet along.
-
-Because the NAT router modifies ports as well as IP addresses, this
-form of SNAT is sometimes referred to as *Port Address Translation*
-(PAT). It is also sometimes referred to as *NAT overload*.
-
-OpenStack uses SNAT to enable applications running inside of instances to
-connect out to the public Internet.
-
-DNAT
-~~~~
-
-In *Destination Network Address Translation* (DNAT), the NAT router
-modifies the IP address of the destination in IP packet headers.
-
-OpenStack uses DNAT to route packets from instances to the OpenStack
-metadata service. Applications running inside of instances access the
-OpenStack metadata service by making HTTP GET requests to a web server
-with IP address 169.254.169.254. In an OpenStack deployment, there is
-no host with this IP address. Instead, OpenStack uses DNAT to change
-the destination IP of these packets so they reach the network
-interface that a metadata service is listening on.
-
-One-to-one NAT
-~~~~~~~~~~~~~~
-
-In *one-to-one NAT*, the NAT router maintains a one-to-one mapping
-between private IP addresses and public IP addresses. OpenStack uses
-one-to-one NAT to implement floating IP addresses.
-
diff --git a/doc/networking-guide/source/intro-network-components.rst b/doc/networking-guide/source/intro-network-components.rst
deleted file mode 100644
index 49f0d6db0f..0000000000
--- a/doc/networking-guide/source/intro-network-components.rst
+++ /dev/null
@@ -1,52 +0,0 @@
-.. _intro-network-components:
-
-==================
-Network components
-==================
-
-Switches
-~~~~~~~~
-
-Switches are Multi-Input Multi-Output (MIMO) devices that enable packets
-to travel from one node to another. Switches connect hosts that belong
-to the same layer-2 network. Switches enable forwarding of the
-packet received on one port (input) to another port (output) so that they
-reach the desired destination node. Switches operate at layer-2 in the
-networking model. They forward the traffic based on the destination
-Ethernet address in the packet header.
-
-Routers
-~~~~~~~
-
-Routers are special devices that enable packets to travel from one
-layer-3 network to another. Routers enable communication between two nodes
-on different layer-3 networks that are not directly connected to each other.
-Routers operate at layer-3 in the networking model. They route the traffic
-based on the destination IP address in the packet header.
-
-Firewalls
-~~~~~~~~~
-
-Firewalls are used to regulate traffic to and from a host or a network.
-A firewall can be either a specialized device connecting two networks or
-a software-based filtering mechanism implemented on an operating system.
-Firewalls are used to restrict traffic to a host based on the rules
-defined on the host. They can filter packets based on several criteria such as
-source IP address, destination IP address, port numbers, connection state,
-and so on. It is primarily used to protect the hosts from unauthorized access
-and malicious attacks. Linux-based operating systems implement firewalls
-through ``iptables``.
-
-Load balancers
-~~~~~~~~~~~~~~
-
-Load balancers can be software-based or hardware-based devices that allow
-traffic to evenly be distributed across several servers. By distributing the
-traffic across multiple servers, it avoids overload of a single server thereby
-preventing a single point of failure in the product. This further improves the
-performance, network throughput, and response time of the servers.
-Load balancers are typically used in a 3-tier architecture. In this model,
-a load balancer receives a request from the front-end web server,
-which then forwards the request to one of the available back-end database
-servers for processing. The response from the database server is passed back to
-the web server for further processing.
diff --git a/doc/networking-guide/source/intro-network-namespaces.rst b/doc/networking-guide/source/intro-network-namespaces.rst
deleted file mode 100644
index 20622fc8b1..0000000000
--- a/doc/networking-guide/source/intro-network-namespaces.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. _intro-network-namespaces:
-
-==================
-Network namespaces
-==================
-
-A namespace is a way of scoping a particular set of identifiers. Using a
-namespace, you can use the same identifier multiple times in different
-namespaces. You can also restrict an identifier set visible to particular
-processes.
-
-For example, Linux provides namespaces for networking and processes, among
-other things. If a process is running within a process namespace, it can only
-see and communicate with other processes in the same namespace. So, if a shell
-in a particular process namespace ran :command:`ps waux`, it would only show
-the other processes in the same namespace.
-
-Linux network namespaces
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-In a network namespace, the scoped 'identifiers' are network devices; so a
-given network device, such as ``eth0``, exists in a particular namespace.
-Linux starts up with a default network namespace, so if your operating system
-does not do anything special, that is where all the network devices will be
-located. But it is also possible to create further non-default namespaces, and
-create new devices in those namespaces, or to move an existing device from one
-namespace to another.
-
-Each network namespace also has its own routing table, and in fact this is the
-main reason for namespaces to exist. A routing table is keyed by destination IP
-address, so network namespaces are what you need if you want the same
-destination IP address to mean different things at different times - which is
-something that OpenStack Networking requires for its feature of providing
-overlapping IP addresses in different virtual networks.
-
-Each network namespace also has its own set of iptables (for both IPv4 and
-IPv6). So, you can apply different security to flows with the same IP
-addressing in different namespaces, as well as different routing.
-
-Any given Linux process runs in a particular network namespace. By default this
-is inherited from its parent process, but a process with the right capabilities
-can switch itself into a different namespace; in practice this is mostly done
-using the :command:`ip netns exec NETNS COMMAND...` invocation, which starts
-``COMMAND`` running in the namespace named ``NETNS``. Suppose such a process
-sends out a message to IP address A.B.C.D, the effect of the namespace is that
-A.B.C.D will be looked up in that namespace's routing table, and that will
-determine the network device that the message is transmitted through.
-
-Virtual routing and forwarding (VRF)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Virtual routing and forwarding is an IP technology that allows multiple
-instances of a routing table to coexist on the same router at the same time.
-It is another name for the network namespace functionality described above.
diff --git a/doc/networking-guide/source/intro-os-networking.rst b/doc/networking-guide/source/intro-os-networking.rst
deleted file mode 100644
index f724bc64be..0000000000
--- a/doc/networking-guide/source/intro-os-networking.rst
+++ /dev/null
@@ -1,379 +0,0 @@
-.. _intro-os-networking:
-
-====================
-OpenStack Networking
-====================
-
-OpenStack Networking allows you to create and manage network objects,
-such as networks, subnets, and ports, which other OpenStack services
-can use. Plug-ins can be implemented to accommodate different
-networking equipment and software, providing flexibility to OpenStack
-architecture and deployment.
-
-The Networking service, code-named neutron, provides an API that lets you
-define network connectivity and addressing in the cloud. The Networking
-service enables operators to leverage different networking technologies
-to power their cloud networking. The Networking service also provides an
-API to configure and manage a variety of network services ranging from L3
-forwarding and :term:`NAT ` to load
-balancing, perimeter firewalls, and virtual private networks.
-
-It includes the following components:
-
-API server
- The OpenStack Networking API includes support for Layer 2 networking
- and :term:`IP address management (IPAM) `, as
- well as an extension for a Layer 3 router construct that enables routing
- between Layer 2 networks and gateways to external networks. OpenStack
- Networking includes a growing list of plug-ins that enable interoperability
- with various commercial and open source network technologies,
- including routers, switches, virtual switches and software-defined
- networking (SDN) controllers.
-
-OpenStack Networking plug-in and agents
- Plugs and unplugs ports, creates networks or subnets, and provides
- IP addressing. The chosen plug-in and agents differ depending on the
- vendor and technologies used in the particular cloud. It is
- important to mention that only one plug-in can be used at a time.
-
-Messaging queue
- Accepts and routes RPC requests between agents to complete API operations.
- Message queue is used in the ML2 plug-in for RPC between the neutron
- server and neutron agents that run on each hypervisor, in the ML2
- mechanism drivers for :term:`Open vSwitch` and :term:`Linux bridge`.
-
-Concepts
-~~~~~~~~
-
-To configure rich network topologies, you can create and configure networks
-and subnets and instruct other OpenStack services like Compute to attach
-virtual devices to ports on these networks.
-OpenStack Compute is a prominent consumer of OpenStack Networking to provide
-connectivity for its instances.
-In particular, OpenStack Networking supports each project having multiple
-private networks and enables projects to choose their own IP addressing scheme,
-even if those IP addresses overlap with those that other projects use. There
-are two types of network, project and provider networks. It is possible to
-share any of these types of networks among projects as part of the network
-creation process.
-
-.. _intro-os-networking-provider:
-
-Provider networks
------------------
-
-Provider networks offer layer-2 connectivity to instances with optional
-support for DHCP and metadata services. These networks connect, or map, to
-existing layer-2 networks in the data center, typically using VLAN (802.1q)
-tagging to identify and separate them.
-
-Provider networks generally offer simplicity, performance, and reliability
-at the cost of flexibility. By default only administrators can create or
-update provider networks because they require configuration of physical
-network infrastructure. It is possible to change the user who is allowed to
-create or update provider networks with the following parameters of
-``policy.json``:
-
-* ``create_network:provider:physical_network``
-* ``update_network:provider:physical_network``
-
-.. warning::
-
- The creation and modification of provider networks enables use of
- physical network resources, such as VLAN-s. Enable these changes
- only for trusted projects.
-
-Also, provider networks only handle layer-2 connectivity for instances, thus
-lacking support for features such as routers and floating IP addresses.
-
-In many cases, operators who are already familiar with virtual networking
-architectures that rely on physical network infrastructure for layer-2,
-layer-3, or other services can seamlessly deploy the OpenStack Networking
-service. In particular, provider networks appeal to operators looking to
-migrate from the Compute networking service (nova-network) to the OpenStack
-Networking service. Over time, operators can build on this minimal
-architecture to enable more cloud networking features.
-
-In general, the OpenStack Networking software components that handle layer-3
-operations impact performance and reliability the most. To improve performance
-and reliability, provider networks move layer-3 operations to the physical
-network infrastructure.
-
-In one particular use case, the OpenStack deployment resides in a mixed
-environment with conventional virtualization and bare-metal hosts that use a
-sizable physical network infrastructure. Applications that run inside the
-OpenStack deployment might require direct layer-2 access, typically using
-VLANs, to applications outside of the deployment.
-
-Routed provider networks
-------------------------
-
-Routed provider networks offer layer-3 connectivity to instances. These
-networks map to existing layer-3 networks in the data center. More
-specifically, the network maps to multiple layer-2 segments, each of which is
-essentially a provider network. Each has a router gateway attached to it which
-routes traffic between them and externally. The Networking service does not
-provide the routing.
-
-Routed provider networks offer performance at scale that is difficult to
-achieve with a plain provider network at the expense of guaranteed layer-2
-connectivity.
-
-See :ref:`config-routed-provider-networks` for more information.
-
-.. _intro-os-networking-selfservice:
-
-Self-service networks
----------------------
-
-Self-service networks primarily enable general (non-privileged) projects
-to manage networks without involving administrators. These networks are
-entirely virtual and require virtual routers to interact with provider
-and external networks such as the Internet. Self-service networks also
-usually provide DHCP and metadata services to instances.
-
-In most cases, self-service networks use overlay protocols such as VXLAN
-or GRE because they can support many more networks than layer-2 segmentation
-using VLAN tagging (802.1q). Furthermore, VLANs typically require additional
-configuration of physical network infrastructure.
-
-IPv4 self-service networks typically use private IP address ranges (RFC1918)
-and interact with provider networks via source NAT on virtual routers.
-Floating IP addresses enable access to instances from provider networks
-via destination NAT on virtual routers. IPv6 self-service networks always
-use public IP address ranges and interact with provider networks via
-virtual routers with static routes.
-
-The Networking service implements routers using a layer-3 agent that typically
-resides at least one network node. Contrary to provider networks that connect
-instances to the physical network infrastructure at layer-2, self-service
-networks must traverse a layer-3 agent. Thus, oversubscription or failure
-of a layer-3 agent or network node can impact a significant quantity of
-self-service networks and instances using them. Consider implementing one or
-more high-availability features to increase redundancy and performance
-of self-service networks.
-
-Users create project networks for connectivity within projects. By default,
-they are fully isolated and are not shared with other projects. OpenStack
-Networking supports the following types of network isolation and overlay
-technologies.
-
-Flat
- All instances reside on the same network, which can also be shared
- with the hosts. No VLAN tagging or other network segregation takes place.
-
-VLAN
- Networking allows users to create multiple provider or project networks
- using VLAN IDs (802.1Q tagged) that correspond to VLANs present in the
- physical network. This allows instances to communicate with each other
- across the environment. They can also communicate with dedicated servers,
- firewalls, load balancers, and other networking infrastructure on the
- same layer 2 VLAN.
-
-GRE and VXLAN
- VXLAN and GRE are encapsulation protocols that create overlay networks
- to activate and control communication between compute instances. A
- Networking router is required to allow traffic to flow outside of the
- GRE or VXLAN project network. A router is also required to connect
- directly-connected project networks with external networks, including the
- Internet. The router provides the ability to connect to instances directly
- from an external network using floating IP addresses.
-
-.. image:: figures/NetworkTypes.png
- :width: 100%
- :alt: Project and provider networks
-
-Subnets
--------
-
-A block of IP addresses and associated configuration state. This
-is also known as the native IPAM (IP Address Management) provided by the
-networking service for both project and provider networks.
-Subnets are used to allocate IP addresses when new ports are created on a
-network.
-
-Subnet pools
-------------
-
-End users normally can create subnets with any valid IP addresses without other
-restrictions. However, in some cases, it is nice for the admin or the project
-to pre-define a pool of addresses from which to create subnets with automatic
-allocation.
-
-Using subnet pools constrains what addresses can be used by requiring that
-every subnet be within the defined pool. It also prevents address reuse or
-overlap by two subnets from the same pool.
-
-See :ref:`config-subnet-pools` for more information.
-
-Ports
------
-
-A port is a connection point for attaching a single device, such as the NIC
-of a virtual server, to a virtual network. The port also describes the
-associated network configuration, such as the MAC and IP addresses to be
-used on that port.
-
-Routers
--------
-
-Routers provide virtual layer-3 services such as routing and NAT
-between self-service and provider networks or among self-service
-networks belonging to a project. The Networking service uses a
-layer-3 agent to manage routers via namespaces.
-
-Security groups
----------------
-
-Security groups provide a container for virtual firewall rules that control
-ingress (inbound to instances) and egress (outbound from instances) network
-traffic at the port level. Security groups use a default deny policy and
-only contain rules that allow specific traffic. Each port can reference one
-or more security groups in an additive fashion. The firewall driver
-translates security group rules to a configuration for the underlying packet
-filtering technology such as ``iptables``.
-
-Each project contains a ``default`` security group that allows all egress
-traffic and denies all ingress traffic. You can change the rules in the
-``default`` security group. If you launch an instance without specifying a
-security group, the ``default`` security group automatically applies to it.
-Similarly, if you create a port without specifying a security group, the
-``default`` security group automatically applies to it.
-
-.. note::
-
- If you use the metadata service, removing the default egress rules denies
- access to TCP port 80 on 169.254.169.254, thus preventing instances from
- retrieving metadata.
-
-Security group rules are stateful. Thus, allowing ingress TCP port 22 for
-secure shell automatically creates rules that allow return egress traffic
-and ICMP error messages involving those TCP connections.
-
-By default, all security groups contain a series of basic (sanity) and
-anti-spoofing rules that perform the following actions:
-
-* Allow egress traffic only if it uses the source MAC and IP addresses
- of the port for the instance, source MAC and IP combination in
- ``allowed-address-pairs``, or valid MAC address (port or
- ``allowed-address-pairs``) and associated EUI64 link-local IPv6 address.
-* Allow egress DHCP discovery and request messages that use the source MAC
- address of the port for the instance and the unspecified IPv4 address
- (0.0.0.0).
-* Allow ingress DHCP and DHCPv6 responses from the DHCP server on the
- subnet so instances can acquire IP addresses.
-* Deny egress DHCP and DHCPv6 responses to prevent instances from
- acting as DHCP(v6) servers.
-* Allow ingress/egress ICMPv6 MLD, neighbor solicitation, and neighbor
- discovery messages so instances can discover neighbors and join
- multicast groups.
-* Deny egress ICMPv6 router advertisements to prevent instances from acting
- as IPv6 routers and forwarding IPv6 traffic for other instances.
-* Allow egress ICMPv6 MLD reports (v1 and v2) and neighbor solicitation
- messages that use the source MAC address of a particular instance and
- the unspecified IPv6 address (::). Duplicate address detection (DAD) relies
- on these messages.
-* Allow egress non-IP traffic from the MAC address of the port for the
- instance and any additional MAC addresses in ``allowed-address-pairs`` on
- the port for the instance.
-
-Although non-IP traffic, security groups do not implicitly allow all ARP
-traffic. Separate ARP filtering rules prevent instances from using ARP
-to intercept traffic for another instance. You cannot disable or remove
-these rules.
-
-You can disable security groups including basic and anti-spoofing rules
-by setting the port attribute ``port_security_enabled`` to ``False``.
-
-Extensions
-----------
-
-The OpenStack Networking service is extensible. Extensions serve two
-purposes: they allow the introduction of new features in the API
-without requiring a version change and they allow the introduction of
-vendor specific niche functionality. Applications can programmatically
-list available extensions by performing a GET on the
-:code:`/extensions` URI. Note that this is a versioned request; that
-is, an extension available in one API version might not be available
-in another.
-
-DHCP
-----
-
-The optional DHCP service manages IP addresses for instances on provider
-and self-service networks. The Networking service implements the DHCP
-service using an agent that manages ``qdhcp`` namespaces and the
-``dnsmasq`` service.
-
-Metadata
---------
-
-The optional metadata service provides an API for instances to obtain
-metadata such as SSH keys.
-
-Service and component hierarchy
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Server
-------
-
-* Provides API, manages database, etc.
-
-Plug-ins
---------
-
-* Manages agents
-
-Agents
-------
-
-* Provides layer 2/3 connectivity to instances
-
-* Handles physical-virtual network transition
-
-* Handles metadata, etc.
-
-Layer 2 (Ethernet and Switching)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-* Linux Bridge
-
-* OVS
-
-Layer 3 (IP and Routing)
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-* L3
-
-* DHCP
-
-Miscellaneous
-^^^^^^^^^^^^^
-
-* Metadata
-
-Services
---------
-
-Routing services
-^^^^^^^^^^^^^^^^
-
-VPNaaS
-^^^^^^
-
-The Virtual Private Network-as-a-Service (VPNaaS) is a neutron
-extension that introduces the VPN feature set.
-
-LBaaS
-^^^^^
-
-The Load-Balancer-as-a-Service (LBaaS) API provisions and configures
-load balancers. The reference implementation is based on the HAProxy
-software load balancer.
-
-FWaaS
-^^^^^
-
-The Firewall-as-a-Service (FWaaS) API is an experimental API that
-enables early adopters and vendors to test their networking
-implementations.
diff --git a/doc/networking-guide/source/intro-overlay-protocols.rst b/doc/networking-guide/source/intro-overlay-protocols.rst
deleted file mode 100644
index 4f29be4800..0000000000
--- a/doc/networking-guide/source/intro-overlay-protocols.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-.. _intro-overlay-protocols:
-
-==========================
-Overlay (tunnel) protocols
-==========================
-
-Tunneling is a mechanism that makes transfer of payloads feasible over an
-incompatible delivery network. It allows the network user to gain access to
-denied or insecure networks. Data encryption may be employed to transport the
-payload, ensuring that the encapsulated user network data appears as public
-even though it is private and can easily pass the conflicting network.
-
-
-Generic routing encapsulation (GRE)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Generic routing encapsulation (GRE) is a protocol that runs over IP and is
-employed when delivery and payload protocols are compatible but payload
-addresses are incompatible. For instance, a payload might think it is running
-on a datalink layer but it is actually running over a transport layer using
-datagram protocol over IP. GRE creates a private point-to-point connection
-and works by encapsulating a payload. GRE is a foundation protocol for other
-tunnel protocols but the GRE tunnels provide only weak authentication.
-
-.. _VXLAN:
-
-Virtual extensible local area network (VXLAN)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The purpose of VXLAN is to provide scalable network isolation. VXLAN is a
-Layer 2 overlay scheme on a Layer 3 network. It allows an overlay layer-2
-network to spread across multiple underlay layer-3 network domains. Each
-overlay is termed a VXLAN segment. Only VMs within the same VXLAN segment
-can communicate.
diff --git a/doc/networking-guide/source/intro.rst b/doc/networking-guide/source/intro.rst
deleted file mode 100644
index 7a9af382c6..0000000000
--- a/doc/networking-guide/source/intro.rst
+++ /dev/null
@@ -1,73 +0,0 @@
-.. _intro:
-
-============
-Introduction
-============
-
-The OpenStack :term:`Networking service `
-provides an API that allows users to set up and define network connectivity
-and addressing in the cloud. The project code-name for Networking services is
-neutron. OpenStack Networking handles the creation and management of a virtual
-networking infrastructure, including networks, switches, subnets, and
-routers for devices managed by the OpenStack Compute service
-(nova). Advanced services such as firewalls or :term:`virtual private
-networks (VPNs) ` can also be used.
-
-OpenStack Networking consists of the neutron-server, a database for
-persistent storage, and any number of plug-in agents, which provide
-other services such as interfacing with native Linux networking
-mechanisms, external devices, or SDN controllers.
-
-OpenStack Networking is entirely standalone and can be deployed to a
-dedicated host. If your deployment uses a controller host to run
-centralized Compute components, you can deploy the Networking server
-to that specific host instead.
-
-OpenStack Networking integrates with various OpenStack
-components:
-
-* OpenStack :term:`Identity service (keystone)` is used for authentication
- and authorization of API requests.
-
-* OpenStack :term:`Compute service (nova)` is used to plug each virtual
- NIC on the VM into a particular network.
-
-* OpenStack :term:`Dashboard (horizon)` is used by administrators
- and project users to create and manage network services through a web-based
- graphical interface.
-
-.. note::
-
- The network address ranges used in this guide are chosen in accordance with
- `RFC 5737 `_ and
- `RFC 3849 `_, and as such are restricted
- to the following:
-
- **IPv4:**
-
- * 192.0.2.0/24
- * 198.51.100.0/24
- * 203.0.113.0/24
-
- **IPv6:**
-
- * 2001:DB8::/32
-
- The network address ranges in the examples of this guide should not be used
- for any purpose other than documentation.
-
-.. note::
-
- To reduce clutter, this guide removes command output without relevance
- to the particular action.
-
-.. toctree::
- :maxdepth: 2
-
- intro-basic-networking
- intro-network-components
- intro-overlay-protocols
- intro-network-namespaces
- intro-nat
- intro-os-networking
- fwaas
diff --git a/doc/networking-guide/source/locale/id/LC_MESSAGES/networking-guide.po b/doc/networking-guide/source/locale/id/LC_MESSAGES/networking-guide.po
deleted file mode 100644
index edefdfc3fa..0000000000
--- a/doc/networking-guide/source/locale/id/LC_MESSAGES/networking-guide.po
+++ /dev/null
@@ -1,14107 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2015, OpenStack contributors
-# This file is distributed under the same license as the Networking Guide package.
-#
-# Translators:
-# OpenStack Infra , 2015. #zanata
-# suhartono , 2016. #zanata
-# suhartono , 2017. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: Networking Guide 15.0\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2017-06-12 16:24+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2017-06-10 04:17+0000\n"
-"Last-Translator: suhartono \n"
-"Language: id\n"
-"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 3.9.6\n"
-"Language-Team: Indonesian\n"
-
-msgid "(Optional) FDB L2 agent extension"
-msgstr "(Optional) FDB L2 agent extension"
-
-msgid "**HostA and HostB: DHCP agent**"
-msgstr "**HostA and HostB: DHCP agent**"
-
-msgid "**HostA and HostB: L2 agent**"
-msgstr "**HostA and HostB: L2 agent**"
-
-msgid "**IPv4:**"
-msgstr "**IPv4:**"
-
-msgid "**IPv6:**"
-msgstr "**IPv6:**"
-
-msgid "**controlnode: neutron server**"
-msgstr "**controlnode: neutron server**"
-
-msgid "*Invalid combination.*"
-msgstr "*Invalid combination.*"
-
-msgid "*N/S*"
-msgstr "*N/S*"
-
-msgid ""
-"*Network Address Translation* (NAT) is a process for modifying the source or "
-"destination addresses in the headers of an IP packet while the packet is in "
-"transit. In general, the sender and receiver applications are not aware that "
-"the IP packets are being manipulated."
-msgstr ""
-"*Network Address Translation* (NAT) adalah proses untuk memodifikasi sumber "
-"atau alamat tujuan dalam header paket IP saat paket sedang dalam transit. "
-"Secara umum, pengirim dan penerima aplikasi tidak menyadari bahwa paket IP "
-"sedang dimanipulasi."
-
-msgid "..."
-msgstr "..."
-
-msgid "0,1,1"
-msgstr "0,1,1"
-
-msgid "1,0,0"
-msgstr "1,0,0"
-
-msgid "1,0,1"
-msgstr "1,0,1"
-
-msgid "192.0.2.0/24"
-msgstr "192.0.2.0/24"
-
-msgid "192.0.2.0/24 for instance floating IP addresses"
-msgstr "192.0.2.0/24 untuk alamat IP mengambang instance."
-
-msgid "198.51.100.0/24"
-msgstr "198.51.100.0/24"
-
-msgid ""
-"198.51.100.0/24 for floating IP agent gateway IPs configured on compute nodes"
-msgstr ""
-"198.51.100.0/24 untuk IP mengambang dimana IP gerbang agen dikonfigurasi "
-"pada node komputasi"
-
-msgid "198.51.100.1"
-msgstr "198.51.100.1"
-
-msgid "2001:DB8::/32"
-msgstr "2001:DB8::/32"
-
-msgid "203.0.113.0/24"
-msgstr "203.0.113.0/24"
-
-msgid "203.0.113.0/25 for all other IP allocations on the external network"
-msgstr "203.0.113.0/25 untuk semua alokasi IP lainnya pada jaringan eksternal"
-
-msgid "203.0.113.1"
-msgstr "203.0.113.1"
-
-msgid "4"
-msgstr "4"
-
-msgid "6"
-msgstr "6"
-
-msgid ""
-":ref:`deploy-ovs-ha-dvr` supports augmentation using Virtual Router "
-"Redundancy Protocol (VRRP). Using this configuration, virtual routers "
-"support both the ``--distributed`` and ``--ha`` options."
-msgstr ""
-":ref:`deploy-ovs-ha-dvr` mendukung pembesaran menggunakan Virtual Router "
-"Redundancy Protocol (VRRP). Menggunakan konfigurasi ini, router virtual "
-"mendukung kedua opsi ``--distributed`` dan ``--ha``."
-
-msgid ""
-":term:`Service function chain (SFC)` essentially refers to the :term:"
-"`software-defined networking (SDN)` version of :term:`policy-based routing "
-"(PBR)`. In many cases, SFC involves security, although it can include a "
-"variety of other features."
-msgstr ""
-":term:`Service function chain (SFC)` pada dasarnya mengacu pada versi:term:"
-"`software-defined networking (SDN)` dari :term:`policy-based routing (PBR)`. "
-"Dalam banyak kasus, SFC melibatkan keamanan, meskipun dapat mencakup "
-"berbagai fitur lainnya."
-
-msgid ""
-"A BGP speaker requires association with a provider network to determine "
-"eligible prefixes. The association builds a list of all virtual routers with "
-"gateways on provider and self-service networks in the same address scope so "
-"the BGP speaker can advertise self-service network prefixes with the "
-"corresponding router as the next-hop IP address. Associate the BGP speaker "
-"with the provider network."
-msgstr ""
-"Speaker BGP memerlukan hubungan dengan jaringan provider untuk menentukan "
-"prefixe yang memenuhi syarat. Asosiasi membangun daftar semua router "
-"virtual dengan gateway pada jaringan provider dan self-service di lingkup "
-"alamat yang sama sehingga speaker BGP dapat menyiarkan update dan perubahan "
-"awalan jaringan self-service dengan sesuai router sebagai alamat IP next-"
-"hop. Mengasosiasikan speaker BGP dengan jaringan provider."
-
-msgid ""
-"A TCP server is said to *listen* on a port. For example, an SSH server "
-"typically listens on port 22. For a client to connect to a server using TCP, "
-"the client must know both the IP address of a server's host and the server's "
-"TCP port."
-msgstr ""
-"Sebuah server TCP dikatakan *listen* pada port. Sebagai contoh, server SSH "
-"biasanya mendengarkan pada port 22. Untuk klien supaya terhubung ke server "
-"yang menggunakan TCP, klien harus tahu kedua alamat IP dari host server dan "
-"port TCP server."
-
-msgid ""
-"A basic example of SFC involves routing packets from one location to another "
-"through a firewall that lacks a \"next hop\" IP address from a conventional "
-"routing perspective. A more complex example involves an ordered series of "
-"service functions, each implemented using multiple instances (VMs). Packets "
-"must flow through one instance and a hashing algorithm distributes flows "
-"across multiple instances at each hop."
-msgstr ""
-"Sebuah contoh dasar dari SFC melibatkan routing paket dari satu lokasi ke "
-"lokasi lain melalui firewall yang tidak memiliki alamat IP \"next hop\" dari "
-"perspektif routing yang konvensional. Sebuah contoh yang lebih kompleks "
-"melibatkan serangkaian memerintahkan fungsi layanan, masing-masing "
-"dilaksanakan dengan menggunakan beberapa instance (VMs). Paket harus "
-"mengalir melalui satu instance dan algoritma hashing mendistribusikan arus "
-"di beberapa instance pada setiap hop."
-
-msgid ""
-"A block of IP addresses and associated configuration state. This is also "
-"known as the native IPAM (IP Address Management) provided by the networking "
-"service for both project and provider networks. Subnets are used to allocate "
-"IP addresses when new ports are created on a network."
-msgstr ""
-"Sebuah blok alamat IP dan terkait keadaan konfigurasi. Hal ini juga dikenal "
-"sebagai native IPAM (IP Address Management) yang disediakan oleh layanan "
-"jaringan untuk proyek dan jaringan provider. Subnet digunakan untuk "
-"mengalokasikan alamat IP ketika port baru diciptakan pada jaringan."
-
-msgid ""
-"A combination of the source attributes defines the source of the flow. A "
-"combination of the destination attributes defines the destination of the "
-"flow. The ``l7_parameters`` attribute is a place holder that may be used to "
-"support flow classification using layer 7 fields, such as a URL. If "
-"unspecified, the ``logical_source_port`` and ``logical_destination_port`` "
-"attributes default to ``none``, the ``ethertype`` attribute defaults to "
-"``IPv4``, and all other attributes default to a wildcard value."
-msgstr ""
-"Kombinasi atribut sumber mendefinisikan sumber aliran. Kombinasi atribut "
-"tujuan mendefinisikan tujuan aliran. Atribut `` l7_parameters`` adalah "
-"tempat dudukan yang dapat digunakan untuk mendukung klasifikasi aliran "
-"menggunakan layer 7 field, seperti URL. Jika tidak ditentukan, atribut "
-"``logical_source_port`` dan ``logical_destination_port`` defaultnya ke "
-"``none``, atribut ``ethertype`` defaultnya ke ``IPv4``, dan semua atribut "
-"lainnya defaultnya nilai wildcard."
-
-msgid ""
-"A database management command-line tool uses the Alembic library to manage "
-"the migration."
-msgstr ""
-"Sebuah alat command-line pengelolaan database menggunakan perpustakaan "
-"Alembic untuk mengelola migrasi."
-
-msgid ""
-"A flow classifier can only belong to one port chain to prevent ambiguity as "
-"to which chain should handle packets in the flow. A check prevents such "
-"ambiguity. However, you can associate multiple flow classifiers with a port "
-"chain because multiple flows can request the same service function path."
-msgstr ""
-"Sebuah classifier aliran hanya bisa memilik salah satu rantai port untuk "
-"mencegah ambiguitas dimana rantai harus menangani paket dalam aliran. Sebuah "
-"cek mencegah ambiguitas tersebut. Namun, Anda dapat mengaitkan beberapa "
-"pengklasifikasi aliran dengan rantai port karena beberapa arus dapat meminta "
-"jalur fungsi pelayanan yang sama."
-
-msgid ""
-"A flow classifier identifies a flow. A port chain can contain multiple flow "
-"classifiers. Omitting the flow classifier effectively prevents steering of "
-"traffic through the port chain."
-msgstr ""
-"Sebuah klassifier aliran mengidentifikasi aliran. Sebuah rantai port dapat "
-"berisi beberapa pengklasifikasi aliran. Menghilangkan klassifier aliran "
-"efektif mencegah pengendalian lalu lintas melalui rantai port."
-
-msgid "A host on the Internet sends a packet to the instance."
-msgstr "Sebuah host di Internet mengirimkan sebuah paket ke instance."
-
-msgid ""
-"A host sending a packet to an IP address consults its *routing table* to "
-"determine which machine on the local network(s) the packet should be sent "
-"to. The routing table maintains a list of the subnets associated with each "
-"local network that the host is directly connected to, as well as a list of "
-"routers that are on these local networks."
-msgstr ""
-"Sebuah host mengirimkan paket ke alamat IP berkonsultasi dengan *routing "
-"table* nya untuk menentukan dimana mesin pada jaringan lokal dimana paket "
-"harus dikirim. Tabel routing menyimpan daftar subnet yang terkait dengan "
-"setiap jaringan lokal dimana host terhubung langsung, serta daftar router "
-"yang ada di jaringan lokal."
-
-msgid ""
-"A namespace is a way of scoping a particular set of identifiers. Using a "
-"namespace, you can use the same identifier multiple times in different "
-"namespaces. You can also restrict an identifier set visible to particular "
-"processes."
-msgstr ""
-"Sebuah namespace adalah cara pelingkupan (scoping) sekumpulan pengenal "
-"(identifier) tertentu. Dengan menggunakan namespace, Anda dapat menggunakan "
-"identifier yang sama beberapa kali dalam namespace yang berbeda. Anda juga "
-"dapat membatasi sekumpulan identifier terlihat pada proses tertentu."
-
-msgid ""
-"A pool holds a list of members that serve content through the load balancer."
-msgstr ""
-"Sebuah kolam memegang daftar anggota yang melayani konten melalui "
-"penyeimbang beban."
-
-msgid ""
-"A port chain consists of a sequence of port pair groups. Each port pair "
-"group is a hop in the port chain. A group of port pairs represents service "
-"functions providing equivalent functionality. For example, a group of "
-"firewall service functions."
-msgstr ""
-"Sebuah rantai port terdiri dari urutan kelompok pasangan port. Setiap "
-"kelompok pasangan port adalah hop dalam rantai port. Sekelompok pasang port "
-"merupakan fungsi layanan yang menyediakan fungsi setara. Misalnya, "
-"sekelompok fungsi layanan firewall."
-
-msgid ""
-"A port chain is a unidirectional service chain. The first port acts as the "
-"head of the service function chain and the second port acts as the tail of "
-"the service function chain. A bidirectional service function chain consists "
-"of two unidirectional port chains."
-msgstr ""
-"Sebuah rantai port merupakan rantai layanan searah. Port pertama bertindak "
-"sebagai kepala rantai fungsi pelayanan dan port tindakan kedua sebagai ekor "
-"rantai fungsi pelayanan. Sebuah rantai fungsi pelayanan dua arah terdiri "
-"dari dua rantai port searah."
-
-msgid "A port chain, or service function path, consists of the following:"
-msgstr "Sebuah rantai port, atau jalur fungsi pelayanan, terdiri dari:"
-
-msgid ""
-"A port is a connection point for attaching a single device, such as the NIC "
-"of a virtual server, to a virtual network. The port also describes the "
-"associated network configuration, such as the MAC and IP addresses to be "
-"used on that port."
-msgstr ""
-"Sebuah port adalah titik koneksi untuk menghubungkan satu perangkat, seperti "
-"NIC dari server virtual, untuk jaringan virtual. Port juga menjelaskan "
-"konfigurasi jaringan yang terkait, seperti MAC dan alamat IP yang akan "
-"digunakan pada port tersebut."
-
-msgid ""
-"A port pair group may contain one or more port pairs. Multiple port pairs "
-"enable load balancing/distribution over a set of functionally equivalent "
-"service functions."
-msgstr ""
-"Sekelompok pasangan port mungkin berisi satu atau lebih pasangan port. "
-"Beberapa pasangan port mengaktifkan load balancing/distribution lebih dari "
-"satu set fungsi layanan setara (equivalent) secara fungsional."
-
-msgid ""
-"A port pair represents a service function instance that includes an ingress "
-"and egress port. A service function containing a bidirectional port uses the "
-"same ingress and egress port."
-msgstr ""
-"Sepasang port merupakan fungsi pelayanan instance yang mencakup ingress and "
-"egress port. Sebuah fungsi pelayanan yang berisi port dua arah menggunakan "
-"ingress and egress port yang sama."
-
-msgid ""
-"A provider network using IP address range 203.0.113.0/24, and supporting "
-"floating IP addresses 203.0.113.101, 203.0.113.102, and 203.0.113.103."
-msgstr ""
-"Sebuah jaringan operator menggunakan kisaran alamat IP 203.0.113.0/24, dan "
-"mendukung alamat IP mengambang 203.0.113.101, 203.0.113.102, dan "
-"203.0.113.103."
-
-msgid ""
-"A routed provider network enables a single provider network to represent "
-"multiple layer-2 networks (broadcast domains) or segments and enables the "
-"operator to present one network to users. However, the particular IP "
-"addresses available to an instance depend on the segment of the network "
-"available on the particular compute node."
-msgstr ""
-"Sebuah jaringan operator yang diarahkan mengaktifkan jaringan penyedia "
-"tunggal untuk mewakili beberapa jaringan lapisan-2 (broadcast domains) atau "
-"segmen dan mengaktifkan operator untuk menyajikan satu jaringan ke pengguna. "
-"Namun, alamat IP tertentu tersedia untuk sebuah instance bergantung pada "
-"segmen jaringan yang tersedia pada node komputasi tertentu."
-
-msgid "A self-service network using IP address range 198.51.100.0/24."
-msgstr "Jaringan self-service menggunakan rentang alamat IP 198.51.100.0/24."
-
-msgid ""
-"A set of flow classifiers that specify the classified traffic flows entering "
-"the chain."
-msgstr ""
-"Satu set pengklasifikasi aliran yang menspesifikasikan arus lalu lintas yang "
-"terklasifikasikan memasuki rantai."
-
-msgid "A set of ports that define the sequence of service functions."
-msgstr "Satu set port yang menentukan urutan fungsi layanan."
-
-msgid "A setting of ``-1`` disables the quota for a tenant."
-msgstr "Pengaturan ``-1`` menonaktifkan kuota untuk penyewa (tenant)."
-
-msgid ""
-"A single network can be assigned to more than one DHCP agents and one DHCP "
-"agent can host more than one network. You can add a network to a DHCP agent "
-"and remove one from it."
-msgstr ""
-"Sebuah jaringan tunggal dapat ditugaskan untuk lebih dari satu agen DHCP dan "
-"satu agen DHCP dapat host lebih dari satu jaringan. Anda dapat menambahkan "
-"jaringan ke agen DHCP dan menghapus satu dari itu."
-
-msgid ""
-"A subnet pool manages a pool of addresses from which subnets can be "
-"allocated. It ensures that there is no overlap between any two subnets "
-"allocated from the same pool."
-msgstr ""
-"Sebuah kolam subnet mengelola kolam alamat dimana subnet dapat dialokasikan. "
-"Ini memastikan bahwa tidak ada tumpang tindih antara dua subnet yang "
-"dialokasikan dari kolam yang sama."
-
-msgid ""
-"A switchport that is configured to pass frames from all VLANs and tag them "
-"with the VLAN IDs is called a *trunk port*. IEEE 802.1Q is the network "
-"standard that describes how VLAN tags are encoded in Ethernet frames when "
-"trunking is being used."
-msgstr ""
-"Sebuah switchport yang dikonfigurasi untuk melewatkan frame dari semua VLAN "
-"dan menandai (tag) mereka dengan ID VLAN disebut *port trunk *. IEEE 802.1Q "
-"adalah standar jaringan yang menggambarkan bagaimana VLAN tag dikodekan "
-"dalam frame Ethernet ketika trunking (pengindukan) sedang dijalankan."
-
-msgid ""
-"A trunk can be in a ``DEGRADED`` state when a temporary failure during the "
-"provisioning process is encountered. This includes situations where a "
-"subport add or remove operation fails. When in a degraded state, the trunk "
-"is still usable and some subports may be usable as well. Operations that "
-"cause the trunk to go into a ``DEGRADED`` state can be retried to fix "
-"temporary failures and move the trunk into an ``ACTIVE`` state."
-msgstr ""
-"Sebuah trunk dapat berada dalam keadaan ``DEGRADED`` ketika kegagalan "
-"sementara selama proses pengadaan terjadi. Degraded ini termasuk situasi "
-"dimana sebuah subport menambah atau menghapus kegagalan operasi. Ketika "
-"dalam keadaan rusak, trunk masih dapat digunakan dan beberapa subports "
-"mungkin masih dapat digunakan juga. Operasi yang menyebabkan trunk beralih "
-"ke keadaan ``DEGRADED`` dapat dicoba kembali untuk memperbaiki kegagalan "
-"sementara dan memindahkan trunk menjadi keadaan ``ACTIVE``."
-
-msgid ""
-"A trunk is ``DOWN`` when it is first created without an instance launched on "
-"it, or when the instance associated with the trunk has been deleted."
-msgstr ""
-"Sebuah trunk menjadi `` DOWN`` ketika trunk yang pertama kali dibuat tanpa "
-"sebuah instance yang diluncurkan di atasnya, atau ketika instance terkait "
-"dengan trunk telah dihapus."
-
-msgid ""
-"A trunk is in ``BUILD`` state while the resources associated with the trunk "
-"are in the process of being provisioned. Once the trunk and all of the "
-"subports have been provisioned successfully, the trunk transitions to "
-"``ACTIVE``. If there was a partial failure, the trunk transitions to "
-"``DEGRADED``."
-msgstr ""
-"Sebuah trunk berada dalam keadaan ``BUILD`` sedangkan sumber daya terkait "
-"dengan trunk berada dalam proses yang ditetapkan. Setelah trunk dan semua "
-"subports telah ditetapkan berhasil, transisi trunk akan berada ke "
-"``ACTIVE``. Jika ada kegagalan parsial, transisi trank akan berada ke "
-"``DEGRADED``."
-
-msgid ""
-"A trunk is in ``ERROR`` state if the request leads to a conflict or an error "
-"that cannot be fixed by retrying the request. The ``ERROR`` status can be "
-"encountered if the network is not compatible with the trunk configuration or "
-"the binding process leads to a persistent failure. When a trunk is in "
-"``ERROR`` state, it must be brought to a sane state (``ACTIVE``), or else "
-"requests to add subports will be rejected."
-msgstr ""
-"Sebuah trunk dalam keadaan ``ERROR`` jika permintaan mengarah ke konflik "
-"atau kesalahan yang tidak dapat diperbaiki dengan permintaan ulang. Keadaan "
-"``ERROR`` dapat ditemui jika jaringan tidak kompatibel dengan konfigurasi "
-"trunk atau proses pengikatan yang mengarah ke kegagalan terus-menerus. "
-"Ketika trunk dalam keadaan ``ERROR``, trunk itu harus dibawa ke keadaan "
-"sehat (``ACTIVE``), atau permintaan untuk penambahan subports akan ditolak."
-
-msgid "A, AAAA and PTR records will be created in the DNS service."
-msgstr "Catatan A, AAAA and PTR akan dibuat di layanan DNS."
-
-msgid "API server"
-msgstr "API server"
-
-msgid "Ability to leverage tags by deployment tools."
-msgstr "Kemampuan untuk memanfaatkan tag oleh alat pengerahan."
-
-msgid ""
-"Ability to map IDs from different management/orchestration systems to "
-"OpenStack networks in mixed environments. For example, in the Kuryr project, "
-"the Docker network ID is mapped to the Neutron network ID."
-msgstr ""
-"Kemampuan untuk memetakan ID dari sistem management/orchestration yang "
-"berbeda untuk jaringan OpenStack di lingkungan campuran. Misalnya, dalam "
-"proyek Kuryr, ID jaringan Docker dipetakan ke ID jaringan Neutron."
-
-msgid ""
-"Ability to map different networks in different OpenStack locations to one "
-"logically same network (for multi-site OpenStack)."
-msgstr ""
-"Kemampuan untuk memetakan jaringan yang berbeda di lokasi OpenStack yang "
-"berbeda ke satu jaringan logis yang sama (untuk OpenStack multi-situs)."
-
-msgid ""
-"Ability to tag information about provider networks (for example, high-"
-"bandwidth, low-latency, and so on)."
-msgstr ""
-"Kemampuan untuk menandai informasi tentang jaringan provider (misalnya, "
-"high-bandwidth, low-latency, dan sebagainya)."
-
-msgid "Abstract"
-msgstr "Abstrak"
-
-msgid ""
-"Accepts and routes RPC requests between agents to complete API operations. "
-"Message queue is used in the ML2 plug-in for RPC between the neutron server "
-"and neutron agents that run on each hypervisor, in the ML2 mechanism drivers "
-"for :term:`Open vSwitch` and :term:`Linux bridge`."
-msgstr ""
-"Menerima dan me-rute RPC request antara agen agen untuk menyelesaikan "
-"operasi API. Antrian pesan digunakan dalam ML2 plug-in untuk RPC antara "
-"server neutron dan agen neutron yang berjalan di setiap hypervisor, di "
-"driver mekanisme ML2 untuk :term:`Open vSwitch` and :term:`Linux bridge`."
-
-msgid "Access"
-msgstr "Access"
-
-msgid ""
-"Access to addresses in a scope are managed through subnet pools. Subnet "
-"pools can either be created in an address scope, or updated to belong to an "
-"address scope."
-msgstr ""
-"Akses ke alamat di lingkup dikelola melalui kolam subnet. Kolam subnet baik "
-"dapat dibuat dalam lingkup alamat, ataupun diperbarui milik lingkup alamat."
-
-msgid "Accessing address scopes"
-msgstr "Mengakses lingkup alamat"
-
-msgid "Achieving high availability with availability zone"
-msgstr "Pencapaian ketersediaan tinggi dengan zona ketersediaan"
-
-msgid "Add LBaaS panels to Dashboard"
-msgstr "Tambahkan panel LBaaS ke Dashboard"
-
-msgid "Add VRRP to an existing router"
-msgstr "Tambahkan VRRP ke router yang ada"
-
-msgid ""
-"Add ``dns`` to ``extension_drivers`` in the ``[ml2]`` section of ``/etc/"
-"neutron/plugins/ml2/ml2_conf.ini``. The following is an example:"
-msgstr ""
-"Tambahkan ``dns`` ke ``extension_drivers`` dalam bagian ``[ml2]`` dari ``/"
-"etc/neutron/plugins/ml2/ml2_conf.ini``. Berikut ini adalah contoh:"
-
-msgid "Add ``macvtap`` to mechanism drivers."
-msgstr "Tambahkan ``macvtap`` untuk driver mekanisme."
-
-msgid ""
-"Add ``sriovnicswitch`` as mechanism driver. Edit the ``ml2_conf.ini`` file "
-"on each controller:"
-msgstr ""
-"Tambahkan ``sriovnicswitch`` sebagai driver mekanisme. Edit file ``ml2_conf."
-"ini`` pada setiap controller:"
-
-msgid "Add ``vxlan`` to type drivers and project network types."
-msgstr "Tambahkan ``vxlan`` untuk tipe driver dan tipe jaringan proyek."
-
-msgid "Add a BGP peer to the BGP speaker."
-msgstr "Tambahkan rekan BGP ke BGP speaker."
-
-msgid ""
-"Add a ``placement`` section to the ``neutron.conf`` file with authentication "
-"credentials for the Compute service placement API:"
-msgstr ""
-"Tambahkan bagian ``placement`` ke file ``neutron.conf`` dengan kredential "
-"otentikasi untuk placement API di layanan Compute:"
-
-msgid "Add a tag to a resource:"
-msgstr "Tambahkan tag ke sumber daya:"
-
-msgid "Add an interface to the router on demo-subnet1:"
-msgstr "Tambahkan antarmuka ke router pada demo-subnet1:"
-
-msgid "Add one network interface: overlay."
-msgstr "Tambahkan satu antarmuka jaringan: overlay."
-
-msgid "Add one network node with the following components:"
-msgstr "Tambahkan satu node jaringan dengan komponen-komponen berikut:"
-
-msgid "Add one or more compute nodes with the following components:"
-msgstr ""
-"Tambahkan satu atau lebih node komputasi dengan komponen-komponen berikut:"
-
-msgid "Add subports to an existing trunk:"
-msgstr "Tambahkan subports ke trunk yang sudah ada:"
-
-msgid "Add subports to the trunk"
-msgstr "Tambahkan subport dan trunk"
-
-msgid ""
-"Add the FDB section and the ``shared_physical_device_mappings`` parameter. "
-"This parameter maps each physical port to its physical network name. Each "
-"physical network can be mapped to several ports:"
-msgstr ""
-"Tambahkan bagian FDB dan parameter ``shared_physical_device_mappings``. "
-"Parameter ini memetakan setiap port fisik untuk nama jaringan fisik. Setiap "
-"jaringan fisik dapat dipetakan ke beberapa port:"
-
-msgid "Add the IPv4 and IPv6 subnets as interfaces on the router."
-msgstr "Tambahkan subnet IPv4 dan IPv6 sebagai interface pada router."
-
-msgid ""
-"Add the LBaaS v2 service plug-in to the ``service_plugins`` configuration "
-"directive in ``/etc/neutron/neutron.conf``. The plug-in list is comma-"
-"separated:"
-msgstr ""
-"Tambahkan plug-in layanan LBaaS v2 ke perintah konfigurasi "
-"``service_plugins`` dalam ``/etc/neutron/neutron.conf``. Daftar plug-in "
-"dipisahkan dengan koma:"
-
-msgid ""
-"Add the LBaaS v2 service provider to the ``service_provider`` configuration "
-"directive within the ``[service_providers]`` section in ``/etc/neutron/"
-"neutron_lbaas.conf``:"
-msgstr ""
-"Tambahkan penyedia layanan LBaaS v2 ke direktif konfigurasi "
-"``service_provider`` dalam bagian ``[service_providers]`` dalam ``/etc/"
-"neutron/neutron_lbaas.conf``:"
-
-msgid ""
-"Add the Octavia service provider to the ``service_provider`` configuration "
-"directive within the ``[service_providers]`` section in ``/etc/neutron/"
-"neutron_lbaas.conf``:"
-msgstr ""
-"Tambahkan provider layanan Octavia ke konfigurasi direktif "
-"``service_provider`` dalam bagian ``[service_providers]`` dalam ``/etc/"
-"neutron/neutron_lbaas.conf``:"
-
-msgid ""
-"Add the QoS service to the ``service_plugins`` setting in ``/etc/neutron/"
-"neutron.conf``. For example:"
-msgstr ""
-"Menambahkan layanan QoS ke pengaturan ``service_plugins`` dalam ``/etc/"
-"neutron/neutron.conf``. Sebagi contoh:"
-
-msgid ""
-"Add the ``ml2_conf_sriov.ini`` file as parameter to the ``neutron-server`` "
-"service. Edit the appropriate initialization script to configure the "
-"``neutron-server`` service to load the SR-IOV configuration file:"
-msgstr ""
-"Tambahkan file ``ml2_conf_sriov.ini`` sebagai parameter untuk layanan "
-"``neutron-server``. Edit script inisialisasi yang tepat untuk "
-"mengkonfigurasi layanan ``neutron-server`` untuk memuat file konfigurasi SR-"
-"IOV:"
-
-msgid "Add the following to ``/etc/neutron/neutron.conf``:"
-msgstr "Tambahkan hal berikut ke ``/etc/neutron/neutron.conf``:"
-
-msgid "Add the following to ``/etc/neutron/plugins/ml2/ml2_conf.ini``:"
-msgstr "Tambahkan berikut ke ``/etc/neutron/plugins/ml2/ml2_conf.ini``:"
-
-msgid "Add the provider network as a gateway on each router."
-msgstr "Tambahkan jaringan provider sebagai gateway pada masing-masing router."
-
-msgid "Add the provider network as a gateway on the router."
-msgstr "Tambahkan jaringan provider sebagai gateway pada router."
-
-msgid "Add the provider network as the gateway on the router."
-msgstr "Tambahkan jaringan provider sebagai gateway pada router."
-
-msgid ""
-"Add the provider network interface as a port on the OVS provider bridge ``br-"
-"provider``:"
-msgstr ""
-"Tambahkan antarmuka jaringan operator sebagai port pada OVS provider bridge "
-"``br-provider``:"
-
-msgid "Adding an HTTP listener"
-msgstr "Tambahkan pendengar HTTP"
-
-msgid "Adding an HTTPS listener"
-msgstr "Menambahkan pendengar HTTPS"
-
-msgid ""
-"Address are assigned using EUI-64, and OpenStack Networking provides routing."
-msgstr ""
-"Alamat ditugaskan penggunaan EUI-64, dan OpenStack Networking menyediakan "
-"routing."
-
-msgid "Address configuration and optional information using DHCPv6."
-msgstr "Konfigurasi alamat dan informasi opsional menggunakan DHCPv6."
-
-msgid "Address configuration using RA and optional information using DHCPv6."
-msgstr ""
-"Konfigurasi alamat menggunakan RA dan informasi opsional menggunakan DHCPv6."
-
-msgid "Address configuration using Router Advertisement (RA)."
-msgstr "Konfigurasi alamat menggunakan Router Advertisement (RA)."
-
-msgid "Address modes for ports"
-msgstr "Mode alamat untuk port"
-
-msgid "Address scopes"
-msgstr "Lingkup alamat (address scopes)"
-
-msgid ""
-"Address scopes build from subnet pools. While subnet pools provide a "
-"mechanism for controlling the allocation of addresses to subnets, address "
-"scopes show where addresses can be routed between networks, preventing the "
-"use of overlapping addresses in any two subnets. Because all addresses "
-"allocated in the address scope do not overlap, neutron routers do not NAT "
-"between your projects' network and your external network. As long as the "
-"addresses within an address scope match, the Networking service performs "
-"simple routing between networks."
-msgstr ""
-"Lingkup alamat membangun kolam subnet. Sementara kolam subnet menyediakan "
-"mekanisme untuk mengendalikan alokasi alamat untuk subnet, lingkup alamat "
-"menunjukkan dimana alamat dapat dialihkan antara jaringan, mencegah "
-"penggunaan tumpang tindih alamat dalam dua subnet. Karena semua alamat "
-"dialokasikan dalam lingkup alamat tidak tumpang tindih, router neutron tidak "
-"men-NAT (Network Address Translation) antara jaringan proyek Anda dan "
-"jaringan eksternal Anda. Selama alamat dalam suatu kecocokan lingkup alamat, "
-"layanan Networking melakukan routing sederhana antar jaringan."
-
-msgid "Addresses"
-msgstr "Addresses (alamat)"
-
-msgid "Addresses and optional information are assigned using DHCPv6."
-msgstr "Alamat dan informasi opsional ditugaskan untuk penggunaan DHCPv6."
-
-msgid ""
-"Addresses are assigned using EUI-64, and an external router will be used for "
-"routing."
-msgstr ""
-"Alamat ditugaskan penggunaan EUI-64, dan router eksternal akan digunakan "
-"untuk routing."
-
-msgid "Addresses for subnets"
-msgstr "Alamat untuk subnet"
-
-msgid "Adjust security groups to allow pings and SSH (both IPv4 and IPv6):"
-msgstr "Atur kelompok keamanan untuk mengizinkan ping dan SSH (IPv4 dan IPv6):"
-
-msgid ""
-"Admin role is required to use the agent management and scheduler extensions. "
-"Ensure you run the following commands under a project with an admin role."
-msgstr ""
-"Peran admin diperlukan untuk menggunakan manajemen agen dan ekstensi "
-"scheduler. Pastikan Anda menjalankan perintah berikut di bawah sebuah proyek "
-"dengan peran admin."
-
-msgid "Administrator enforcement"
-msgstr "Penegakan administrator"
-
-msgid ""
-"Administrators are able to enforce policies on project ports or networks. As "
-"long as the policy is not shared, the project is not be able to detach any "
-"policy attached to a network or port."
-msgstr ""
-"Administrator dapat menegakkan kebijakan pada port proyek atau jaringan. "
-"Selama kebijakan tersebut tidak dibagi, proyek ini tidak dapat melepaskan "
-"kebijakan apa saja yang melekat ke jaringan atau port."
-
-msgid "Advanced services"
-msgstr "Layanan canggih"
-
-msgid ""
-"Advertisement of a floating IP address requires satisfying the following "
-"conditions:"
-msgstr ""
-"Penyiaran update dan perubahan alamat IP mengambang membutuhkan terpenuhinya "
-"kondisi berikut:"
-
-msgid ""
-"Advertisement of a self-service network requires satisfying the following "
-"conditions:"
-msgstr ""
-"Penyiaran update dan perubahan jaringan self-service membutuhkan memenuhi "
-"kondisi berikut:"
-
-msgid ""
-"After choosing a :ref:`mechanism driver `, the "
-"deployment examples generally include the following building blocks:"
-msgstr ""
-"Setelah pemilihan :ref:`mechanism driver `, contoh "
-"pengerahan umumnya memasukan blok bangunan berikut:"
-
-msgid ""
-"After creating a peering session, you cannot change the local or remote "
-"autonomous system numbers."
-msgstr ""
-"Setelah membuat sesi peering, Anda tidak dapat mengubah nomor sistem otonomi "
-"lokal atau remote."
-
-msgid ""
-"After deletion, if you restart the DHCP agent, it appears on the agent list "
-"again."
-msgstr ""
-"Setelah penghapusan, jika Anda me-restart agen DHCP, akan muncul pada daftar "
-"agen lagi."
-
-msgid "After installing Dibbler, edit the ``/etc/dibbler/server.conf`` file:"
-msgstr "Setelah menginstal Dibbler, edit file ``/etc/dibbler/server.conf``:"
-
-msgid ""
-"After installing a new version of the Neutron server, upgrade the database "
-"using the following command:"
-msgstr ""
-"Setelah menginstal versi baru dari server Neutron, meng-upgrade database "
-"kemudian gunakan perintah berikut:"
-
-msgid ""
-"After re-starting the ``neutron-server``, users will be able to assign a "
-"``dns_name`` attribute to their ports."
-msgstr ""
-"Setelah memulai kembali ``neutron-server``, pengguna akan dapat menetapkan "
-"atribut ``dns_name`` ke portnya."
-
-msgid ""
-"After the agent is disabled, you can safely remove the agent. Even after "
-"disabling the agent, resources on the agent are kept assigned. Ensure you "
-"remove the resources on the agent before you delete the agent."
-msgstr ""
-"Setelah agen dinonaktifkan, Anda dapat dengan aman menghapus agen. Bahkan "
-"setelah menonaktifkan agen, sumber daya pada agen terus ditugaskan. Pastikan "
-"Anda menghapus sumber daya pada agen sebelum Anda menghapus agen."
-
-msgid ""
-"After you stop the DHCP agent on HostA, you can delete it by the following "
-"command:"
-msgstr ""
-"Setelah Anda berhenti agen DHCP pada HostA, Anda dapat menghapusnya dengan "
-"perintah berikut:"
-
-msgid "Agent nodes"
-msgstr "Agent nodes"
-
-msgid "Agents"
-msgstr "Agents"
-
-msgid ""
-"All InfiniBand networks must have a subnet manager running for the network "
-"to function. This is true even when doing a simple network of two machines "
-"with no switch and the cards are plugged in back-to-back. A subnet manager "
-"is required for the link on the cards to come up. It is possible to have "
-"more than one subnet manager. In this case, one of them will act as the "
-"master, and any other will act as a slave that will take over when the "
-"master subnet manager fails."
-msgstr ""
-"Semua jaringan InfiniBand harus memiliki manajer subnet yang menjalankan "
-"jaringan supaya berfungsi. Hal ini berlaku bahkan ketika melakukan jaringan "
-"sederhana dari dua mesin tanpa switch dan kartu terpasang (plugged) di back-"
-"to-back. Seorang manajer subnet diperlukan untuk link di kartu untuk "
-"bangkit. Hal ini dimungkinkan untuk memiliki lebih dari satu manajer subnet. "
-"Dalam hal ini, salah satu dari mereka akan bertindak sebagai master, dan "
-"lain akan bertindak sebagai slave yang akan mengambil alih ketika manajer "
-"subnet master gagal."
-
-msgid ""
-"All OpenStack Networking services and OpenStack Compute instances connect to "
-"a virtual network via ports making it possible to create a traffic steering "
-"model for service chaining using only ports. Including these ports in a port "
-"chain enables steering of traffic through one or more instances providing "
-"service functions."
-msgstr ""
-"Semua layanan OpenStack Jaringan dan instance OpenStack Compute "
-"menghubungkan ke jaringan virtual melalui port sehingga hal itu memungkinkan "
-"untuk membuat model kemudi lalu lintas (traffic steering model) untuk "
-"layanan chaining hanya menggunakan port. Termasuk port ini dalam rantai port "
-"mengaktifkan kemudi lalu lintas melalui satu atau lebih instance yang "
-"menyediakan fungsi layanan."
-
-msgid ""
-"All instances reside on the same network, which can also be shared with the "
-"hosts. No VLAN tagging or other network segregation takes place."
-msgstr ""
-"Semua instance berada pada jaringan yang sama, yang juga dapat dibagi dengan "
-"host. Tidak ada VLAN tagging atau pemisahan jaringan lainnya berlangsung."
-
-msgid ""
-"Allow egress DHCP discovery and request messages that use the source MAC "
-"address of the port for the instance and the unspecified IPv4 address "
-"(0.0.0.0)."
-msgstr ""
-"Izinkan egress DHCP discovery and request messages yang menggunakan sumber "
-"alamat MAC dari port untuk instance dan alamat IPv4 yang tidak ditentukan "
-"(0.0.0.0)."
-
-msgid ""
-"Allow egress ICMPv6 MLD reports (v1 and v2) and neighbor solicitation "
-"messages that use the source MAC address of a particular instance and the "
-"unspecified IPv6 address (::). Duplicate address detection (DAD) relies on "
-"these messages."
-msgstr ""
-"Izinkan egress ICMPv6 MLD reports (v1 dan v2) dan neighbor solicitation "
-"messages yang menggunakan sumber alamat MAC dari instance khusus dan alamat "
-"IPv6 tidak ditentukan (: :). Duplicate address detection (DAD) tergantung "
-"pada pesan ini."
-
-msgid ""
-"Allow egress non-IP traffic from the MAC address of the port for the "
-"instance and any additional MAC addresses in ``allowed-address-pairs`` on "
-"the port for the instance."
-msgstr ""
-"Izinkan egress non-IP traffic dari alamat MAC dari port instance dan setiap "
-"alamat MAC tambahan dalam ``allowed-address-pairs`` pada port instance."
-
-msgid ""
-"Allow egress traffic only if it uses the source MAC and IP addresses of the "
-"port for the instance, source MAC and IP combination in ``allowed-address-"
-"pairs``, or valid MAC address (port or ``allowed-address-pairs``) and "
-"associated EUI64 link-local IPv6 address."
-msgstr ""
-"Izinkan lalu lintas egress hanya jika menggunakan sumber alamat MAC dan IP "
-"dari port untuk instance, sumber kombinasi MAC dan IP dalam ``allowed-"
-"address-pairs``, atau alamat MAC valid (port or ``allowed-address-pairs``) "
-"dan alamat IPv6 llink-local EUI64 terkait."
-
-msgid ""
-"Allow ingress DHCP and DHCPv6 responses from the DHCP server on the subnet "
-"so instances can acquire IP addresses."
-msgstr ""
-"Izinkan ingress DHCP and DHCPv6 responses dari server DHCP di subnet "
-"sehingga kasus dapat memperoleh alamat IP."
-
-msgid ""
-"Allow ingress/egress ICMPv6 MLD, neighbor solicitation, and neighbor "
-"discovery messages so instances can discover neighbors and join multicast "
-"groups."
-msgstr ""
-"Izinkan ingress/egress ICMPv6 MLD, neighbor solicitation, dan pesan penemuan "
-"tetangga sehingga instance dapat menemukan neighbors dan bergabung dengan "
-"multicast groups."
-
-msgid "Allowing a network to be used as an external network"
-msgstr "Membiarkan jaringan untuk digunakan sebagai jaringan eksternal"
-
-msgid ""
-"Also, provider networks only handle layer-2 connectivity for instances, thus "
-"lacking support for features such as routers and floating IP addresses."
-msgstr ""
-"Juga, jaringan provider hanya menangani layer-2 connectivity untik instance, "
-"sehingga kurang dukungan untuk fitur seperti router dan alamat IP mengambang."
-
-msgid ""
-"Alternatively the ``pci_passthrough_whitelist`` parameter also supports "
-"whitelisting by:"
-msgstr ""
-"Atau dengan parameter ``pci_passthrough_whitelist`` juga mendukung daftar "
-"putih (whitelisting):"
-
-msgid ""
-"Alternatively, you can create VFs by passing the ``max_vfs`` to the kernel "
-"module of your network interface. However, the ``max_vfs`` parameter has "
-"been deprecated, so the PCI SYS interface is the preferred method."
-msgstr ""
-"Atau, Anda dapat membuat VF dengan melewatkan ``max_vfs`` ke modul kernel "
-"dari antarmuka jaringan Anda. Namun, parameter ``max_vfs`` telah usang, "
-"sehingga interface PCI SYS menjadi metode yang disukai."
-
-msgid ""
-"Alternatively, you can launch each instance with one network interface and "
-"attach additional ports later."
-msgstr ""
-"Atau, Anda dapat memulai setiap instance dengan satu antarmuka jaringan dan "
-"menghubungkan port tambahan nanti."
-
-msgid ""
-"Although OpenStack does not make use of libvirt's networking, this "
-"networking will not interfere with OpenStack's behavior, and can be safely "
-"left enabled. However, libvirt's networking can be a nuisance when debugging "
-"OpenStack networking issues. Because libvirt creates an additional bridge, "
-"dnsmasq process, and iptables ruleset, these may distract an operator "
-"engaged in network troubleshooting. Unless you need to start up virtual "
-"machines using libvirt directly, you can safely disable libvirt's network."
-msgstr ""
-"Meskipun OpenStack tidak memanfaatkan jaringan libvirt ini, jaringan ini "
-"tidak akan mengganggu perilaku OpenStack, dan dapat dengan aman ditinggalkan "
-"diaktifkan. Namun, jaringan libvirt bisa menjadi gangguan ketika debugging "
-"masalah jaringan OpenStack. Karena Libvirt menciptakan jembatan tambahan, "
-"proses dnsmasq, dan iptables ruleset, ini mungkin mengalihkan perhatian "
-"operator terlibat dalam pemecahan masalah jaringan. Kecuali Anda perlu untuk "
-"memulai mesin virtual menggunakan libvirt langsung, Anda dapat dengan aman "
-"menonaktifkan jaringan libvirt ini."
-
-msgid ""
-"Although non-IP traffic, security groups do not implicitly allow all ARP "
-"traffic. Separate ARP filtering rules prevent instances from using ARP to "
-"intercept traffic for another instance. You cannot disable or remove these "
-"rules."
-msgstr ""
-"Meskipun lalu lintas non-IP, kelompok keamanan tidak secara implisit "
-"mengizinkan semua lalu lintas ARP. Pisahkan aturan penyaringan ARP untuk "
-"mencegah instance menggunakan ARP untuk menghadang lalu lintas instance "
-"lain. Anda tidak dapat menonaktifkan atau menghapus aturan ini."
-
-msgid ""
-"Although self-service networks generally use private IP address ranges "
-"(RFC1918) for IPv4 subnets, BGP dynamic routing can advertise any IPv4 "
-"address ranges."
-msgstr ""
-"Meskipun jaringan self-service umumnya menggunakan rentang alamat IP private "
-"(RFC1918) untuk subnet IPv4, BGP routing dinamis dapat menyiarkan upadate "
-"dan perubahan rentang alamat IPv4."
-
-msgid ""
-"Although, the Networking service provides high availability for routers and "
-"high availability and fault tolerance for networks' DHCP services, "
-"availability zones provide an extra layer of protection by segmenting a "
-"Networking service deployment in isolated failure domains. By deploying HA "
-"nodes across different availability zones, it is guaranteed that network "
-"services remain available in face of zone-wide failures that affect the "
-"deployment."
-msgstr ""
-"Meskipun, layanan Networking menyediakan ketersediaan tinggi untuk router "
-"dan dan toleransi kesalahan untuk layanan DHCP jaringan ', zona ketersediaan "
-"memberikan lapisan tambahan perlindungan dengan segmentasi pengerahan "
-"layanan jaringan di daerah kegagalan terisolasi (isolated failure domain). "
-"Dengan mengerahkan HA node (High Availability node) di zona ketersediaan "
-"yang berbeda, dijamin bahwa layanan jaringan tetap tersedia dalam menghadapi "
-"kegagalan zone-wide yang mempengaruhi pengerahan."
-
-msgid ""
-"An L2 agent serves layer 2 (Ethernet) network connectivity to OpenStack "
-"resources. It typically runs on each Network Node and on each Compute Node."
-msgstr ""
-"Agen L2 membantu konektivitas jaringan layer 2 (Ethernet) untuk sumber "
-"OpenStack. Ini biasanya berjalan pada setiap Network Node dan pada setiap "
-"Compute Node."
-
-msgid ""
-"An administrator can mark a pool as default. Only one pool from each address "
-"family can be marked default."
-msgstr ""
-"Administrator dapat menandai kolam sebagai default. Hanya satu kolam dari "
-"masing-masing keluarga alamat dapat ditandai default."
-
-msgid ""
-"An administrator might want to disable an agent if a system hardware or "
-"software upgrade is planned. Some agents that support scheduling also "
-"support disabling and enabling agents, such as L3 and DHCP agents. After the "
-"agent is disabled, the scheduler does not schedule new resources to the "
-"agent."
-msgstr ""
-"Administrator mungkin ingin menonaktifkan agen jika sistem perangkat keras "
-"atau upgrade perangkat lunak direncanakan. Beberapa agen yang mendukung "
-"penjadwalan juga mendukung penonaktifan dan pengaktifan agen, seperti L3 dan "
-"DHCP agen. Setelah agen dinonaktifkan, scheduler tidak menjadwalkan sumber "
-"daya baru untuk agen."
-
-msgid ""
-"An availability zone groups network nodes that run services like DHCP, L3, "
-"FW, and others. It is defined as an agent's attribute on the network node. "
-"This allows users to associate an availability zone with their resources so "
-"that the resources get high availability."
-msgstr ""
-"Sebuah node ketersediaan kelompok zona jaringan yang menjalankan layanan "
-"seperti DHCP, L3, FW, dan lain-lain. Hal ini didefinisikan sebagai atribut "
-"agen pada node jaringan. Hal ini memungkinkan pengguna untuk menghubungkan "
-"zona ketersediaan sumber daya mereka sehingga sumber daya mendapatkan "
-"ketersediaan tinggi."
-
-msgid ""
-"An availability zone is used to make network resources highly available. The "
-"operators group the nodes that are attached to different power sources under "
-"separate availability zones and configure scheduling for resources with high "
-"availability so that they are scheduled on different availability zones."
-msgstr ""
-"Zona ketersediaan digunakan untuk membuat sumber daya jaringan sangat "
-"tersedia. Operator mengelompokan node yang melekat pada sumber daya yang "
-"berbeda di bawah zona ketersediaan terpisah dan mengkonfigurasi penjadwalan "
-"sumber daya dengan ketersediaan tinggi sehingga mereka dijadwalkan pada zona "
-"ketersediaan yang berbeda."
-
-msgid ""
-"An instance with an interface with an IPv4 address in a routed provider "
-"network must be placed by the Compute scheduler in a host that has access to "
-"a segment with available IPv4 addresses. To make this possible, the "
-"Networking service communicates to the Compute scheduler the inventory of "
-"IPv4 addresses associated with each segment of a routed provider network. "
-"The operator must configure the authentication credentials that the "
-"Networking service will use to communicate with the Compute scheduler's "
-"placement API. Please see below an example configuration."
-msgstr ""
-"Sebuah instance dengan sebuah antarmuka dengan alamat IPv4 dalam jaringan "
-"provider yang diarahkan harus ditempatkan oleh Compute scheduler di host "
-"yang memiliki akses ke segmen dengan alamat IPv4 yang tersedia. Untuk "
-"membuat ini mungkin terjadi, layanan Networking berkomunikasi dengan Compute "
-"scheduler persediaan alamat IPv4 yang terkait dengan setiap segmen jaringan "
-"provider yang diarahkan. Operator harus mengkonfigurasi kredensial "
-"otentikasi dimana layanan Networking akan digunakan untuk berkomunikasi "
-"dengan placement API di Compute scheduler. Silakan lihat di bawah "
-"konfigurasi contoh."
-
-msgid ""
-"Any given Linux process runs in a particular network namespace. By default "
-"this is inherited from its parent process, but a process with the right "
-"capabilities can switch itself into a different namespace; in practice this "
-"is mostly done using the :command:`ip netns exec NETNS COMMAND...` "
-"invocation, which starts ``COMMAND`` running in the namespace named "
-"``NETNS``. Suppose such a process sends out a message to IP address A.B.C.D, "
-"the effect of the namespace is that A.B.C.D will be looked up in that "
-"namespace's routing table, and that will determine the network device that "
-"the message is transmitted through."
-msgstr ""
-"Setiap proses Linux yang ada berjalan dalam namespace jaringan tertentu. "
-"Secara default proses ini merupakan warisan (inherited) dari proses "
-"induknya, tetapi sebuah proses dengan kemampuan yang tepat dapat mengalihkan "
-"dirinya menjadi namespace yang berbeda; dalam praktek ini banyak dilakukan "
-"dengan menggunakan :command:`ip netns exec NETNS COMMAND...` invocation, "
-"yang memulai menjalankan `` COMMAND`` di namespace yang bernama `` NETNS``. "
-"Misalkan proses tersebut mengirimkan pesan ke alamat IP A.B.C.D, efek dari "
-"namespace adalah bahwa A.B.C.D akan mencari dalam tabel routing yang "
-"memiliki namespace ini, dan yang akan menentukan perangkat jaringan dimana "
-"pesan akan diantarkan."
-
-msgid ""
-"Any plug-in or ml2 mechanism driver can claim support for some QoS rule "
-"types by providing a plug-in/driver class property called "
-"``supported_qos_rule_types`` that returns a list of strings that correspond "
-"to `QoS rule types `_."
-msgstr ""
-"Driver mekanisme ml2 atau plug-in dapat mengklaim dukungan untuk beberapa "
-"tipe atauran QoS dengan menyediakan properti kelas plug-in/driver yang "
-"disebut ``supported_qos_rule_types`` yang mengembalikan daftar string yang "
-"sesuai dengan `QoS rule types `_."
-
-msgid ""
-"Anyone with access to the Networking service can create their own address "
-"scopes. However, network administrators can create shared address scopes, "
-"allowing other projects to create networks within that address scope."
-msgstr ""
-"Siapapun dengan akses ke layanan Networking dapat membuat lingkup alamat "
-"mereka sendiri. Namun, administrator jaringan dapat membuat lingkup alamat "
-"bersama, yang memungkinkan proyek lain untuk membuat jaringan dalam lingkup "
-"alamat itu."
-
-msgid ""
-"Apply the security group to the load balancer's network port using "
-"``vip_port_id`` from the :command:`neutron lbaas-loadbalancer-show` command:"
-msgstr ""
-"Terapkan kelompok keamanan untuk port jaringan penyeimbang beban menggunakan "
-"`` vip_port_id`` dari perintah :command:`neutron lbaas-loadbalancer-show`:"
-
-msgid "Apply the settings by restarting the web server."
-msgstr "Terapkan pengaturan dengan me-restart web server."
-
-# #-#-#-#-# config_ml2_plug_in.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# config_server.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# deploy_scenario4b.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_dvr_ovs.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_l3ha_lb.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_l3ha_ovs.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_legacy_lb.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_legacy_ovs.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_provider_ovs.pot (Networking Guide 0.9) #-#-#-#-#
-msgid "Architecture"
-msgstr "Arsitektur"
-
-msgid "Arp Spoofing Prevention"
-msgstr "Arp Spoofing Prevention"
-
-msgid ""
-"As a regular project in an OpenStack cloud, you can create a subnet pool of "
-"your own and use it to manage your own pool of addresses. This does not "
-"require any admin privileges. Your pool will not be visible to any other "
-"project."
-msgstr ""
-"Sebagai proyek reguler di cloud OpenStack, Anda dapat membuat subnet kolam "
-"Anda sendiri dan menggunakannya untuk mengelola kolam alamat Anda sendiri. "
-"Ini tidak memerlukan hak istimewa admin. Kolam renang Anda tidak akan "
-"terlihat dari proyek lainnya."
-
-msgid ""
-"As introduced in other guide entries, neutron provides a means of making an "
-"object (``network``, ``qos-policy``) available to every project. This is "
-"accomplished using the ``shared`` flag on the supported object:"
-msgstr ""
-"Seperti yang diperkenalkan dalam entri buku lainnya, neutron menyediakan "
-"sarana untuk membuat sebuah objek (``network``, ``qos-policy``) tersedia "
-"untuk setiap proyek. Hal ini dicapai dengan menggunakan ``shared`` flag pada "
-"objek yang didukung:"
-
-msgid ""
-"As mentioned earlier, modern Ethernet networks use switches to interconnect "
-"the network hosts. A switch is a box of networking hardware with a large "
-"number of ports that forward Ethernet frames from one connected host to "
-"another. When hosts first send frames over the switch, the switch doesn’t "
-"know which MAC address is associated with which port. If an Ethernet frame "
-"is destined for an unknown MAC address, the switch broadcasts the frame to "
-"all ports. The switch learns which MAC addresses are at which ports by "
-"observing the traffic. Once it knows which MAC address is associated with a "
-"port, it can send Ethernet frames to the correct port instead of "
-"broadcasting. The switch maintains the mappings of MAC addresses to switch "
-"ports in a table called a *forwarding table* or *forwarding information "
-"base* (FIB). Switches can be daisy-chained together, and the resulting "
-"connection of switches and hosts behaves like a single network."
-msgstr ""
-"Seperti disebutkan sebelumnya, jaringan Ethernet modern menggunakan switch "
-"untuk menghubungkan host jaringan. Sebuah switch adalah sebuah kotak dari "
-"perangkat keras jaringan dengan sejumlah besar port yang meneruskan frame "
-"Ethernet dari satu host terhubung ke host yang lain. Ketika host pertama "
-"mengirim frame melintasi switch, switch tidak tahu dimana alamat MAC "
-"dikaitkan dengan port. Jika sebuah frame Ethernet diperuntukan untuk alamat "
-"MAC yang tidak diketahui, switch menyiarkan frame ke semua port. Switch ini "
-"mempelajari alamat MAC dengan cara pengamatan lalu lintas pada port. Setelah "
-"tahu dimana alamat MAC terkait dengan port, switch dapat mengirim frame "
-"Ethernet ke port yang benar bukan ke penyiaran. Switch mempertahankan "
-"pemetaan dari alamat MAC ke switch port dalam tabel yang disebut *forwarding "
-"table* atau *forwarding information base* (FIB). Switch dapat daisy-chained "
-"bersama-sama, dan koneksi yang dihasilkan dari switch dan host berperilaku "
-"seperti jaringan tunggal."
-
-msgid ""
-"As of Juno, the OpenStack Networking service (neutron) provides two new "
-"attributes to the subnet object, which allows users of the API to configure "
-"IPv6 subnets."
-msgstr ""
-"Sebagaimana Juno, layanan OpenStack Networking (neutron) menyediakan dua "
-"atribut baru ke objek subnet, yang memungkinkan pengguna API untuk "
-"mengkonfigurasi subnet IPv6."
-
-msgid ""
-"As of the writing of this guide, there is not placement API CLI client, so "
-"the :command:`curl` command is used for this example."
-msgstr ""
-"Pada penulisan buku ini, tidak ada penempatan API CLI client, sehingga "
-"perintah :command:`curl` digunakan untuk contoh ini."
-
-msgid ""
-"As the `admin` user, verify the neutron routers are allocated IP addresses "
-"from their correct subnets. Use ``openstack port list`` to find ports "
-"associated with the routers."
-msgstr ""
-"Sebagai pengguna `admin`, lakukan verifikasi apakah router neutron "
-"dialokasikan alamat IP dari subnet yang benar. Gunakan ``openstack port "
-"list`` untuk menemukan port yang berhubungan dengan router."
-
-msgid "Assign a network to a given DHCP agent."
-msgstr "Tetapkan jaringan untuk agen DHCP yang diberikan."
-
-msgid ""
-"Assign a valid domain name to the network's ``dns_domain`` attribute. This "
-"name must end with a period (``.``)."
-msgstr ""
-"Tetapkan nama domain yang valid untuk atribut ``dns_domain`` milik jaringan. "
-"Nama ini harus diakhiri dengan titik (``.``)."
-
-msgid ""
-"Associate a floating IP with the instance port and verify it was allocated "
-"an IP address from the correct subnet:"
-msgstr ""
-"Kaitkan IP mengambang dengan port instance dan lakukan verifikasi apakah "
-"alamat IP yang dialokasikan itu dari subnet yang benar:"
-
-msgid "Associate the floating IPv4 address with the instance."
-msgstr "Lakukan asosiasi alamat IPv4 mengambang dengan instance."
-
-msgid "Associating a floating IP address"
-msgstr "Mengaitkannya alamat IP mengambang"
-
-msgid ""
-"At a high level, the basic steps to launching an instance on a trunk are the "
-"following:"
-msgstr ""
-"Pada tingkat tinggi, langkah dasar untuk meluncurkan sebuah instance pada "
-"trunk adalah sebagai berikut:"
-
-msgid ""
-"At this moment the Networking DB is the source of truth, but nova-api is the "
-"only public read-write API."
-msgstr ""
-"Pada saat ini Networking DB adalah sumber kebenaran (source of truth), "
-"tetapi nova-api adalah satu-satunya public read-write API."
-
-msgid ""
-"At this point, all compute nodes have been migrated, but they are still "
-"using the nova-api API and Compute gateways. Finally, enable OpenStack "
-"Networking by following these steps:"
-msgstr ""
-"Pada titik ini, semua node komputasi telah bermigrasi, namun mereka masih "
-"menggunakan nova-api API dan gateway Compute. Akhirnya, aktifkan OpenStack "
-"Networking dengan mengikuti langkah-langkah ini:"
-
-msgid "Attaching router gateways to networks (since Mitaka)."
-msgstr "Pelekatan router gateways untuk jaringan (sejak Mitaka)."
-
-msgid "Attribute name"
-msgstr "Nama atribut"
-
-msgid "Auto Configuration Flag = 1"
-msgstr "Auto Configuration Flag = 1"
-
-msgid ""
-"Automated removal of empty bridges has been disabled to fix a race condition "
-"between the Compute (nova) and Networking (neutron) services. Previously, it "
-"was possible for a bridge to be deleted during the time when the only "
-"instance using it was rebooted."
-msgstr ""
-"Penghapusan secara otomatis jembatan kosong telah dinonaktifkan untuk "
-"memperbaiki kondisi perebuatan (race condition) antara layanan Compute "
-"(nova) dan Networking (neutron). Kondisi sebelumnya, penghapusan jembatan "
-"memungkinkan dilakukan ketika instance dilakukan reboot, dan bila hanya "
-"instance yang sedang menggunakan layanan itu. "
-
-msgid "Automatic allocation of network topologies"
-msgstr "Alokasi otomatis topologi jaringan"
-
-msgid "Availability zone aware scheduler"
-msgstr " Alat penjadwal mengetahui zona ketersediaan"
-
-msgid ""
-"Availability zone is selected from ``default_availability_zones`` in ``/etc/"
-"neutron/neutron.conf`` if a resource is created without "
-"``availability_zone_hints``:"
-msgstr ""
-"ona ketersediaan dipilih dari ``default_availability_zones`` dalam ``/etc/"
-"neutron/neutron.conf`` jika sumber daya dibuat tanpa "
-"``availability_zone_hints``:"
-
-msgid "Availability zone of agents"
-msgstr "Zona ketersediaan agen"
-
-msgid "Availability zone related attributes"
-msgstr "Zona Ketersediaan terkait atribut"
-
-msgid "Availability zones"
-msgstr "Zona ketersediaan"
-
-msgid "BGP dynamic routing"
-msgstr "Border Gateway Protocol (BGP) routing dinamis"
-
-msgid ""
-"BGP dynamic routing advertises prefixes for self-service networks and host "
-"routes for floating IP addresses."
-msgstr ""
-"BGP routing dinamis menyiarkan update dan perubahan awalan untuk jaringan "
-"self-service dan host route alamat IP mengambang."
-
-msgid ""
-"BGP dynamic routing consists of a service plug-in and an agent. The service "
-"plug-in implements the Networking service extension and the agent manages "
-"BGP peering sessions. A cloud administrator creates and configures a BGP "
-"speaker using the CLI or API and manually schedules it to one or more hosts "
-"running the agent. Agents can reside on hosts with or without other "
-"Networking service agents. Prefix advertisement depends on the binding of "
-"external networks to a BGP speaker and the address scope of external and "
-"internal IP address ranges or subnets."
-msgstr ""
-"BGP routing dinamis terdiri dari plug-in layanan dan agen. Plug-in layanan "
-"mengimplementasikan ekstensi layanan Networking dan agen mengelola BGP "
-"peering sessions. Seorang administrator cloud menciptakan dan "
-"mengkonfigurasi pembicara BGP menggunakan CLI atau API dan secara manual "
-"menjadwalkan untuk satu atau lebih host menjalankan agen. Agen dapat berada "
-"pada host dengan atau tanpa agen layanan Networking lainnya. Awalan "
-"penyiaran update dan perubahan tergantung pada pengikatan jaringan eksternal "
-"ke speaker BGP dan ruang lingkup alamat rentang atau subnet alamat IP "
-"eksternal dan internal. "
-
-msgid ""
-"BGP dynamic routing enables advertisement of self-service (private) network "
-"prefixes to physical network devices that support BGP such as routers, thus "
-"removing the conventional dependency on static routes. The feature relies "
-"on :ref:`address scopes ` and requires knowledge of "
-"their operation for proper deployment."
-msgstr ""
-"BGP routing dinamis memungkinkan penyiaran update dan perubahan awalan "
-"jaringan self-service (private) untuk perangkat jaringan fisik yang "
-"mendukung BGP seperti router, sehingga menghilangkan ketergantungan "
-"konvensional pada rute statis. Fitur ini bergantung pada :ref:`address "
-"scopes ` dan membutuhkan pengetahuan tentang operasi "
-"mereka untuk pengerahan yang tepat."
-
-msgid ""
-"BGP dynamic routing supports peering via IPv6 and advertising IPv6 prefixes."
-msgstr ""
-"Routing dinamis BGP mendukung peering (kebersamaan) melalui IPv6 dan "
-"penyiaran update dan perubahan awalan IPv6."
-
-msgid ""
-"BGP dynamic routing supports scheduling a BGP speaker to multiple agents "
-"which effectively multiplies prefix advertisements to the same peer. If an "
-"agent fails, the peer continues to receive advertisements from one or more "
-"operational agents."
-msgstr ""
-"Routing dinamis BGP mendukung penjadwalan BGP speaker ke beberapa agen yang "
-"secara efektif melipatgandakan penyiaran update dan perubahan awalan untuk "
-"rekan yang sama. Jika agen gagal, peer terus menerima penyiaran update dan "
-"perubahan dari satu atau lebih agen operasional."
-
-msgid ""
-"BGP routing can be used to automatically set up a static route for your "
-"instances."
-msgstr ""
-"Border Gateway Protocol (BGP) routing dapat digunakan untuk secara otomatis "
-"mengatur rute statis untuk instance Anda."
-
-msgid "Backwards compatibility"
-msgstr "Kompatibilitas mundur"
-
-msgid "Backwards compatibility with pre-Juno IPv6 behavior."
-msgstr "Kompatibilitas mundur dengan perilaku pra-Juno IPv6."
-
-msgid "Basic networking"
-msgstr "Jaringan dasar"
-
-msgid ""
-"Because a network host may have multiple TCP-based applications running, TCP "
-"uses an addressing scheme called *ports* to uniquely identify TCP-based "
-"applications. A TCP port is associated with a number in the range 1-65535, "
-"and only one application on a host can be associated with a TCP port at a "
-"time, a restriction that is enforced by the operating system."
-msgstr ""
-"Karena host jaringan mungkin memiliki beberapa aplikasi TCP-based yang "
-"sedang berjalan, TCP menggunakan skema pengalamatan yang disebut *port * "
-"untuk mengidentifikasi aplikasi TCP-based secara unik. Sebuah port TCP "
-"dikaitkan dengan angka dalam kisaran 1-65535, dan hanya satu aplikasi pada "
-"host dapat dikaitkan dengan port TCP pada suatu waktu, pembatasan yang "
-"diberlakukan dengan sistem operasi."
-
-msgid ""
-"Because the NAT router modifies ports as well as IP addresses, this form of "
-"SNAT is sometimes referred to as *Port Address Translation* (PAT). It is "
-"also sometimes referred to as *NAT overload*."
-msgstr ""
-"Karena router NAT memodifikasi port serta alamat IP, bentuk SNAT kadang-"
-"kadang disebut sebagai *Port Address Translation* (PAT). Hal ini juga kadang-"
-"kadang disebut sebagai *NAT overload*."
-
-msgid ""
-"Before Kilo, Networking had no automation around the addresses used to "
-"create a subnet. To create one, you had to come up with the addresses on "
-"your own without any help from the system. There are valid use cases for "
-"this but if you are interested in the following capabilities, then subnet "
-"pools might be for you."
-msgstr ""
-"Sebelum Kilo, Networking tidak mempunyai otomatisasi sekitar alamat yang "
-"digunakan untuk membuat subnet. Untuk membuat subnet, Anda harus membawa "
-"alamat Anda sendiri tanpa bantuan dari sistem. Ada use case valid untuk ini, "
-"tetapi jika Anda tertarik pada kemampuan berikut, maka kolam subnet mungkin "
-"benar untuk Anda."
-
-msgid ""
-"Before executing any of the use cases, the user must create in the DNS "
-"service under his project a DNS zone where the A and AAAA records will be "
-"created. For the description of the use cases below, it is assumed the zone "
-"``example.org.`` was created previously."
-msgstr ""
-"Sebelum melaksanakan salah satu use case, pengguna harus membuat dalam "
-"layanan DNS di bawah proyeknya zona DNS dimana catatan A dan AAAA akan "
-"dibuat. Untuk deskripsi use case di bawah, diasumsikan zona ``example.org.`` "
-"diciptakan sebelumnya."
-
-msgid ""
-"Before routed provider networks, the Networking service could not present a "
-"multi-segment layer-3 network as a single entity. Thus, each operator "
-"typically chose one of the following architectures:"
-msgstr ""
-"Sebelum jaringan penyedia yang diarahkan, layanan Networking tidak bisa "
-"menghadirkan jaringan lapisan-3 multi-segment sebagai satu kesatuan. Dengan "
-"demikian, masing-masing operator biasanya memilih salah satu arsitektur "
-"berikut:"
-
-msgid ""
-"Before the end-user can use the auto-allocation feature, the operator must "
-"create the resources that will be used for the auto-allocated network "
-"topology creation. To perform this task, proceed with the following steps:"
-msgstr ""
-"Sebelum end-user dapat menggunakan fitur auto-alokasi, operator harus "
-"menciptakan sumber daya yang akan digunakan untuk pembuatan topologi "
-"jaringan auto-allocated. Untuk melakukan tugas ini, lanjutkan dengan langkah-"
-"langkah berikut:"
-
-msgid ""
-"Begin a continuous ``ping`` of both the floating IPv4 address and IPv6 "
-"address of the instance. While performing the next three steps, you should "
-"see a minimal, if any, interruption of connectivity to the instance."
-msgstr ""
-"Mulailah terus menerus ``ping`` dari alamat IPv4 mengambang dan alamat IPv6 "
-"dari instance. Sambil melakukan tiga langkah selanjutnya, Anda akan melihat "
-"minimal, jika ada, gangguan konektivitas untuk instance."
-
-msgid ""
-"Begin with segments. The Networking service defines a segment using the "
-"following components:"
-msgstr ""
-"Mulailah dengan segmen. Layanan Networking mendefinisikan segmen menggunakan "
-"komponen berikut:"
-
-msgid ""
-"Beginning with Mitaka, a subnet pool can be marked as the default. This is "
-"handled with a new extension."
-msgstr ""
-"Dimulai dengan Mitaka, kolam subnet dapat ditandai sebagai default. Hal ini "
-"ditangani dengan ekstensi baru."
-
-msgid "Binding QoS policies permissions to networks or ports (since Mitaka)."
-msgstr "Pengikatan izin kebijakan QoS ke jaringan atau port (sejak Mitaka)."
-
-msgid ""
-"Boot a VM on ``net2``. Let both DHCP agents host ``net2``. Fail the agents "
-"in turn to see if the VM can still get the desired IP."
-msgstr ""
-"Boot VM pada ``net2``. Biarkan kedua agen DHCP menjadi host ``net2``. Gagal "
-"agen pada gilirannya untuk melihat apakah VM masih bisa mendapatkan IP yang "
-"diinginkan."
-
-msgid "Boot a VM on ``net2``:"
-msgstr "Boot VM pada ``net2``:"
-
-msgid ""
-"Boot an instance or alternatively, create a port specifying a valid value to "
-"its ``dns_name`` attribute. If the port is going to be used for an instance "
-"boot, the value assigned to ``dns_name`` must be equal to the ``hostname`` "
-"that the Compute service will assign to the instance. Otherwise, the boot "
-"will fail."
-msgstr ""
-"Boot instance atau alternatifnya, buatlah port yang menetapkan nilai valid "
-"untuk atribut ``dns_name``. Jika port tersebut akan digunakan untuk boot "
-"instance, nilai yang ditetapkan untuk ``dns_name`` harus sama dengan "
-"``hostname`` dimana layanan Compute akan menetapkan ke instance. Jika "
-"tidak, boot akan gagal."
-
-msgid ""
-"Boot an instance specifying the externally accessible network. "
-"Alternatively, create a port on the externally accessible network specifying "
-"a valid value to its ``dns_name`` attribute. If the port is going to be used "
-"for an instance boot, the value assigned to ``dns_name`` must be equal to "
-"the ``hostname`` that the Compute service will assign to the instance. "
-"Otherwise, the boot will fail."
-msgstr ""
-"Boot instance yang menentukan jaringan accessible secara eksternal. Atau, "
-"buatlah port pada jaringan accessible secara eksternal yang menentukan "
-"nilai yang valid untuk atribut ``dns_name`` nya. Jika port tersebut akan "
-"digunakan untuk instance boot, nilai yang diberikan untuk ``dns_name`` harus "
-"sama dengan ``hostname`` dimana layanan Compute akan menetapkan ke instance. "
-"Jika tidak, boot akan gagal."
-
-msgid "Both DHCP agents host the ``net2`` network."
-msgstr "Kedua agen DHCP menjadi host jaringan ``net2``."
-
-msgid ""
-"Both instances reside on the same compute node to illustrate how VLAN "
-"tagging enables multiple logical layer-2 networks to use the same physical "
-"layer-2 network."
-msgstr ""
-"Kedua instance berada pada node komputasi yang sama untuk menggambarkan "
-"bagaimana VLAN tagging mengaktifkan beberapa jaringan lapisan-2 logis "
-"menggunakan jaringan lapisan-2 fisik yang sama."
-
-msgid ""
-"Both instances reside on the same compute node to illustrate how VXLAN "
-"enables multiple overlays to use the same layer-3 network."
-msgstr ""
-"Kedua instance berada pada node komputasi yang sama untuk menggambarkan "
-"bagaimana VXLAN mengaktifkan beberapa overlays menggunakan jaringanlapisan-3 "
-"yang sama."
-
-msgid ""
-"Both the Linux bridge and the Open vSwitch dataplane modules support "
-"forwarding IPv6 packets amongst the guests and router ports. Similar to "
-"IPv4, there is no special configuration or setup required to enable the "
-"dataplane to properly forward packets from the source to the destination "
-"using IPv6. Note that these dataplanes will forward Link-local Address (LLA) "
-"packets between hosts on the same network just fine without any "
-"participation or setup by OpenStack components after the ports are all "
-"connected and MAC addresses learned."
-msgstr ""
-"Jembatan Linux dan modul dataplane Open vSwitch mendukung penerusan paket "
-"IPv6 di antara guest dan port router. Mirip dengan IPv4, tidak ada "
-"konfigurasi khusus atau pengaturan diperlukan untuk mengaktifkan dataplane "
-"untuk meneruskan paket dari sumber ke tujuan menggunakan IPv6. Perhatikan "
-"bahwa dataplanes ini akan meneruskan paket Link-local Address (LLA) antara "
-"host pada jaringan yang sama saja tanpa partisipasi atau setup oleh komponen "
-"OpenStack setelah port terhubung semua dan alamat MAC dipelajari."
-
-msgid ""
-"Bring up the Networking (l3) nodes. The new routers will have identical MAC"
-"+IPs as old Compute gateways so some sort of immediate cutover is possible, "
-"except for stateful connections issues such as NAT."
-msgstr ""
-"Munculkan Networking (l3) node. Router baru akan memiliki MAC+IP identik "
-"seperti gateway Compute tua dan juga beberapa jenis immediate cutover "
-"memungkinkan, kecuali untuk masalah koneksi stateful seperti NAT."
-
-msgid "Building an LBaaS v2 load balancer"
-msgstr "Membangun penyeimbang beban LBaaS v2"
-
-msgid "Buying guide"
-msgstr "Panduan membeli"
-
-msgid ""
-"By creating subnets from scoped subnet pools, the network is associated with "
-"the address scope."
-msgstr ""
-"Dengan pembuatan subnet dari kolam subnet terlingkup (scoped), jaringan "
-"dikaitkan dengan lingkup alamat."
-
-msgid ""
-"By default, IPv6 subnets on provider networks rely on physical network "
-"infrastructure for stateless address autoconfiguration (SLAAC) and router "
-"advertisement."
-msgstr ""
-"Secara default, subnet IPv6 pada jaringan penyedia bergantung pada "
-"infrastruktur jaringan fisik untuk stateless address autoconfiguration "
-"(SLAAC) dan router advertisement."
-
-msgid ""
-"By default, ``enable_firewall`` option value is ``True`` in ``local_settings."
-"py`` file."
-msgstr ""
-"Secara default, nilai opsi ``enable_firewall`` adalah ``True`` dalam file "
-"``local_settings.py``."
-
-msgid ""
-"By default, all security groups contain a series of basic (sanity) and anti-"
-"spoofing rules that perform the following actions:"
-msgstr ""
-"Secara default, semua kelompok keamanan berisi serangkaian aturan dasar "
-"(sanity) dan anti-spoofing yang melakukan tindakan berikut:"
-
-msgid ""
-"By default, libvirt creates a network named *default*. The details of this "
-"network may vary by distribution; on Ubuntu this network involves:"
-msgstr ""
-"Secara default, libvirt menciptakan jaringan bernama *default*. Rincian "
-"jaringan ini mungkin berbeda dengan distribusi; pada Ubuntu jaringan ini "
-"melibatkan:"
-
-msgid ""
-"By default, libvirt's networking functionality is enabled, and libvirt "
-"creates a network when the system boots. To implement this network, libvirt "
-"leverages some of the same technologies that OpenStack Network does. In "
-"particular, libvirt uses:"
-msgstr ""
-"Secara default, fungsi jaringan libvirt ini diaktifkan, dan libvirt "
-"menciptakan jaringan saat booting. Untuk mengimplementasikan jaringan ini, "
-"libvirt memanfaatkan beberapa teknologi yang sama dimana OpenStack Network "
-"menggunakannya. Secara khusus, libvirt menggunakan:"
-
-msgid ""
-"By default, the VXLAN protocol lacks knowledge of target location and uses "
-"multicast to discover it. After discovery, it stores the location in the "
-"local forwarding database. In large deployments, the discovery process can "
-"generate a significant amount of network that all nodes must process. To "
-"eliminate the latter and generally increase efficiency, the Networking "
-"service includes the layer-2 population mechanism driver that automatically "
-"populates the forwarding database for VXLAN interfaces. The example "
-"configuration enables this driver. For more information, see :ref:`config-"
-"plugin-ml2`."
-msgstr ""
-"Secara default, protokol VXLAN tidak memiliki pengetahuan tentang lokasi "
-"target dan menggunakan multicast untuk menemukan itu. Setelah ada penemuan, "
-"protokol menyimpan lokasi di database forwarding lokal. Dalam pengerahan "
-"besar, proses penemuan dapat menghasilkan sejumlah besar jaringan dimana "
-"semua node harus memproses. Untuk menghilangkan terakhir ini dan umumnya "
-"meningkatkan efisiensi, layanan Networking meliputi layer-2 population "
-"mechanism driver yang secara otomatis mengisi database forwarding untuk "
-"antarmuka VXLAN. Contoh konfigurasi mengaktifkan driver ini. Untuk informasi "
-"lebih lanjut, lihat: ref: `config-plugin-ml2`."
-
-msgid "CLI support"
-msgstr "Dukungan CLI (command-line interface)"
-
-msgid ""
-"Can be used for instance network attachments as well as for attachments of "
-"other network resources like routers, DHCP, and so on."
-msgstr ""
-"Dapat digunakan untuk attachment jaringan instance serta untuk attachment "
-"dari sumber daya jaringan lain seperti router, DHCP, dan sebagainya."
-
-msgid ""
-"Can only be used for instance network attachments (device_owner = compute) "
-"and not for attachment of other resources like routers, DHCP, and so on."
-msgstr ""
-"Hanya dapat digunakan untuk attachment jaringan instance (device_owner = "
-"compute) dan bukan untuk attachment dari sumber lain seperti router, DHCP, "
-"dan sebagainya."
-
-msgid ""
-"Can only be used for instance network attachments (device_owner = compute)."
-msgstr ""
-"Hanya dapat digunakan untuk attachment jaringan instance (device_owner = "
-"compute)."
-
-msgid "Case 1"
-msgstr "Case 1"
-
-msgid "Case 1: Each virtual network uses unique DNS resolver(s)"
-msgstr "Case 1: Setiap jaringan virtual menggunakan DNS resolver unik"
-
-msgid "Case 2"
-msgstr "Case 2"
-
-msgid "Case 2: All virtual networks use same DNS resolver(s)"
-msgstr "Case 2: Semua jaringan virtual menggunakan DNS resolver yang sama"
-
-msgid "Case 3"
-msgstr "Case 3"
-
-msgid "Case 3: All virtual networks use DNS resolver(s) on the host"
-msgstr "Case 3: Semua jaringan virtual menggunakan DNS resolver pada host"
-
-msgid ""
-"Check that ``ebrctl`` is listed somewhere in ``/etc/nova/rootwrap.d/*``:"
-msgstr ""
-"Cek bahwa ``ebrctl`` terdaftar di suatu tempat di ``/etc/nova/rootwrap.d/*``:"
-
-msgid ""
-"Check the instance status. The ``Networks`` field contains an IP address "
-"from the subnet having the ``compute:nova`` service type."
-msgstr ""
-"Periksa status instance. Field ``Networks`` berisi alamat IP dari subnet "
-"memiliki tipe layanan ``compute:nova``"
-
-msgid "Checking connectivity"
-msgstr "Pemeriksaan konektivitas"
-
-msgid "Clear tags from a resource:"
-msgstr "Hapus tag dari sumber daya:"
-
-msgid "Client and server exchange data."
-msgstr "Klien dan pertukaran data server."
-
-msgid "Client connects to server."
-msgstr "Klien terhubung ke server."
-
-msgid "Client or server disconnects."
-msgstr "Klien atau server terputus."
-
-msgid ""
-"Clone the `neutron-lbaas-dashboard repository `__ and check out the release branch "
-"that matches the installed version of Dashboard:"
-msgstr ""
-"Lakukan cloning `neutron-lbaas-dashboard repository `__ dan periksa cabang rilis "
-"yang cocok dengan versi Dashboard terpasang:"
-
-msgid "Compatibility notes"
-msgstr "Catatan kompatibilitas"
-
-msgid "Complex queries may have contradictory parameters. Example::"
-msgstr "Query yang kompleks mungkin memiliki parameter bertentangan. Contoh::"
-
-msgid ""
-"Compute needs a per-hypervisor \"has_transitioned\" boolean change in the "
-"data model to be used during the migration process. This flag is no longer "
-"required once the process is complete."
-msgstr ""
-"Komputasi kebutuhan per-hypervisor \"has_transitioned\" perubahan boolean "
-"dalam model data yang akan digunakan selama proses migrasi. Flag ini tidak "
-"lagi diperlukan setelah proses selesai."
-
-msgid "Compute node 1:"
-msgstr "Compute node 1 (node komputasi 1):"
-
-msgid "Compute node 2:"
-msgstr "Compute node 2 (node komputasi 2):"
-
-msgid "Compute nodes"
-msgstr "Compute nodes (simpul komputasi)"
-
-msgid "Compute service API: 2.41"
-msgstr "Layanan Compute API: 2.41"
-
-msgid ""
-"Compute: Contains the hypervisor component of the OpenStack Compute service "
-"and the OpenStack Networking layer-2, DHCP, and metadata components. High-"
-"availability options may include additional components."
-msgstr ""
-"Compute: Mengandung komponen hypervisor layanan OpenStack Compute dan "
-"OpenStack Networking layer-2, DHCP, dan komponen metadata. Opsi ketersediaan "
-"tinggi (high-availability) mungkin termasuk komponen tambahan."
-
-msgid "Concepts"
-msgstr "Konsep"
-
-msgid ""
-"Conceptually, you can think of an Ethernet network as a single bus that each "
-"of the network hosts connects to. In early implementations, an Ethernet "
-"network consisted of a single coaxial cable that hosts would tap into to "
-"connect to the network. However, network hosts in modern Ethernet networks "
-"connect directly to a network device called a *switch*. Still, this "
-"conceptual model is useful, and in network diagrams (including those "
-"generated by the OpenStack dashboard) an Ethernet network is often depicted "
-"as if it was a single bus. You'll sometimes hear an Ethernet network "
-"referred to as a *layer 2 segment*."
-msgstr ""
-"Secara konseptual, Anda bisa memikirkan jaringan Ethernet sebagai bus "
-"tunggal dimana masing-masing host jaringan menghubungkannya. Dalam "
-"implementasi awal, jaringan Ethernet terdiri dari kabel koaksial tunggal "
-"dimana host akan memanfaatkan hubungan ke jaringan. Namun, host jaringan "
-"dalam jaringan Ethernet modern yang terhubung langsung ke perangkat jaringan "
-"yang disebut *switch*. Namun, model konseptual ini berguna, dan dalam "
-"diagram jaringan (termasuk jaringan yang dihasilkan oleh dashboard "
-"OpenStack), jaringan Ethernet sering digambarkan seolah-olah itu adalah bus "
-"tunggal. Kadang-kadang Anda akan mendengar bahwa jaringan Ethernet disebut "
-"sebagai *layer 2 segment*."
-
-msgid "Configuration"
-msgstr "Konfigurasi"
-
-msgid "Configuration example"
-msgstr "Contoh konfigurasi"
-
-msgid ""
-"Configuration for the DHCP agent is typically done in the ``dhcp_agent.ini`` "
-"configuration file. Make sure that on agent start you pass this "
-"configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen DHCP biasanya dilakukan dalam file konfigurasi "
-"``dhcp_agent.ini``. Pastikan bahwa pada agent start Anda melewati file "
-"konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the L3 agent is typically done in the ``l3_agent.ini`` "
-"configuration file. Make sure that on agent start you pass this "
-"configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen L3 biasanya dilakukan dalam file konfigurasi "
-"``l3_agent.ini``. Pastikan bahwa pada agent start Anda melewati file "
-"konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the L3 metering agent is typically done in the "
-"``metering_agent.ini`` configuration file. Make sure that on agent start you "
-"pass this configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen metering L3 biasanya dilakukan di file konfigurasi "
-"``metering_agent.ini``. Pastikan bahwa pada agent start Anda melewati file "
-"konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the Linux bridge agent is typically done in the "
-"``linuxbridge_agent.ini`` configuration file. Make sure that on agent start "
-"you pass this configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen jembatan Linux biasanya dilakukan di file konfigurasi "
-"``linuxbridge_agent.ini``. Pastikan bahwa pada agent starti Anda melewati "
-"file konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the MacVTap agent is typically done in the ``macvtap_agent."
-"ini`` configuration file. Make sure that on agent start you pass this "
-"configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen MacVTap biasanya dilakukan di file konfigurasi "
-"``macvtap_agent.ini``. Pastikan bahwa pada agent start Anda melewati file "
-"konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the Metadata agent is typically done in the "
-"``metadata_agent.ini`` configuration file. Make sure that on agent start you "
-"pass this configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen Metadata biasanya dilakukan di file konfigurasi "
-"``metadata_agent.ini`` . Pastikan bahwa pada agent start Anda melewati file "
-"konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the Open vSwitch agent is typically done in the "
-"``openvswitch_agent.ini`` configuration file. Make sure that on agent start "
-"you pass this configuration file as argument."
-msgstr ""
-"Konfigurasi untuk agen Open vSwitch biasanya dilakukan di file konfigurasi "
-"``openvswitch_agent.ini`` . Pastikan bahwa pada agent start Anda melewati "
-"file konfigurasi ini sebagai argumen."
-
-msgid ""
-"Configuration for the SRIOV nic switch agent is typically done in the "
-"``sriov_agent.ini`` configuration file. Make sure that on agent start you "
-"pass this configuration file as argument."
-msgstr ""
-"Konfigurasi untuk SRIOV nic switch agen biasanya dilakukan dalam file "
-"konfigurasi ``sriov_agent.ini``. Pastikan bahwa pada agent start Anda "
-"melewati file konfigurasi ini sebagai argumen."
-
-msgid "Configuration of the externally accessible network for use case 1"
-msgstr ""
-"Konfigurasi jaringan yang dapat diakses secara eksternal untuk use case 1"
-
-msgid "Configuration of those drivers is not part of this document."
-msgstr "Konfigurasi driver ini bukan bagian dari dokumen ini."
-
-msgid "Configurations"
-msgstr "Configurations"
-
-msgid "Configure Firewall-as-a-Service v1"
-msgstr "Lakukan konfigurasi Firewall-as-a-Service v1"
-
-msgid "Configure Firewall-as-a-Service v2"
-msgstr "Lakukan konfigurasi Firewall-as-a-Service v2"
-
-msgid "Configure a DNS resolver on an existing subnet."
-msgstr "Lakukan konfigurasi DNS resolver pada subnet yang ada."
-
-msgid "Configure a DNS resolver when creating a subnet."
-msgstr "Lakukan konfigurasi DNS resolver saat membuat subnet."
-
-msgid ""
-"Configure an external DNS driver. The Networking service provides a driver "
-"reference implementation based on the OpenStack DNS service. It is expected "
-"that third party vendors will provide other implementations in the future. "
-"For detailed configuration instructions, see :ref:`config-dns-int-ext-serv`."
-msgstr ""
-"Lakukan konfigurasi driver DNS eksternal. Layanan Networking menyediakan "
-"implementasi referensi driver berdasarkan layanan DNS OpenStack. Diharapkan "
-"vendor pihak ketiga akan menyediakan implementasi lain di masa depan. Untuk "
-"petunjuk konfigurasi rinci, lihat :ref: `config-dns-int-ext-serv`."
-
-msgid "Configure common options:"
-msgstr "Mengkonfigurasi opsi umum:"
-
-msgid ""
-"Configure communication of the Networking service with the Compute scheduler."
-msgstr ""
-"Lakukan konfigurasi komunikasi layanan Networking dengan Compute scheduler."
-
-msgid "Configure drivers and network types:"
-msgstr "Mengkonfigurasi driver dan tipe jaringan:"
-
-msgid "Configure network mappings."
-msgstr "Mengkonfigurasi pemetaan jaringan."
-
-msgid "Configure network mappings:"
-msgstr "Mengkonfigurasi pemetaan jaringan:"
-
-msgid "Configure neutron-server (Controller)"
-msgstr "Lakukan konfigurasi neutron-server (Controller)"
-
-msgid "Configure nova-scheduler (Controller)"
-msgstr "Lakukan konfigurasi nova-scheduler (Controller)"
-
-msgid "Configure the FWaaS plugin for the L3 agent."
-msgstr "Lakukan konfigurasi agen FWaaS plugin for the L3."
-
-msgid ""
-"Configure the L3 agent. Add the following to ``/etc/neutron/l3_agent.ini``:"
-msgstr ""
-"Lakukan konfigurasi agen L3. Tambahkan hal berikut ke ``/etc/neutron/"
-"l3_agent.ini``:"
-
-msgid ""
-"Configure the Open vSwitch agent. Add the following to ``/etc/neutron/"
-"plugins/ml2/ml2_conf.ini``:"
-msgstr ""
-"Lakukan konfigurasi agen Open vSwitch. Tambahkan hal berikut ke ``/etc/"
-"neutron/plugins/ml2/ml2_conf.ini``:"
-
-msgid "Configure the VXLAN network ID (VNI) range."
-msgstr "Lakukan konfigurasi kisaran VXLAN network ID (VNI)."
-
-msgid "Configure the driver."
-msgstr "Mengkonfigurasi driver."
-
-msgid ""
-"Configure the layer-2 agent on each node to map one or more segments to the "
-"appropriate physical network bridge or interface and restart the agent."
-msgstr ""
-"Mengkonfigurasi agen lapisan-2 pada setiap node untuk memetakan satu atau "
-"lebih segmen pada jembatan jaringan fisik yang sesuai atau antarmuka dan me-"
-"restart agen."
-
-msgid "Configure the router ID."
-msgstr "Konfigurasi ID router."
-
-msgid ""
-"Configure which PCI devices the ``nova-compute`` service may use. Edit the "
-"``nova.conf`` file:"
-msgstr ""
-"Lakukan konfigurasi dimana perangkat PCI layanan ``nova-compute`` dapat "
-"menggunakannya. Edit file ``nova.conf``:"
-
-msgid "Configured in the *L2 agent* configuration."
-msgstr "Dikonfigurasi dalam konfigurasi *L2 agent*."
-
-msgid "Configuring LBaaS v2 with Octavia"
-msgstr "Konfigurasi LBaaS v2 dengan Octavia"
-
-msgid "Configuring LBaaS v2 with an agent"
-msgstr "Konfigurasi LBaaS v2 dengan agen"
-
-msgid ""
-"Configuring OpenStack Networking for integration with an external DNS service"
-msgstr ""
-"Lakukan konfigurasi OpenStack Networking untuk integrasi dengan layanan DNS "
-"eksternal"
-
-msgid "Configuring OpenStack Networking for prefix delegation"
-msgstr ""
-"Configuring OpenStack Networking untuk prefix delegation (penyerahan awalan)"
-
-msgid "Configuring interfaces of the guest"
-msgstr "Konfigurasi interface (antarmuka) dari guest"
-
-msgid "Configuring the Dibbler server"
-msgstr "Malakukan konfigurasi server Dibbler"
-
-msgid ""
-"Configuring the proper burst value is very important. If the burst value is "
-"set too low, bandwidth usage will be throttled even with a proper bandwidth "
-"limit setting. This issue is discussed in various documentation sources, for "
-"example in `Juniper's documentation `_. Burst value for TCP traffic can be set as 80% of desired bandwidth "
-"limit value. For example, if the bandwidth limit is set to 1000kbps then "
-"enough burst value will be 800kbit. If the configured burst value is too "
-"low, achieved bandwidth limit will be lower than expected. If the configured "
-"burst value is too high, too few packets could be limited and achieved "
-"bandwidth limit would be higher than expected."
-msgstr ""
-"Konfigurasi burst value yang tepat sangat penting. Jika burst value diatur "
-"terlalu rendah, penggunaan bandwidth akan tercekik bahkan dengan pengaturan "
-"batas bandwidth yang tepat sekalipun. Masalah ini dibahas dalam berbagai "
-"sumber dokumentasi, misalnya di `Juniper's documentation `_. Burst value untuk lalu lintas TCP dapat "
-"ditetapkan sebagai 80% dari nilai yang diinginkan batas bandwidth. Sebagai "
-"contoh, jika batas bandwidth diatur ke 1000kbps maka burst value yang cukup "
-"akan menjadi 800kbit. Jika burst value dikonfigurasi terlalu rendah, batas "
-"bandwidth yang dicapai akan lebih rendah dari yang diharapkan. Jika burst "
-"value dikonfigurasi terlalu tinggi, terlalu sedikit paket bisa dibatasi dan "
-"batas bandwidth mencapai akan lebih tinggi dari yang diharapkan."
-
-msgid ""
-"Connect a router to each of the project subnets that have been created, for "
-"example, using a router called ``router1``:"
-msgstr ""
-"Menghubungkan router untuk masing-masing subnet proyek yang telah dibuat, "
-"misalnya, menggunakan router disebut ``router1``:"
-
-msgid "Connection to the OpenStack APIs via an IPv6 transport network"
-msgstr "Koneksi ke API OpenStack melalui jaringan transportasi IPv6"
-
-msgid ""
-"Consider adding at least one additional network node to provide high-"
-"availability for instances with a fixed IP address. See See :ref:`config-dvr-"
-"snat-ha-ovs` for more information."
-msgstr ""
-"Pertimbangkan untuk menambahkan setidaknya satu node jaringan tambahan untuk "
-"menyediakan ketersediaan tinggi untuk instance dengan alamat IP tetap. Lihat "
-"lihat :ref: `config-dvr-SNAT-ha-ovs` untuk informasi lebih lanjut."
-
-msgid ""
-"Consider the following attributes of this high-availability mechanism to "
-"determine practicality in your environment:"
-msgstr ""
-"Pertimbangkan atribut berikut mekanisme ketersediaan tinggi ini untuk "
-"menentukan kepraktisan di lingkungan Anda:"
-
-msgid ""
-"Consider the following attributes of this mechanism driver to determine "
-"practicality in your environment:"
-msgstr ""
-"Pertimbangkan atribut berikut dari driver mekanisme ini untuk menentukan "
-"kepraktisan di lingkungan Anda:"
-
-msgid "Contents"
-msgstr "Contents (isi)"
-
-msgid "Controller node"
-msgstr "Controller node"
-
-msgid "Controller node configuration"
-msgstr "Konfigurasi controller node"
-
-msgid ""
-"Controller: Contains control plane components of OpenStack services and "
-"their dependencies."
-msgstr ""
-"Controller: Berisi control plane component dari layanan OpenStack dan "
-"dependensinya."
-
-msgid ""
-"Coordination between the Networking service and the Compute scheduler is not "
-"necessary for IPv6 subnets as a consequence of their large address spaces."
-msgstr ""
-"Koordinasi antara layanan Networking dan Compute scheduler tidak diperlukan "
-"untuk subnet IPv6 sebagai konsekuensi dari ruang alamat (address space) yang "
-"besar."
-
-msgid ""
-"Copy the ``_1481_project_ng_loadbalancersv2_panel.py`` file from the "
-"``neutron-lbaas-dashboard/enabled`` directory into the Dashboard "
-"``openstack_dashboard/local/enabled`` directory."
-msgstr ""
-"Salin file ``_1481_project_ng_loadbalancersv2_panel.py`` dari direktori "
-"``neutron-lbaas-dashboard/enabled`` ke dalam direktori Dashboard "
-"``openstack_dashboard/local/enabled``."
-
-msgid "Create IPv6 and IPv4 address scopes:"
-msgstr "Buat lingkup alamat IPv6 dan IPv4:"
-
-msgid "Create Virtual Functions (Compute)"
-msgstr "BuatlahVirtual Functions (Compute)"
-
-msgid "Create a BGP peer."
-msgstr "Buat rekan BGP."
-
-msgid "Create a IPv4 subnet on the provider network."
-msgstr "Buat subnet IPv4 pada jaringan provider."
-
-msgid "Create a IPv4 subnet on the self-service network."
-msgstr "Buat subnet IPv4 pada jaringan self-service."
-
-msgid "Create a IPv6 subnet on the provider network."
-msgstr "Buat subnet IPv6 pada jaringan provider."
-
-msgid "Create a IPv6 subnet on the self-service network."
-msgstr "Buat subnet IPv6 pada jaringan self-service."
-
-msgid "Create a QoS policy to share:"
-msgstr "Buat kebijakan QoS untuk berbagi:"
-
-msgid ""
-"Create a VLAN provider network which includes a default segment. In this "
-"example, the network uses the ``provider1`` physical network with VLAN ID "
-"2016."
-msgstr ""
-"Membuat jaringan penyedia VLAN yang meliputi segmen default. Dalam contoh "
-"ini, jaringan menggunakan jaringan fisik ``provider1`` dengan VLAN ID 2016."
-
-msgid "Create a couple of networks to host subnets:"
-msgstr "Buat beberapa jaringan untuk subnet host:"
-
-msgid "Create a firewall policy:"
-msgstr "Membuat kebijakan firewall:"
-
-msgid "Create a firewall rule:"
-msgstr "Buat aturan firewall:"
-
-msgid "Create a firewall:"
-msgstr "Buat firewall:"
-
-msgid "Create a flat network."
-msgstr "Buat jaringan datar (flat network)"
-
-msgid "Create a floating IP and associate it to the port."
-msgstr "Buat IP mengambang dan mengasosiasikannya ke port."
-
-msgid "Create a floating IPv4 address on the provider network."
-msgstr "Buat alamat IPv4 mengambang di jaringan provider."
-
-msgid "Create a network"
-msgstr "Membuat network"
-
-msgid "Create a network that you want to be available as an external network:"
-msgstr ""
-"Buat jaringan dimana Anda menginginkan tersedia sebagai jaringan eksternal:"
-
-msgid "Create a network to share:"
-msgstr "Membuat jaringan untuk berbagi:"
-
-msgid "Create a network."
-msgstr "Membuat jaringan."
-
-msgid "Create a parent port for the trunk."
-msgstr "Buat port induk untuk trunk."
-
-msgid ""
-"Create a policy entry using the :command:`openstack network rbac create` "
-"command (in this example, the ID of the project we want to share with is "
-"``838030a7bf3c4d04b4b054c0f0b2b17c``):"
-msgstr ""
-"Buat entri kebijakan menggunakan perintah :command:`openstack network rbac "
-"create` (dalam contoh ini, ID proyek dimana kami ingin berbagi dengan adalah "
-"``838030a7bf3c4d04b4b054c0f0b2b17c``):"
-
-msgid "Create a port chain"
-msgstr "Create a port chain (pembuatan rantai port)"
-
-msgid "Create a routed provider network"
-msgstr "Membuat jaringan penyedia dialihkan (routed)"
-
-msgid "Create a router"
-msgstr "Membuat router"
-
-msgid "Create a router."
-msgstr "Buat router."
-
-msgid "Create a router:"
-msgstr "Buat router:"
-
-msgid ""
-"Create a second segment on the provider network. In this example, the "
-"segment uses the ``provider2`` physical network with VLAN ID 2016."
-msgstr ""
-"Membuat segmen kedua pada jaringan operator. Dalam contoh ini, segmen "
-"menggunakan jaringan fisik ``provider2`` dengan VLAN ID 2016."
-
-msgid ""
-"Create a security group and rules to allow TCP port 80, TCP port 443, and "
-"all ICMP traffic:"
-msgstr ""
-"Buat grup keamanan dan aturan untuk memungkinkan TCP port 80, TCP port 443, "
-"dan semua lalu lintas ICMP:"
-
-msgid "Create a self-service network."
-msgstr "Buat jaringan self-service."
-
-msgid "Create a subnet"
-msgstr "Membuat subnet"
-
-msgid "Create a subnet not associated with a subnet pool or an address scope:"
-msgstr ""
-"Buat subnet yang tidak terkait dengan kolam subnet atau lingkup alamat:"
-
-msgid ""
-"Create a subnet on the external network for all other IP addresses allocated "
-"on the external network. This will not use any service type. It acts as a "
-"fall back for allocations that do not match either of the above two service "
-"subnets."
-msgstr ""
-"Buat subnet pada jaringan eksternal untuk semua alamat IP lainnya yang "
-"dialokasikan pada jaringan eksternal. Subnet tidak akan menggunakan tipe "
-"layanan. Subnet bertindak minder (fall back) ke alokasi yang tidak sesuai "
-"dengan salah satu dari dua subnet layanan di atas."
-
-msgid ""
-"Create a subnet on the external network for the floating IP agent gateway IP "
-"addresses, which are configured by DVR on compute nodes. This will use the "
-"``network:floatingip_agent_gateway`` service type."
-msgstr ""
-"Buat subnet pada jaringan eksternal untuk floating IP agent gateway IP "
-"addresses, yang dikonfigurasi dengan DVR pada node komputasi. Ini akan "
-"menggunakan tipe layanan ``network:floatingip_agent_gateway``."
-
-msgid ""
-"Create a subnet on the external network for the instance floating IP "
-"addresses. This uses the ``network:floatingip`` service type."
-msgstr ""
-"Buat subnet pada jaringan eksternal untuk alamat IP mengambang instance. Ini "
-"menggunakan tipe layanan ``network:floatingip``. "
-
-msgid ""
-"Create a subnet on the first two self-service networks using an IP address "
-"range from the self-service subnet pool."
-msgstr ""
-"Buat subnet pada jaringan self-service dua pertama menggunakan kisaran "
-"alamat IP dari kolam subnet self-service."
-
-msgid ""
-"Create a subnet on the last self-service network using an IP address range "
-"outside of the address scope."
-msgstr ""
-"Buat subnet pada jaringan self-service terakhir menggunakan kisaran alamat "
-"IP di luar lingkup alamat."
-
-msgid ""
-"Create a subnet on the network with one or more service types. For example, "
-"the ``compute:nova`` service type enables instances to use this subnet."
-msgstr ""
-"Buat subnet pada jaringan dengan satu atau lebih tipe layanan. Sebagai "
-"contoh, tipe layanan ``compute:nova`` mengaktifkan instance untuk "
-"menggunakan subnet ini."
-
-msgid ""
-"Create a subnet on the provider network using an IP address range from the "
-"provider subnet pool."
-msgstr ""
-"Buat subnet pada jaringan provider menggunakan kisaran alamat IP dari kolam "
-"subnet provider."
-
-msgid ""
-"Create a subnet using a subnet pool associated with an address scope from an "
-"external network:"
-msgstr ""
-"Buat subnet menggunakan kolam subnet yang terkait dengan lingkup alamat dari "
-"jaringan eksternal:"
-
-msgid ""
-"Create an address scope. The provider (external) and self-service networks "
-"must belong to the same address scope for the agent to advertise those self-"
-"service network prefixes."
-msgstr ""
-"Buat lingkup alamat. Jaringan provider (eksternal) dan self-service harus "
-"termasuk dalam ruang lingkup alamat yang sama untuk agen untuk penyiaran "
-"update dan perubahan awalan jaringan self-service mereka."
-
-msgid "Create an external network:"
-msgstr "Buat jaringan eksternal:"
-
-msgid ""
-"Create and add ``vhost-user`` network interfaces to instances in the same "
-"fashion as conventional interfaces. These interfaces can use the kernel "
-"``virtio-net`` driver or a DPDK-compatible driver in the guest"
-msgstr ""
-"Buat dan tambahkan antarmuka jaringan ``vhost-user`` untuk instance dengan "
-"cara yang sama sebagai antarmuka konvensional. Antarmuka ini dapat "
-"menggunakan driver ``virtio-net`` kernel atau driver DPDK-compatible di "
-"guest"
-
-msgid "Create and configure the BGP speaker"
-msgstr "Membuat dan mengkonfigurasi BGP speaker"
-
-msgid "Create and configure the routers"
-msgstr "Membuat dan mengkonfigurasi router"
-
-msgid "Create default subnetpools"
-msgstr "Buat subnetpools bawaan"
-
-msgid ""
-"Create flow classifier ``FC1`` that matches the appropriate packet headers."
-msgstr ""
-"Buat klassifier aliran ``FC1`` yang cocok dengan header paket yang sesuai."
-
-msgid "Create initial networks"
-msgstr "Menciptakan jaringan awal"
-
-msgid "Create networks and subnets for the trunk and subports"
-msgstr "Buat jaringan dan subnet untuk trunk dan subports"
-
-msgid ""
-"Create port chain ``PC1`` with port pair groups ``PPG1`` and ``PPG2`` and "
-"flow classifier ``FC1``."
-msgstr ""
-"Buat rantai port ``PC1`` dengan kelompok pasangan port ``PPG1`` dan `` "
-"PPG2`` dan mengalir klassifier aliran ``FC1``."
-
-msgid ""
-"Create port pair ``PP1`` with ports ``p1`` and ``p2``, ``PP2`` with ports "
-"``p3`` and ``p4``, and ``PP3`` with ports ``p5`` and ``p6``."
-msgstr ""
-"Buat pasangan port ``PP1`` dengan port ``p1`` dan ``p2``, ``PP2`` dengan "
-"port ``p3`` dan ``p4``, dan ``PP3`` dengan port ``p5`` dan ``p6``."
-
-msgid ""
-"Create port pair group ``PPG1`` with port pair ``PP1`` and ``PP2`` and "
-"``PPG2`` with port pair ``PP3``."
-msgstr ""
-"Buat grup pasangan port ``PPG1`` dengan pasangan port ``PP1`` dan ``PP2`` "
-"dan ``PPG2`` dengan pasangan port ``PP3``."
-
-msgid "Create ports on network ``net1`` and record the UUID values."
-msgstr "Buat port pada jaringan ``net1`` dan merekam nilai UUID."
-
-msgid "Create shared address scopes as an administrative user"
-msgstr "Buat lingkup alamat bersama sebagai pengguna administratif"
-
-msgid ""
-"Create subnet pools specifying the name (or UUID) of the address scope that "
-"the subnet pool belongs to. If you have existing subnet pools, use the :"
-"command:`openstack subnet pool set` command to put them in a new address "
-"scope:"
-msgstr ""
-"Buat kolam subnet yang menetapkan nama (atau UUID) dari lingkup alamat "
-"(address scope) dimana kolam subnet memilikinya. Jika Anda telah ada kolam "
-"subnet, gunakan perintah :command:`openstack subnet pool set` untuk "
-"menempatkan mereka dalam lingkup alamat baru:"
-
-msgid ""
-"Create subnet pools. The provider and self-service networks use different "
-"pools."
-msgstr ""
-"Membuat kolam subnet. Jaringan provider dan self-service menggunakan kolam "
-"yang berbeda."
-
-msgid ""
-"Create subnets on the ``segment1`` segment. In this example, the IPv4 subnet "
-"uses 203.0.113.0/24 and the IPv6 subnet uses fd00:203:0:113::/64."
-msgstr ""
-"Buat subnet pada segmen ``segment1``. Dalam contoh ini, subnet IPv4 "
-"menggunakan 203.0.113.0/24 dan IPv6 menggunakan subnet fd00:203:0:113::/64."
-
-msgid ""
-"Create subnets on the ``segment2`` segment. In this example, the IPv4 subnet "
-"uses 198.51.100.0/24 and the IPv6 subnet uses fd00:198:51:100::/64."
-msgstr ""
-"Buat subnet pada segmen ``segment2``. Dalam contoh ini, subnet IPv4 "
-"menggunakan 198.51.100.0/24 dan IPv6 menggunakan subnet "
-"fd00:198:51:100::/64."
-
-msgid "Create the BGP speaker."
-msgstr "Buat BGP speaker."
-
-msgid "Create the OVS provider bridge ``br-provider``:"
-msgstr "Buat OVS provider bridge ``br-provider``:"
-
-msgid ""
-"Create the RBAC policy entry using the :command:`openstack network rbac "
-"create` command (in this example, the ID of the project we want to share "
-"with is ``be98b82f8fdf46b696e9e01cebc33fd9``):"
-msgstr ""
-"Buat entri kebijakan RBAC menggunakan perintah :command:`openstack network "
-"rbac create` (dalam contoh ini, ID proyek dimana kami ingin berbagi dengan "
-"adalah ``be98b82f8fdf46b696e9e01cebc33fd9``):"
-
-msgid ""
-"Create the SR-IOV port. ``vnic_type=direct`` is used here, but other options "
-"include ``normal``, ``direct-physical``, and ``macvtap``:"
-msgstr ""
-"Buat port SR-IOV. ``vnic_type=direct`` digunakan di sini, tapi pilihan lain "
-"termasuk ``normal``, ``direct-physical``, dan ``macvtap``:"
-
-msgid ""
-"Create the VFs for the network interface that will be used for SR-IOV. We "
-"use ``eth3`` as PF, which is also used as the interface for the VLAN "
-"provider network and has access to the private networks of all machines."
-msgstr ""
-"Buat VF untuk antarmuka jaringan yang akan digunakan untuk SR-IOV. Kami "
-"menggunakan ``eth3`` sebagai PF, yang juga digunakan sebagai antarmuka untuk "
-"jaringan penyedia VLAN dan memiliki akses ke jaringan private dari semua "
-"mesin."
-
-msgid "Create the address scope and subnet pools"
-msgstr "Buat lingkup alamat dan kolam subnet"
-
-msgid ""
-"Create the appropriate networks for the trunk and subports that will be "
-"added to the trunk. Create subnets on these networks to ensure the desired "
-"layer-3 connectivity over the trunk."
-msgstr ""
-"Buat jaringan yang tepat untuk trunk dan subports yang akan ditambahkan ke "
-"bagasi. Buat subnet pada jaringan ini untuk memastikan konektivitas "
-"lapisan-3 yang diinginkan di atas trunk."
-
-msgid ""
-"Create the appropriate security group rules to allow ``ping`` and SSH access "
-"instances using the network."
-msgstr ""
-"Buat aturan kelompok keamanan yang sesuai untuk mengizinkan ``ping`` dan "
-"akses SSH instance menggunakan jaringan."
-
-msgid ""
-"Create the firewall rules and create a policy that contains them. Then, "
-"create a firewall that applies the policy."
-msgstr ""
-"Buat aturan firewall dan buat kebijakan yang berisi mereka. Kemudian, buat "
-"firewall yang memberlakukan kebijakan."
-
-msgid ""
-"Create the instance. Specify the SR-IOV port created in step two for the NIC:"
-msgstr ""
-"Buat instance. Tentukan port SR-IOV yang akan dibuat pada dua langkah untuk "
-"NIC:"
-
-msgid ""
-"Create the policy entry using the :command:`openstack network rbac create` "
-"command (in this example, the ID of the project we want to share with is "
-"``b87b2fc13e0248a4a031d38e06dc191d``):"
-msgstr ""
-"Buat entri kebijakan menggunakan perintah :command:`openstack network rbac "
-"create` (Dalam contoh ini, ID proyek dimana kami ingin berbagi dengan adalah "
-"``b87b2fc13e0248a4a031d38e06dc191d``):"
-
-msgid "Create the provider and self-service networks"
-msgstr "Buat jaringan provider dan self-service"
-
-msgid "Create the provider network pool."
-msgstr "Buat kolam jaringan provider."
-
-msgid "Create the provider network."
-msgstr "Create jaringan provider."
-
-msgid "Create the required tables in the database:"
-msgstr "Buat tabel yang diperlukan dalam database:"
-
-msgid "Create the routers."
-msgstr "Buat router."
-
-msgid "Create the self-service network pool."
-msgstr "Buat kolam jaringan self-service."
-
-msgid "Create the self-service networks."
-msgstr "Buat jaringan self-service."
-
-msgid "Create the trunk"
-msgstr "Buat trunk"
-
-msgid ""
-"Create the trunk using ``--parent-port`` to reference the port from the "
-"previous step:"
-msgstr ""
-"Buat trunk menggunakan ``--parent-port`` untuk referensi port dari langkah "
-"sebelumnya:"
-
-msgid "Create trunk with subports:"
-msgstr "Buat trunk dengan subports:"
-
-msgid ""
-"Creating CIDR subnets including a multicast address or a loopback address "
-"cannot be used in an OpenStack environment. For example, creating a subnet "
-"using ``224.0.0.0/16`` or ``127.0.1.0/24`` is not supported."
-msgstr ""
-"Pembuatan subnet CIDR termasuk alamat multicast atau alamat loopback tidak "
-"dapat digunakan dalam lingkungan OpenStack. Misalnya, membuat subnet dengan "
-"menggunakan ``224.0.0.0/16`` or ``127.0.1.0/24`` tidak didukung."
-
-msgid ""
-"Creating a port and passing it to an instance yields a different behavior "
-"than conventional networks. The Networking service defers assignment of IP "
-"addresses to the port until the particular compute node becomes apparent. "
-"For example:"
-msgstr ""
-"Pembuatan port dan meneruskananya ke sebuah instance mengakibatkan aturan "
-"yang berbeda dari jaringan konvensional. Layanan Networking menangguhkan "
-"penugasan alamat IP ke port sampai node komputasi tertentu menjadi jelas. "
-"Sebagai contoh:"
-
-msgid ""
-"Creating a subnet with a service type requires administrative privileges."
-msgstr "Pembuatan subnet dengan tipe layanan membutuhkan hak administratif."
-
-msgid ""
-"Creating or updating a port with a specific subnet skips this selection "
-"process and explicitly uses the given subnet."
-msgstr ""
-"Pembuatan atau pembaharuan port dengan subnet tertentu melompati proses "
-"seleksi ini dan secara eksplisit menggunakan subnet yang diberikan."
-
-msgid "Currently, SFC lacks support for multi-project service functions."
-msgstr ""
-"Saat ini, SFC tidak memiliki dukungan untuk fungsi-fungsi layanan multi-"
-"proyek."
-
-msgid ""
-"Currently, no migration path exists between v1 and v2 load balancers. If you "
-"choose to switch from v1 to v2, you must recreate all load balancers, pools, "
-"and health monitors."
-msgstr ""
-"Saat ini, tidak ada jalur migrasi berada antara penyeimbang beban v1 dan v2. "
-"Jika Anda memilih untuk beralih dari v1 ke v2, Anda harus menciptakan semua "
-"penyeimbang beban, kolam, dan monitor kesehatan."
-
-msgid ""
-"Currently, the ``shared`` flag is just a mapping to the underlying RBAC "
-"policies for a network. Setting the flag to ``True`` on a network creates a "
-"wildcard RBAC entry. Setting it to ``False`` removes the wildcard entry."
-msgstr ""
-"Saat ini, flag ``shared`` hanyalah pemetaan kebijakan RBAC yang mendasarinya "
-"untuk jaringan. Pengaturan flag ke `` True`` pada jaringan menciptakan entri "
-"RBAC wildcard. Pengaturan ke `` False`` menghapus entri wildcard."
-
-msgid ""
-"Currently, the access that can be granted using this feature is supported by:"
-msgstr ""
-"Saat ini, akses yang dapat diberikan penggunaan fitur ini didukung oleh:"
-
-msgid "DHCP"
-msgstr "DHCP"
-
-msgid "DHCP agent"
-msgstr "Agen DHCP"
-
-msgid ""
-"DHCP clients locate the DHCP server by sending a UDP_ packet from port 68 to "
-"address ``255.255.255.255`` on port 67. Address ``255.255.255.255`` is the "
-"local network broadcast address: all hosts on the local network see the UDP "
-"packets sent to this address. However, such packets are not forwarded to "
-"other networks. Consequently, the DHCP server must be on the same local "
-"network as the client, or the server will not receive the broadcast. The "
-"DHCP server responds by sending a UDP packet from port 67 to port 68 on the "
-"client. The exchange looks like this:"
-msgstr ""
-"Klien DHCP mencari server DHCP dengan mengirimkan UDP_ packet dari port 68 "
-"untuk alamat `` 255.255.255.255`` pada port 67. Alamat `` 255.255.255.255`` "
-"adalah alamat broadcast jaringan lokal: semua host di jaringan lokal melihat "
-"paket UDP dikirim ke alamat ini. Namun, paket tersebut tidak diteruskan ke "
-"jaringan lain. Akibatnya, server DHCP harus berada di jaringan lokal yang "
-"sama sebagai klien, atau server tidak akan menerima siaran. Server DHCP "
-"merespon dengan mengirimkan paket UDP dari port 67 ke port 68 pada klien. "
-"Pertukaran terlihat seperti ini:"
-
-msgid "DHCP high availability"
-msgstr "DHCP ketersediaan tinggi"
-
-msgid ""
-"DHCP services are created on availability zones you selected when creating "
-"the network."
-msgstr ""
-"Layanan DHCP diciptakan di zona ketersediaan dimana Anda pilih ketika "
-"pembuatan jaringan."
-
-msgid ""
-"DHCP_, the Domain Name System (DNS), the Network Time Protocol (NTP), and :"
-"ref:`VXLAN` are examples of UDP-based protocols used in OpenStack "
-"deployments."
-msgstr ""
-"DHCP_, the Domain Name System (DNS), Network Time Protocol (NTP), dan :ref:"
-"`VXLAN` adalah contoh dari protokol berbasis UDP yang digunakan dalam "
-"pengerahan OpenStack."
-
-msgid "DHCPv6"
-msgstr "DHCPv6"
-
-msgid "DHCPv6-stateful"
-msgstr "DHCPv6-stateful"
-
-msgid "DHCPv6-stateless"
-msgstr "DHCPv6-stateless"
-
-msgid "DNAT"
-msgstr "DNAT"
-
-msgid "DNS integration"
-msgstr "Integrasi DNS"
-
-msgid "DPDK 2.0"
-msgstr "DPDK 2.0"
-
-msgid "DPDK 2.2"
-msgstr "DPDK 2.2"
-
-msgid ""
-"DVR lacks support for routing directly to a fixed IP address via the "
-"floating IP agent gateway port and thus prevents the BGP speaker from "
-"advertising fixed IP addresses."
-msgstr ""
-"DVR kekurangan dukungan untuk routing langsung ke alamat IP tetap melalui "
-"port gateway agen IP mengambang dan dengan demikian mencegah BGP speaker "
-"dari penyiaran update dan perubahan alamat IP tetap."
-
-msgid "DVR with IPv6 functions similarly to DVR with IPv4."
-msgstr "DVR dengan fungsi IPv6 mirip dengan DVR dengan IPv4."
-
-msgid "Database"
-msgstr "Database"
-
-msgid "Database downgrade is not supported."
-msgstr "Database downgrade tidak didukung."
-
-msgid "Database management command-line tool"
-msgstr "Alat command-line pengelolaan database"
-
-msgid "Dataplane"
-msgstr "Dataplane"
-
-msgid ""
-"Deactivating the network will remove the ``virbr0`` bridge, terminate the "
-"dnsmasq process, and remove the iptables rules."
-msgstr ""
-"Penonaktifkan jaringan akan menghapus jembatan ``virbr0``, akhiri proses "
-"dnsmasq, dan hapus aturan iptable."
-
-msgid "Default scheduling."
-msgstr "Penjadwalan default."
-
-msgid "Default subnet pools"
-msgstr "Kolam subnet default"
-
-msgid "Define how an OpenStack network is technically realized. Example: VXLAN"
-msgstr ""
-"Definisikan bagaimana jaringan OpenStack secara teknis diwujudkan. Contoh: "
-"VXLAN"
-
-msgid ""
-"Define one or more service types for one or more subnets on a particular "
-"network. Each service type must correspond to a valid device owner within "
-"the port model in order for it to be used."
-msgstr ""
-"Tentukan satu atau lebih tipe layanan untuk satu atau lebih subnet pada "
-"jaringan tertentu. Setiap tipe layanan harus sesuai dengan pemilik perangkat "
-"valid dalam model port supaya digunakan."
-
-msgid ""
-"Define the mechanism to access an OpenStack network of a certain type. "
-"Example: Open vSwitch mechanism driver."
-msgstr ""
-"Definisikan mekanisme untuk mengakses jaringan OpenStack dari tipe tertentu. "
-"Contoh: sopir mekanisme Open vSwitch."
-
-msgid "Definition"
-msgstr "Definition"
-
-msgid "Delete the network resources for a particular project."
-msgstr "Hapus sumber daya jaringan untuk suatu proyek tertentu."
-
-msgid ""
-"Deleting the router interface causes the subnet to be reverted to the "
-"temporary CIDR, and all ports have their IPs updated. Prefix leases are "
-"released and renewed automatically as necessary."
-msgstr ""
-"Penghapusan antarmuka router menyebabkan subnet yang akan dikembalikan ke "
-"CIDR sementara, dan semua port memiliki IP yang diperbaruinya. Peminjaman "
-"prefiks dilepaskan dan diperpanjang secara otomatis bila diperlukan."
-
-msgid "Demo"
-msgstr "Demo"
-
-msgid "Demo setup"
-msgstr "Penyiapan demo"
-
-msgid ""
-"Deny egress DHCP and DHCPv6 responses to prevent instances from acting as "
-"DHCP(v6) servers."
-msgstr ""
-"Tolak egress DHCP dan DHCPv6 response untuk mencegah instance dari tindakan "
-"sebagai server DHCP (v6)."
-
-msgid ""
-"Deny egress ICMPv6 router advertisements to prevent instances from acting as "
-"IPv6 routers and forwarding IPv6 traffic for other instances."
-msgstr ""
-"Tolak egress ICMPv6 router advertisements untuk mencegah instance dari "
-"tindakan sebagai router IPv6 dan meneruskan lalu lintas IPv6 untuk instance "
-"lain."
-
-msgid "Deploy DHCP agents."
-msgstr "Mengerahkan agen DHCP."
-
-msgid "Deployment examples"
-msgstr "Contoh pengerahan "
-
-msgid "Description"
-msgstr "Deskripsi"
-
-msgid ""
-"Details about the DB models, API extension, and use cases are out of the "
-"scope of this guide but can be found in the `Neutron QoS specification "
-"`_."
-msgstr ""
-"Rincian tentang model DB, ekstensi API, dan kasus penggunaan (use case) "
-"berada di luar cakupan panduan ini, tetapi dapat ditemukan di `Neutron QoS "
-"specification `_."
-
-msgid "Determine the IPv4 and IPv6 addresses of the instance."
-msgstr "Tentukan alamat IPv4 dan IPv6 instance."
-
-msgid ""
-"Different types of agents show different details. The following output shows "
-"information for a Linux bridge agent:"
-msgstr ""
-"Berbagai jenis agen menunjukkan rincian yang berbeda. Output berikut "
-"menunjukkan informasi untuk agen Linux bridge:"
-
-msgid "Direct assignment during subnet creation via command line or Horizon"
-msgstr ""
-"tugas langsung selama pembuatan subnet melalui baris perintah (command "
-"line) atau Horizon"
-
-msgid "Direct port and normal port instances reside on the same compute node."
-msgstr ""
-"Instance port langsung dan instance port yang normal berada pada node "
-"komputasi yang sama."
-
-msgid ""
-"Direct port instance that uses floating IP address and network node are "
-"located on the same host."
-msgstr ""
-"Instance port langsung yang menggunakan alamat IP mengambang dan node "
-"jaringan terletak pada host yang sama."
-
-msgid "Disable libvirt networking"
-msgstr "Nonaktifkan libvirt networking"
-
-msgid "Disable nova-compute."
-msgstr "Nonaktifkan nova-compute."
-
-msgid ""
-"Disable service plug-ins because provider networks do not require any. "
-"However, this breaks portions of the dashboard that manage the Networking "
-"service. See the `Ocata Install Tutorials and Guides `__ for more information."
-msgstr ""
-"Nonaktifkan layanan plug-in karena jaringan operator tidak memerlukan. "
-"Namun, ini mengistirahatkan bagian dari dashboard yang mengelola layanan "
-"Networking. Lihat `Ocata Install Tutorials and Guides `__ untuk informasi lebih lanjut."
-
-msgid "Disable the DHCP agent on HostA before you stop it:"
-msgstr "Menonaktifkan agen DHCP pada HostA sebelum Anda menghentikannya:"
-
-msgid ""
-"Disable the hypervisor. This would be a good time to live migrate or "
-"evacuate the compute node, if supported."
-msgstr ""
-"Nonaktifkan hypervisor. Ini akan menjadi saat yang tepat untuk bermigrasi "
-"secara langsung (live) atau mengevakuasi node komputasi, jika didukung."
-
-msgid "Disabling and removing an agent"
-msgstr "Menonaktifkan dan menghapus agen"
-
-msgid "Distributed Virtual Routing with VRRP"
-msgstr "Distributed Virtual Routing dengan VRRP"
-
-msgid ""
-"Do essentially the same thing for IPv6 and there are now two subnet pools. "
-"Regular projects can see them. (the output is trimmed a bit for display)"
-msgstr ""
-"Lakukan hal pada dasarnya yang sama untuk IPv6 dan sekarang ada dua kolam "
-"subnet. Proyek regular dapat melihat mereka. (Output dipangkas sedikit untuk "
-"display)"
-
-msgid ""
-"Do not use this tool when creating or migrating an instance as it throws an "
-"error when the bridge does not exist."
-msgstr ""
-"Jangan menggunakan alat ini saat membuat atau memigrasikan instance "
-"seperti pelemparan kesalahan ketika jembatan (bridge) tidak ada."
-
-msgid "Downlink the router on the previously created subnet"
-msgstr "Downlink router di subnet yang dibuat sebelumnya"
-
-msgid ""
-"Drivers other than the default one may require extra configuration, please "
-"refer to :ref:`extra-driver-conf`"
-msgstr ""
-"Driver lain dari yang standar mungkin memerlukan konfigurasi tambahan, "
-"silakan lihat :ref:`extra-driver-conf`"
-
-msgid ""
-"Due to direct connection, some features are not available when using SRIOV. "
-"For example, DVR, security groups, migration."
-msgstr ""
-"Karena hubungan langsung, beberapa fitur tidak tersedia saat penggunaan "
-"SRIOV. Misalnya, DVR, kelompok keamanan, migrasi."
-
-msgid ""
-"Due to the direct connection, some features are not available when using it "
-"on the compute node. For example, DVR, security groups and arp-spoofing "
-"protection."
-msgstr ""
-"Karena hubungan langsung, beberapa fitur tidak tersedia saat menggunakannya "
-"pada node komputasi. Misalnya, DVR, kelompok keamanan dan perlindungan arp-"
-"spoofing."
-
-msgid ""
-"During IP allocation, the :ref:`IPAM ` driver returns an "
-"address from a subnet with a service type matching the port device owner. If "
-"no subnets match, or all matching subnets lack available IP addresses, the "
-"IPAM driver attempts to use a subnet without any service types to preserve "
-"compatibility. If all subnets on a network have a service type, the IPAM "
-"driver cannot preserve compatibility. However, this feature enables strict "
-"IP allocation from subnets with a matching device owner. If multiple subnets "
-"contain the same service type, or a subnet without a service type exists, "
-"the IPAM driver selects the first subnet with a matching service type. For "
-"example, a floating IP agent gateway port uses the following selection "
-"process:"
-msgstr ""
-"Selama alokasi IP, driver :ref:`IPAM ` mengembalikan alamat "
-"dari subnet dengan tipe layanan yang cocok dengan pemilik perangkat port. "
-"Jika tidak ada subnet cocok, atau semua subnet yang cocok kekurangan alamat "
-"IP yang tersedia, driver IPAM mencoba untuk menggunakan subnet tanpa tipe "
-"layanan untuk melestarikan kompatibilitas. Jika semua subnet pada jaringan "
-"memiliki tipe layanan, driver IPAM tidak dapat melestarikan kompatibilitas. "
-"Namun, fitur ini mengaktifkan alokasi IP yang ketat dari subnet dengan "
-"pemilik perangkat yang cocok. Jika beberapa subnet berisi tipe layanan yang "
-"sama, atau subnet tanpa tipe layanan itu ada, driver IPAM memilih subnet "
-"pertama dengan tipe layanan yang cocok. Misalnya, port gerbang agen IP "
-"mengambang menggunakan proses seleksi berikut:"
-
-msgid ""
-"During normal operation, ``keepalived`` on the master router periodically "
-"transmits *heartbeat* packets over a hidden network that connects all VRRP "
-"routers for a particular project. Each project with VRRP routers uses a "
-"separate hidden network. By default this network uses the first value in the "
-"``tenant_network_types`` option in the ``ml2_conf.ini`` file. For additional "
-"control, you can specify the self-service network type and physical network "
-"name for the hidden network using the ``l3_ha_network_type`` and "
-"``l3_ha_network_name`` options in the ``neutron.conf`` file."
-msgstr ""
-"Selama operasi normal, `` keepalived`` pada router utama secara berkala "
-"mentransmisikan paket *heartbeat* melalui jaringan tersembunyi yang "
-"menghubungkan semua router VRRP untuk proyek tertentu. Setiap proyek dengan "
-"router VRRP menggunakan jaringan tersembunyi yang terpisah. Secara default "
-"jaringan ini menggunakan nilai pertama dalam opsi ``tenant_network_types`` "
-"dalam file ``ml2_conf.ini`` file. Untuk kontrol tambahan, Anda dapat "
-"menentukan tipe jaringan self-service dan nama jaringan fisik untuk jaringan "
-"tersembunyi menggunakan opsi ``l3_ha_network_type`` dan "
-"``l3_ha_network_name`` dalam file ``neutron.conf``."
-
-msgid ""
-"During normal operation, the master router periodically transmits "
-"*heartbeat* packets over a hidden project network that connects all HA "
-"routers for a particular project."
-msgstr ""
-"Selama operasi normal, master router secara periodik mengirimkan paket "
-"*heartbeat* melalui jaringan proyek tersembunyi yang menghubungkan semua "
-"router HA untuk proyek tertentu."
-
-msgid ""
-"During the migration, nova-network API calls will go through an additional "
-"internal conversion to Networking calls. This will have different and likely "
-"poorer performance characteristics compared with either the pre-migration or "
-"post-migration APIs."
-msgstr ""
-"Selama migrasi, panggilan nova-network API akan melalui konversi internal "
-"tambahan untuk panggilan Networking. Ini akan memiliki karakteristik kinerja "
-"yang berbeda dan mungkin lebih buruk dibandingkan dengan pre-migration API "
-"ataupun post-migration API"
-
-msgid ""
-"Each available network type is managed by an ML2 type driver. Type drivers "
-"maintain any needed type-specific network state. They validate the type "
-"specific information for provider networks and are responsible for the "
-"allocation of a free segment in project networks."
-msgstr ""
-"Setiap tipe jaringan yang tersedia dikelola oleh driver tipe ML2. Driver "
-"tipe memelihara setiap keadaan jaringan type-specific yang dibutuhkan. "
-"Mereka memvalidasi informasi spesifik tipe untuk jaringan penyedia dan "
-"bertanggung jawab untuk alokasi segmen bebas di jaringan proyek."
-
-msgid ""
-"Each building block defines the quantity and types of nodes including the "
-"components on each node."
-msgstr ""
-"Setiap blok bangunan mendefinisikan jumlah dan tipe node termasuk komponen "
-"pada setiap node."
-
-msgid ""
-"Each network namespace also has its own routing table, and in fact this is "
-"the main reason for namespaces to exist. A routing table is keyed by "
-"destination IP address, so network namespaces are what you need if you want "
-"the same destination IP address to mean different things at different times "
-"- which is something that OpenStack Networking requires for its feature of "
-"providing overlapping IP addresses in different virtual networks."
-msgstr ""
-"Setiap namespace jaringan juga memiliki tabel routing sendiri, dan "
-"sebenarnya ini adalah alasan utama untuk namespace ada. Sebuah tabel routing "
-"diketik dengan alamat IP tujuan, sehingga namespace jaringan menjadi apa "
-"yang Anda butuhkan jika Anda ingin alamat IP tujuan yang sama dengan kata "
-"lain hal yang berbeda pada waktu yang berbeda - namespace merupakan sesuatu "
-"dimana OpenStack Networking perlu mengetahui fitur-fiturnya yang menjadi "
-"sumber alamat IP yang tumpang tindih di berbagai jaringan virtual."
-
-msgid ""
-"Each network namespace also has its own set of iptables (for both IPv4 and "
-"IPv6). So, you can apply different security to flows with the same IP "
-"addressing in different namespaces, as well as different routing."
-msgstr ""
-"Setiap namespace jaringan juga telah mempunyai IP sendiri dari iptables "
-"(untuk IPv4 dan IPv6). Jadi, Anda dapat menerapkan keamanan yang berbeda "
-"untuk arus (flow) dengan alamat IP yang sama di namespace yang berbeda, "
-"serta routing yang berbeda juga"
-
-msgid ""
-"Each project contains a ``default`` security group that allows all egress "
-"traffic and denies all ingress traffic. You can change the rules in the "
-"``default`` security group. If you launch an instance without specifying a "
-"security group, the ``default`` security group automatically applies to it. "
-"Similarly, if you create a port without specifying a security group, the "
-"``default`` security group automatically applies to it."
-msgstr ""
-"Setiap proyek berisi grup keamanan ``default`` yang mengizinkan semua lalu "
-"lintas egress dan menyangkal semua lalu lintas ingress. Anda dapat mengubah "
-"aturan dalam grup keamanan ``default``. Jika Anda memulai sebuah instance "
-"tanpa menentukan kelompok keamanan, grup keamanan ``default`` otomatis "
-"berlaku untuk itu. Demikian pula, jika Anda membuat sebuah port tanpa "
-"menentukan kelompok keamanan, grup keamanan ``default`` otomatis berlaku "
-"untuk itu."
-
-msgid ""
-"Each router interface is associated with an address scope by looking at "
-"subnets connected to the network. When a router connects to an external "
-"network with matching address scopes, network traffic routes between without "
-"Network address translation (NAT). The router marks all traffic connections "
-"originating from each interface with its corresponding address scope. If "
-"traffic leaves an interface in the wrong scope, the router blocks the "
-"traffic."
-msgstr ""
-"Setiap interface router dikaitkan dengan lingkup alamat dengan melihat "
-"subnet terhubung ke jaringan. Ketika router menghubungkan ke jaringan "
-"eksternal dengan pencocokan lingkup alamat, lalu lintas me-rute jaringan "
-"antar tanpa Network Address Translation (NAT). Router menandai semua koneksi "
-"lalu lintas yang berasal dari masing-masing antarmuka dengan lingkup alamat "
-"yang sesuai. Jika lalu lintas meninggalkan antarmuka dalam lingkup yang "
-"salah, router memblok lalu lintas."
-
-msgid ""
-"Each segment requires at least one subnet that explicitly belongs to that "
-"segment. The association between a segment and a subnet distinguishes a "
-"routed provider network from other types of networks. The Networking service "
-"enforces that either zero or all subnets on a particular network associate "
-"with a segment. For example, attempting to create a subnet without a segment "
-"on a network containing subnets with segments generates an error."
-msgstr ""
-"Setiap segmen memerlukan setidaknya satu subnet yang memilik segmen itu "
-"secara eksplisit. Hubungan antara segmen dan subnet membedakan jaringan "
-"penyedia dialihkan (routed) dari jenis jaringan lainnya. Layanan Networking "
-"memberlakukan bahwa subnet nol atau semua subnet pada jaringan tertentu yang "
-"berasosiasi dengan segmen. Misalnya, usaha untuk membuat subnet tanpa segmen "
-"pada jaringan yang berisi subnet dengan segmen akan menghasilkan kesalahan."
-
-msgid ""
-"East-west scenario 1: Instances on different networks on the same router"
-msgstr ""
-"East-west scenario 1: Instances pada jaringan yang berbeda pada router yang "
-"sama"
-
-msgid "East-west scenario 1: Instances on the same network"
-msgstr "East-west scenario 1: Instance pada jaringan yang sama"
-
-msgid "East-west scenario 2: Instances on different networks"
-msgstr "East-west scenario 2: Instances pada jaringan yang berbeda"
-
-msgid ""
-"Edit the FWaaS section in the ``/etc/neutron/neutron.conf`` file to indicate "
-"the agent version and driver:"
-msgstr ""
-"Edit bagian FWaaS di file ``/etc/neutron/neutron.conf`` menunjukkan versi "
-"agen dan driver:"
-
-msgid ""
-"Edit the ``/etc/neutron/neutron.conf`` file and assign a value different to "
-"``openstacklocal`` (its default value) to the ``dns_domain`` parameter in "
-"the ``[default]`` section. As an example:"
-msgstr ""
-"Edit file ``/etc/neutron/neutron.conf`` dan menetapkan nilai yang berbeda "
-"untuk (nilai default) ``openstacklocal`` ke parameter ``dns_domain`` dalam "
-"bagian ``[default]``. Sebagai contoh:"
-
-msgid ""
-"Edit the ``[default]`` section of ``/etc/neutron/neutron.conf`` and specify "
-"the external DNS service driver to be used in parameter "
-"``external_dns_driver``. The valid options are defined in namespace "
-"``neutron.services.external_dns_drivers``. The following example shows how "
-"to set up the driver for the OpenStack DNS service:"
-msgstr ""
-"Edit ``[default]`` section of ``/etc/neutron/neutron.conf`` dan tentukan "
-"driver layanan DNS eksternal yang akan digunakan dalam parameter "
-"``external_dns_driver``. Opsi valid didefinisikan dalam namespace ``neutron."
-"services.external_dns_drivers``. Contoh berikut menunjukkan cara mengatur "
-"driver untuk layanan DNS OpenStack:"
-
-msgid ""
-"Edit the ``ovs_agent.ini`` or ``linuxbridge_agent.ini`` file on each compute "
-"node. For example:"
-msgstr ""
-"Edit file ``ovs_agent.ini`` atau ``linuxbridge_agent.ini`` pada setiap node "
-"komputasi. Sebagai contoh:"
-
-msgid "Edit the ``sriov_agent.ini`` file on each compute node. For example:"
-msgstr ""
-"Edit file ``sriov_agent.ini`` pada setiap node komputasi. Sebagai contoh:"
-
-msgid ""
-"Effectively the ``ipv6_gateway`` flag takes precedence over an RA that is "
-"received from the upstream router. If it is desired to use a GUA next hop "
-"that is accomplished by allocating a subnet to the external router port and "
-"assigning the upstream routers GUA address as the gateway for the subnet."
-msgstr ""
-"Secra efektif flag ``ipv6_gateway`` diutamakan melalui RA yang diterima dari "
-"router hulu. Jika ini diinginkan untuk menggunakan GUA next hop yang dicapai "
-"dengan mengalokasikan subnet ke port router eksternal dan menempatkan alamat "
-"GUA router hulu sebagai gateway untuk subnet."
-
-msgid ""
-"Egress instance traffic flows similar to north-south scenario 1, except SNAT "
-"changes the source IP address of the packet to the floating IPv4 address "
-"rather than the router IP address on the provider network."
-msgstr ""
-"Egress instance traffic mengalir mirip dengan north-south scenario 1, "
-"kecuali SNAT perubahan alamat IP sumber dari paket ke alamat IPv4 mengambang "
-"bukannya alamat IP router pada jaringan provider."
-
-msgid ""
-"Egress traffic follows similar steps in reverse, except SNAT changes the "
-"source IPv4 address of the packet to the floating IPv4 address."
-msgstr ""
-"Egress traffic mengikuti langkah-langkah serupa secara terbalik, kecuali "
-"SNAT mengubah alamat IPv4 sumber paket ke alamat IPv4 mengambang."
-
-msgid "Enable FWaaS v1"
-msgstr "Enable FWaaS v1"
-
-msgid "Enable FWaaS v2"
-msgstr "Aktifkan FWaaS v2"
-
-msgid ""
-"Enable IOMMU in Linux by adding ``intel_iommu=on`` to the kernel parameters, "
-"for example, using GRUB."
-msgstr ""
-"Aktifkan IOMMU di Linux dengan menambahkan ``intel_iommu = on`` dengan "
-"parameter kernel, misalnya, menggunakan GRUB."
-
-msgid "Enable VRRP."
-msgstr "Aktifkan VRRP."
-
-msgid ""
-"Enable a nova-api proxy that recreates internal Compute objects from "
-"Networking information (via the Networking REST API)."
-msgstr ""
-"Aktifkan proxy nova-api yang menciptakan objek Compute internal dari "
-"informasi Networking (melalui Networking REST API)."
-
-msgid "Enable distributed routing by default for all routers."
-msgstr "Aktifkan routing terdistribusikan dengan default untuk semua router."
-
-msgid "Enable neutron sriov-agent (Compute)"
-msgstr "Aktifkan neutron sriov-agent (Compute)"
-
-msgid "Enable routing and allow overlapping IP address ranges."
-msgstr "Aktifkan routing dan memungkinkan rentang alamat IP tumpang tindih."
-
-msgid "Enable the FWaaS plug-in in the ``/etc/neutron/neutron.conf`` file:"
-msgstr "Aktifkan FWaaS plug-in dalam file ``/etc/neutron/neutron.conf``:"
-
-msgid "Enable the Networking agent."
-msgstr "Aktifkan agen Networking."
-
-msgid ""
-"Enable the functionality described in :ref:`config-dns-int-dns-resolution`."
-msgstr ""
-"Aktifkan fungsi yang dijelaskan dalam :ref:`config-dns-int-dns-resolution`."
-
-msgid "Enable the layer-2 population mechanism driver."
-msgstr "Aktifkan driver mekanisme populasi lapisan-2"
-
-msgid "Enable the native OVS firewall driver"
-msgstr "Aktifkan driver firewall OVS asli"
-
-msgid "Enable the neutron sriov-agent service."
-msgstr "Mengaktifkan layanan neutron sriov-agent."
-
-msgid ""
-"Enable the option in the ``local_settings.py`` file, which is typically "
-"located on the controller node:"
-msgstr ""
-"Aktifkan opsi dalam file ``local_settings.py``, yang biasanya terletak pada "
-"controller node:"
-
-msgid ""
-"Enable the plug-in in Dashboard by editing the ``local_settings.py`` file "
-"and setting ``enable_lb`` to ``True`` in the ``OPENSTACK_NEUTRON_NETWORK`` "
-"dictionary."
-msgstr ""
-"Aktifkan plug-in di Dashboard dengan mengedit file ``local_settings.py`` dan "
-"pengaturan ``enable_lb`` ke ``True`` dalam direktori "
-"``OPENSTACK_NEUTRON_NETWORK``."
-
-msgid ""
-"Enable the segments service plug-in by appending ``segments`` to the list of "
-"``service_plugins`` in the ``neutron.conf`` file on all nodes running the "
-"``neutron-server`` service:"
-msgstr ""
-"Aktifkan segmen layanan plug-in dengan menambahkan ``segments`` ke daftar "
-"``service_plugins`` di file ``neutron.conf`` pada semua node yang "
-"menjalankan layanan ``neutron-server``:"
-
-msgid ""
-"Enable two DHCP agents per network so both compute nodes can provide DHCP "
-"service provider networks."
-msgstr ""
-"Aktifkan dua agen DHCP per jaringan sehingga kedua node komputasi dapat "
-"memberikan layanan DHCP jaringan provider."
-
-msgid "Enabling DHCP high availability by default"
-msgstr "Aktifkan HA DHCP secara default"
-
-msgid "Enabling the deployment for auto-allocation"
-msgstr "Mengaktifkan pengerahan untuk auto-allocation"
-
-msgid ""
-"End users normally can create subnets with any valid IP addresses without "
-"other restrictions. However, in some cases, it is nice for the admin or the "
-"project to pre-define a pool of addresses from which to create subnets with "
-"automatic allocation."
-msgstr ""
-"End user biasanya dapat membuat subnet dengan alamat IP yang valid tanpa "
-"pembatasan lainnya. Namun, dalam beberapa kasus, itu bagus untuk admin atau "
-"proyek untuk pre-define kolam alamat yang membuat subnet dengan alokasi "
-"otomatis."
-
-msgid "Ensure SR-IOV and VT-d are enabled in BIOS."
-msgstr "Pastikan SR-IOV dan VT-d diaktifkan di BIOS."
-
-msgid ""
-"Ensure that the LBaaS v1 and v2 service providers are removed from the "
-"``[service_providers]`` section. They are not used with Octavia. **Verify "
-"that all LBaaS agents are stopped.**"
-msgstr ""
-"Pastikan bahwa LBaaS v1 dan penyedia layanan v2 dikeluarkan dari bagian "
-"``[service_providers]``. Mereka tidak digunakan dengan Octavia. **Verify "
-"that all LBaaS agents are stopped.**"
-
-msgid "Ensure the neutron sriov-agent runs successfully:"
-msgstr "Pastikan neutron sriov-agetn berjalan sukses:"
-
-msgid "Ethernet"
-msgstr "Ethernet"
-
-msgid ""
-"Ethernet is a networking protocol, specified by the IEEE 802.3 standard. "
-"Most wired network interface cards (NICs) communicate using Ethernet."
-msgstr ""
-"Ethernet adalah protokol jaringan, ditentukan oleh standar IEEE 802.3. "
-"Kebanyakan kartu antarmuka jaringan kabel (NIC) berkomunikasi menggunakan "
-"Ethernet."
-
-msgid ""
-"Every agent that supports these extensions will register itself with the "
-"neutron server when it starts up."
-msgstr ""
-"Setiap agen yang mendukung ekstensi ini akan mendaftarkan diri dengan server "
-"neutron ketika dijalankan."
-
-msgid ""
-"Every trunk has a parent port and can have any number of subports. The "
-"parent port is the port that the trunk is associated with. Users create "
-"instances and specify the parent port of the trunk when launching instances "
-"attached to a trunk."
-msgstr ""
-"Setiap trunk memiliki port orangtua dan dapat memiliki sejumlah subports. "
-"Port orangtua adalah port yang bagasi terkait dengan. Pengguna membuat "
-"contoh dan menentukan port induk dari bagasi saat meluncurkan kasus melekat "
-"batang."
-
-msgid "Example 1 - Proof-of-concept"
-msgstr "Example 1 - Proof-of-concept"
-
-msgid "Example 2 - DVR configuration"
-msgstr "Example 2 - DVR configuration"
-
-msgid ""
-"Example commands using the ``openstack`` client assume version 3.2.0 or "
-"higher."
-msgstr ""
-"Contoh perintah menggunakan klien ``openstack`` menganggap versi 3.2.0 atau "
-"lebih tinggi."
-
-msgid "Example configuration"
-msgstr "Konfigurasi contoh"
-
-msgid ""
-"Expect performance degradation of services using tap devices: these devices "
-"do not support DPDK. Example services include DVR, FWaaS, or LBaaS."
-msgstr ""
-"Mengharapkan penurunan kinerja pelayanan menggunakan perangkat tap: "
-"perangkat ini tidak mendukung DPDK. Layanan contoh termasuk DVR, FWaaS, atau "
-"LBaaS."
-
-msgid "Experimental feature or incomplete documentation."
-msgstr "Fitur eksperimental atau dokumentasi tak lengkap."
-
-msgid "Extensions"
-msgstr "Extensions"
-
-msgid "External Router A,M,O"
-msgstr "External Router A,M,O"
-
-msgid ""
-"External mechanism drivers from various vendors exist as well as the neutron "
-"integrated reference implementations."
-msgstr ""
-"Driver mekanisme eksternal dari berbagai vendor ada serta implementasi "
-"referensi neutron terintegrasi."
-
-msgid ""
-"External open source mechanism drivers exist as well as the neutron "
-"integrated reference implementations. Configuration of those drivers is not "
-"part of this document. For example:"
-msgstr ""
-"Driver mekanisme open source eksternal ada serta implementasi referensi "
-"neutron terintegrasi. Konfigurasi driver ini bukan bagian dari dokumen ini. "
-"Sebagai contoh:"
-
-msgid "Extra configuration"
-msgstr "Konfigurasi tambahan"
-
-msgid "FWaaS"
-msgstr "FWaaS"
-
-msgid "FWaaS allows creation of IPv6 based rules."
-msgstr "FWaaS memungkinkan penciptaan aturan berbasis IPv6."
-
-msgid ""
-"FWaaS always adds a default ``deny all`` rule at the lowest precedence of "
-"each policy. Consequently, a firewall policy with no rules blocks all "
-"traffic by default."
-msgstr ""
-"FWaaS selalu menambahkan default aturan ``deny all``` pada prioritas "
-"terendah dari setiap kebijakan. Akibatnya, kebijakan firewall tanpa aturan "
-"memblok semua lalu lintas secara default."
-
-msgid "FWaaS management options are also available in the Dashboard."
-msgstr "Opsi manajemen FWaaS juga tersedia di Dashboard."
-
-msgid "FWaaS v1"
-msgstr "FWaaS v1"
-
-msgid "FWaaS v1 versus v2"
-msgstr "FWaaS v1 dibandingkan dengan v2"
-
-msgid "FWaaS v2"
-msgstr "FWaaS v2"
-
-msgid "Feature"
-msgstr "Fitur"
-
-msgid ""
-"Filtering resources with a tag whose name contains a comma is not supported. "
-"Thus, do not put such a tag name to resources."
-msgstr ""
-"Penyaringan sumber dengan tag yang namanya mengandung koma tidak didukung. "
-"Dengan demikian, tidak menempatkan seperti nama tag ke sumber daya."
-
-msgid "Filtering with tags"
-msgstr "Penyaringan dengan tag"
-
-msgid "Firewall-as-a-Service (FWaaS)"
-msgstr "Firewall-as-a-Service (FWaaS)"
-
-msgid "Firewall-as-a-Service (FWaaS) v1 scenario"
-msgstr "Skenario Firewall-as-a-Service (FWaaS) v1"
-
-msgid "Firewall-as-a-Service (FWaaS) v2 scenario"
-msgstr "Skenario Firewall-as-a-Service (FWaaS) v2"
-
-msgid "Firewalls"
-msgstr "Firewall"
-
-msgid ""
-"Firewalls are implemented in various ways, depending on the driver used. For "
-"example, an iptables driver implements firewalls using iptable rules. An "
-"OpenVSwitch driver implements firewall rules using flow entries in flow "
-"tables. A Cisco firewall driver manipulates NSX devices."
-msgstr ""
-"Firewall diimplementasikan dalam berbagai cara, tergantung pada driver yang "
-"digunakan. Sebagai contoh, driver iptables menerapkan firewall menggunakan "
-"aturan iptable. Driver OpenVSwitch menerapkan aturan firewall menggunakan "
-"entri aliran dalam flow table. Driver firewall Cisco memanipulasi perangkat "
-"NSX."
-
-msgid ""
-"Firewalls are used to regulate traffic to and from a host or a network. A "
-"firewall can be either a specialized device connecting two networks or a "
-"software-based filtering mechanism implemented on an operating system. "
-"Firewalls are used to restrict traffic to a host based on the rules defined "
-"on the host. They can filter packets based on several criteria such as "
-"source IP address, destination IP address, port numbers, connection state, "
-"and so on. It is primarily used to protect the hosts from unauthorized "
-"access and malicious attacks. Linux-based operating systems implement "
-"firewalls through ``iptables``."
-msgstr ""
-"Firewall digunakan untuk mengatur lalu lintas ke dan dari host atau "
-"jaringan. Firewall dapat berupa perangkat khusus yang menghubungkan dua "
-"jaringan atau mekanisme penyaringan berbasis software diimplementasikan pada "
-"sistem operasi. Firewall digunakan untuk membatasi lalu lintas ke host "
-"berdasarkan aturan yang ditetapkan pada host. Mereka dapat menyaring paket "
-"berdasarkan beberapa kriteria seperti alamat IP sumber, alamat IP tujuan, "
-"nomor port, status koneksi, dan sebagainya. Hal ini terutama digunakan untuk "
-"melindungi host dari akses yang tidak sah dan serangan berbahaya. Sistem "
-"operasi berbasis Linux mengimplementasikan firewall melalui ``iptables``."
-
-msgid "First, as admin, create a shared subnet pool:"
-msgstr "Pertama, sebagai admin, buatlah subnet kolam bersama:"
-
-msgid "First, create a QoS policy and its bandwidth limit rule:"
-msgstr "Pertama, buat kebijakan QoS dan aturan batas bandwidth:"
-
-msgid "First, create a network and IPv6 subnet:"
-msgstr "Pertama, buat jaringan dan subnet IPv6:"
-
-msgid "First, the router gateway external port:"
-msgstr "Pertama, port eksternal gerbang router:"
-
-msgid ""
-"First, would not it be nice if you could turn your pool of addresses over to "
-"Neutron to take care of? When you need to create a subnet, you just ask for "
-"addresses to be allocated from the pool. You do not have to worry about what "
-"you have already used and what addresses are in your pool. Subnet pools can "
-"do this."
-msgstr ""
-"Pertama, mungkin tidak akan lebih baik jika Anda bisa mengubah kolam alamat "
-"Anda ke Neutron untuk mengurusnya? Bila Anda perlu membuat subnet, Anda "
-"hanya meminta alamat yang akan dialokasikan dari kolam. Anda tidak perlu "
-"khawatir tentang apa yang Anda telah digunakan dan apa alamat yang ada di "
-"kolam Anda. Kolam subnet bisa melakukan ini."
-
-msgid "Flat"
-msgstr "Flat"
-
-msgid ""
-"Flat networks for project allocation are not supported. They only can exist "
-"as a provider network."
-msgstr ""
-"Jaringan datar (flat) untuk alokasi proyek tidak didukung. Mereka hanya "
-"bisa eksis sebagai jaringan operator (provider)"
-
-msgid "Floating IP addresses"
-msgstr "Alamat IP mengambang"
-
-msgid "Floating IPs"
-msgstr "Floating IPs"
-
-msgid "Flow classifier"
-msgstr "Flow classifier"
-
-msgid ""
-"Following are the PTR records created for this example. Note that for IPv4, "
-"the value of ipv4_ptr_zone_prefix_size is 24. For more details, see :ref:"
-"`config-dns-int-ext-serv`:"
-msgstr ""
-"Berikut ini adalah catatan PTR yang dibuat untuk contoh ini. Perhatikan "
-"bahwa untuk IPv4, nilai ipv4_ptr_zone_prefix_size adalah 24. Untuk lebih "
-"jelasnya, lihat :ref: `config-dns-int-ext-serv`:"
-
-msgid ""
-"Following are the PTR records created for this example. Note that for IPv4, "
-"the value of ipv4_ptr_zone_prefix_size is 24. In the case of IPv6, the value "
-"of ipv6_ptr_zone_prefix_size is 116. For more details, see :ref:`config-dns-"
-"int-ext-serv`:"
-msgstr ""
-"Berikut ini adalah catatan PTR yang dibuat untuk contoh ini. Perhatikan "
-"bahwa untuk IPv4, nilai ipv4_ptr_zone_prefix_size adalah 24. Dalam kasus "
-"IPv6, nilai ipv6_ptr_zone_prefix_size adalah 116. Untuk lebih jelasnya, "
-"lihat :ref:`config-dns-int-ext-serv`:"
-
-msgid "Following is an example of these steps:"
-msgstr "Berikut ini adalah contoh langkah-langkah:"
-
-msgid ""
-"For IPv4, the router performs DNAT on the packet which changes the "
-"destination IP address to the instance IP address on the self-service "
-"network and sends it to the gateway IP address on the self-service network "
-"via the self-service interface (6)."
-msgstr ""
-"Untuk IPv4, router melakukan DNAT pada paket yang mengubah alamat IP tujuan "
-"ke alamat IP instance di jaringan self-service dan mengirimkannya ke alamat "
-"IP gateway di jaringan self-service melalui antarmuka self-service (6) ."
-
-msgid ""
-"For IPv4, the router performs DNAT on the packet which changes the "
-"destination IP address to the instance IP address on the self-service "
-"network and sends it to the gateway IP address on the self-service network "
-"via the self-service interface (7)."
-msgstr ""
-"Untuk IPv4, router melakukan DNAT pada paket yang mengubah alamat IP tujuan "
-"ke alamat IP instance di jaringan self-service dan mengirimkannya ke alamat "
-"IP gateway di jaringan self-service melalui antarmuka self-service (7) ."
-
-msgid ""
-"For IPv4, the router performs SNAT on the packet which changes the source IP "
-"address to the router IP address on the provider network and sends it to the "
-"gateway IP address on the provider network via the gateway interface on the "
-"provider network (11)."
-msgstr ""
-"Untuk IPv4, router melakukan SNAT pada paket yang mengubah alamat IP sumber "
-"ke alamat IP router pada jaringan provider dan mengirimkannya ke alamat IP "
-"gateway di jaringan provider melalui antarmuka gateway pada jaringan "
-"provider (11)."
-
-msgid ""
-"For IPv4, the router performs SNAT on the packet which changes the source IP "
-"address to the router IP address on the provider network and sends it to the "
-"gateway IP address on the provider network via the gateway interface on the "
-"provider network (17)."
-msgstr ""
-"Untuk IPv4, router melakukan SNAT pada paket yang mengubah alamat IP sumber "
-"ke alamat IP router pada jaringan provider dan mengirimkannya ke alamat IP "
-"gateway di jaringan provider melalui antarmuka gateway pada jaringan "
-"provider (17)."
-
-msgid ""
-"For IPv6, the router sends the packet to the next-hop IP address, typically "
-"the gateway IP address on the provider network, via the provider gateway "
-"interface (11)."
-msgstr ""
-"Untuk IPv6, router mengirimkan paket ke alamat IP hop berikutnya (next-hop), "
-"biasanya alamat IP gateway pada jaringan provider, melalui antarmuka gateway "
-"provider (11)."
-
-msgid ""
-"For IPv6, the router sends the packet to the next-hop IP address, typically "
-"the gateway IP address on the provider network, via the provider gateway "
-"interface (17)."
-msgstr ""
-"Untuk IPv6, router mengirimkan paket ke alamat IP next-hop, biasanya alamat "
-"IP gateway pada jaringan provider, melalui antarmuka gateway provider (17)."
-
-msgid ""
-"For IPv6, the router sends the packet to the next-hop IP address, typically "
-"the gateway IP address on the self-service network, via the self-service "
-"interface (6)."
-msgstr ""
-"Untuk IPv6, router mengirimkan paket ke alamat IP hop berikutnya (next-hop), "
-"biasanya alamat IP gateway di jaringan self-service, melalui antarmuka self-"
-"service (6)."
-
-msgid ""
-"For IPv6, the router sends the packet to the next-hop IP address, typically "
-"the gateway IP address on the self-service network, via the self-service "
-"interface (8)."
-msgstr ""
-"Untuk IPv6, router mengirimkan paket ke alamat IP next-hop, biasanya alamat "
-"IP gateway di jaringan self-service, melalui antarmuka self-service (8)."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For a detailed list of configuration options, see the related section in the "
-"`Configuration Reference `__."
-msgstr ""
-"Untuk daftar rinci pilihan konfigurasi, lihat bagian terkait di "
-"`Configuration Reference `__."
-
-msgid ""
-"For additional information describing the problem, refer to: `Virtual "
-"switching technologies and Linux bridge. `_"
-msgstr ""
-"Untuk informasi tambahan menjelaskan masalah ini, lihat: `Virtual switching "
-"technologies and Linux bridge. `_"
-
-msgid ""
-"For basic configuration of prerequisites, see the `Ocata Install Tutorials "
-"and Guides `__."
-msgstr ""
-"Untuk konfigurasi dasar dari prasyarat, lihat `Ocata Install Tutorials and "
-"Guides `__."
-
-msgid ""
-"For best performance, 10+ Gbps physical network infrastructure should "
-"support jumbo frames."
-msgstr ""
-"Untuk performa terbaik, 10+ Gbps infrastruktur jaringan fisik harus "
-"mendukung frame jumbo."
-
-msgid ""
-"For each router, add one self-service subnet as an interface on the router."
-msgstr ""
-"Untuk setiap router, tambahkan satu subnet self-service sebagai interface "
-"pada router."
-
-msgid ""
-"For example, Linux provides namespaces for networking and processes, among "
-"other things. If a process is running within a process namespace, it can "
-"only see and communicate with other processes in the same namespace. So, if "
-"a shell in a particular process namespace ran :command:`ps waux`, it would "
-"only show the other processes in the same namespace."
-msgstr ""
-"Misalnya, Linux menyediakan namespace untuk jaringan dan proses. Jika suatu "
-"proses sedang berjalan dalam namespace proses, proses hanya bisa melihat dan "
-"berkomunikasi dengan proses lainnya dalam namespace yang sama. Jadi, jika "
-"suatu shell dimana proses namespace tertentu menjalankan :command:`ps waux`, "
-"proses hanya akan menunjukkan proses lain dalam namespace yang sama."
-
-msgid "For example, add flow classifier ``FC2`` to port chain ``PC1``:"
-msgstr ""
-"Misalnya, tambahkan klassifier aliran ``FC2`` untuk rantai port ``PC1``:"
-
-msgid "For example, add port pair group ``PPG3`` to port chain ``PC1``:"
-msgstr ""
-"Misalnya, tambahkan grup pasangan port ``PPG3`` untuk rantai port ``PC1``:"
-
-msgid "For example, consider the following components:"
-msgstr "Sebagai contoh, pertimbangkan komponen-komponen berikut:"
-
-msgid ""
-"For example, referencing a 4000-byte MTU for ``provider2``, a 1500-byte MTU "
-"for ``provider3``, and a 9000-byte MTU for other networks using the Open "
-"vSwitch agent:"
-msgstr ""
-"Misalnya, referensi 4000-byte MTU untuk ``provider2``, 1500-byte MTU untuk "
-"``provider3``, dan 9000-byte MTU untuk jaringan lain menggunakan agen Open "
-"vSwitch:"
-
-msgid ""
-"For example, referencing a 4000-byte MTU for overlay networks and a 9000-"
-"byte MTU for other networks:"
-msgstr ""
-"Sebagai contoh, referensi 4000-byte MTU untuk jaringan overlay dan 9000-byte "
-"MTU untuk jaringan lain:"
-
-msgid ""
-"For example, referencing an underlying physical network with a 9000-byte MTU:"
-msgstr ""
-"Sebagai contoh, referensi jaringan fisik yang mendasari dengan MTU 9000-byte:"
-
-msgid "For example, to match any domain, bus 0a, slot 00, and all functions:"
-msgstr ""
-"Misalnya, untuk mencocokkan domain apapun, bus 0a, slot 00, dan semua fungsi:"
-
-msgid ""
-"For further information, see `v1 configuration guide `_ or `v2 configuration "
-"guide `_."
-msgstr ""
-"Untuk informasi lebih lanjut, lihat `v1 configuration guide `_ atau `v2 "
-"configuration guide `_."
-
-msgid ""
-"For general configuration, see the `Configuration Reference `_."
-msgstr ""
-"Untuk konfigurasi umum, lihat `Configuration Reference `_."
-
-msgid ""
-"For illustration purposes, the configuration examples typically reference "
-"the following IP address ranges:"
-msgstr ""
-"Untuk tujuan ilustrasi, contoh konfigurasi biasanya mereferensi rentang "
-"alamat IP berikut:"
-
-msgid ""
-"For information on **Mellanox SR-IOV Ethernet ConnectX-3/ConnectX-3 Pro "
-"cards**, see `Mellanox: How To Configure SR-IOV VFs `_."
-msgstr ""
-"Untuk informasi tentang **Mellanox SR-IOV Ethernet ConnectX-3/ConnectX-3 Pro "
-"cards**, lihat `Mellanox: How To Configure SR-IOV VFs `_."
-
-msgid ""
-"For information on **QLogic SR-IOV Ethernet cards**, see `User's Guide "
-"OpenStack Deployment with SR-IOV Configuration `_."
-msgstr ""
-"Untuk informasi tentang **QLogic SR-IOV Ethernet cards**, lihat `User's "
-"Guide OpenStack Deployment with SR-IOV Configuration `_."
-
-msgid ""
-"For instances with a fixed IPv4 address, the network node performs SNAT on "
-"north-south traffic passing from self-service to external networks such as "
-"the Internet. For instances with a fixed IPv6 address, the network node "
-"performs conventional routing of traffic between self-service and external "
-"networks."
-msgstr ""
-"Untuk instance dengan alamat IPv4 tetap, node jaringan melakukan SNAT pada "
-"lalu lintas utara-selatan melewati dari self-service untuk jaringan "
-"eksternal seperti Internet. Untuk instance dengan alamat IPv6 tetap, node "
-"jaringan melakukan routing yang konvensional lalu lintas antara self-service "
-"dan jaringan eksternal."
-
-msgid ""
-"For instances with a floating IPv4 address using a self-service network on a "
-"distributed router, the compute node containing the instance performs SNAT "
-"on north-south traffic passing from the instance to external networks such "
-"as the Internet and DNAT on north-south traffic passing from external "
-"networks to the instance. Floating IP addresses and NAT do not apply to "
-"IPv6. Thus, the network node routes IPv6 traffic in this scenario. north-"
-"south traffic passing between the instance and external networks such as the "
-"Internet."
-msgstr ""
-"Untuk instance dengan alamat IPv4 mengambang menggunakan jaringan self-"
-"service pada router terdistribusikan, komputasi node yang berisi instance "
-"melakukan SNAT pada lalu lintas utara-selatan lewat dari instance ke "
-"jaringan eksternal seperti Internet dan DNAT di lalu lintas utara-selatan "
-"yang lewat dari jaringan eksternal untuk instance. Alamat IP mengambang dan "
-"NAT tidak berlaku untuk IPv6. Dengan demikian, node jaringan me-rute lalu "
-"lintas IPv6 dalam skenario ini, lalu lintas utara-selatan yang melewati "
-"antara instance dan jaringan eksternal seperti Internet."
-
-msgid ""
-"For instances with a floating IPv4 address, maintains state of network "
-"connections during failover as a side effect of 1:1 static NAT. The "
-"mechanism does not actually implement connection tracking."
-msgstr ""
-"Untuk instance dengan alamat IPv4 mengambang, pertahankan keadaan koneksi "
-"jaringan selama failover sebagai efek samping dari NAT statis 1:1. "
-"Mekanisme tidak secara benar melaksanakan pelacakan koneksi."
-
-msgid ""
-"For instances with a floating IPv4 address, the network node performs SNAT "
-"on north-south traffic passing from the instance to external networks such "
-"as the Internet and DNAT on north-south traffic passing from external "
-"networks to the instance. Floating IP addresses and NAT do not apply to "
-"IPv6. Thus, the network node routes IPv6 traffic in this scenario."
-msgstr ""
-"Untuk instance dengan alamat IPv4 mengambang, node jaringan melakukan SNAT "
-"pada lalu lintas utara-selatan lewat dari instance ke jaringan eksternal "
-"seperti Internet dan DNAT pada lalu lintas utara-selatan lewat dari jaringan "
-"eksternal untuk instance. Alamat IP mengambang dan NAT tidak berlaku untuk "
-"IPv6. Dengan demikian, node jaringan me-rute lalu lintas IPv6 dalam skenario "
-"ini."
-
-msgid ""
-"For more details, see `Networking command-line client `_ in the OpenStack Command-Line "
-"Interface Reference."
-msgstr ""
-"Untuk lebih jelasnya, lihat `Networking command-line client `_ dalam OpenStack Command-Line "
-"Interface Reference."
-
-msgid ""
-"For more details, see the `Configuration Reference `__."
-msgstr ""
-"Untuk lebih jelasnya, lihat `Configuration Reference `__."
-
-msgid ""
-"For more details, see the `Networking configuration options `__ of Configuration "
-"Reference."
-msgstr ""
-"Untuk lebih jelasnya, lihat `Networking configuration options `__ dari "
-"Configuration Reference."
-
-msgid ""
-"For more details, see the related section in the `Configuration Reference "
-"`__."
-msgstr ""
-"Untuk lebih jelasnya, lihat bagian terkait di `Configuration Reference "
-"`__."
-
-msgid ""
-"For more information about the syntax for ``hw:mem_page_size``, refer to the "
-"`Flavors `__ "
-"guide."
-msgstr ""
-"Untuk informasi lebih lanjut tentang sintaks untuk ``hw:mem_page_size``, "
-"mengacu kepada panduan `Flavors `__."
-
-msgid "For more information see the :ref:`config-sriov`."
-msgstr "Untuk informasi lebih lanjut lihat :ref:`config-sriov`."
-
-msgid ""
-"For networked software applications to communicate over an IP network, they "
-"must use a protocol layered atop IP. These protocols occupy the fourth layer "
-"of the OSI model known as the *transport layer* or *layer 4*. See the "
-"`Protocol Numbers `_ web page maintained by the Internet Assigned Numbers "
-"Authority (IANA) for a list of protocols that layer atop IP and their "
-"associated numbers."
-msgstr ""
-"Untuk aplikasi perangkat lunak jaringan yang berkomunikasi melalui jaringan "
-"IP, mereka harus menggunakan protokol yang berlapisan atop IP. Protokol ini "
-"menempati lapisan keempat dari model OSI yang dikenal sebagai *transport "
-"layer* atau *layer 4*. Lihat halaman web `Protocol Numbers `_ yang dikelola "
-"oleh Internet Assigned Numbers Authority (IANA) untuk daftar protokol yang "
-"berlapisan atop IP dan nomor yang terkait."
-
-msgid ""
-"For production deployments, we recommend at least three network nodes with "
-"sufficient resources to handle network traffic for the entire environment if "
-"one network node fails. Also, the remaining two nodes can continue to "
-"provide redundancy."
-msgstr ""
-"Untuk pengerahan produksi, kami sarankan setidaknya tiga node jaringan "
-"dengan sumber daya yang cukup untuk menangani lalu lintas jaringan untuk "
-"seluruh lingkungan jika salah satu simpul jaringan gagal. Disamping itu, dua "
-"node yang tersisa dapat terus memberikan redundansi."
-
-msgid ""
-"For the above two attributes to be effective, ``enable_dhcp`` of the subnet "
-"object must be set to True."
-msgstr ""
-"Untuk dua atribut di atas supaya menjadi efektif, ``enable_dhcp`` dari objek "
-"subnet harus diatur ke True."
-
-msgid ""
-"For typical underlying physical network architectures that implement a "
-"single MTU value, you can leverage jumbo frames using two options, one in "
-"the ``neutron.conf`` file and the other in the ``ml2_conf.ini`` file. Most "
-"environments should use this configuration."
-msgstr ""
-"Untuk arsitektur jaringan fisik yang mendasarinya yang khas yang menerapkan "
-"nilai MTU tunggal, Anda dapat memanfaatkan frame jumbo menggunakan dua "
-"pilihan, satu di file ``neutron.conf`` dan yang lainnya di file ``ml2_conf."
-"ini``. Kebanyakan lingkungan harus menggunakan konfigurasi ini."
-
-msgid ""
-"Forwarding DataBase (FDB) population is an L2 agent extension to OVS agent "
-"or Linux bridge. Its objective is to update the FDB table for existing "
-"instance using normal port. This enables communication between SR-IOV "
-"instances and normal instances. The use cases of the FDB population "
-"extension are:"
-msgstr ""
-"Populasi Forwarding DataBase (FDB) adalah L2 agent extension untuk OVS agent "
-"atau Linux bridge. Tujuannya adalah untuk memperbarui tabel FDB pada "
-"instance yang ada menggunakan port normal. Hal ini mengaktifkan komunikasi "
-"antara instance SR-IOV dan instance normal. Use case ekstensi populasi FDB "
-"adalah:"
-
-msgid ""
-"From the Liberty release onwards, OpenStack Networking supports IPv6 prefix "
-"delegation. This section describes the configuration and workflow steps "
-"necessary to use IPv6 prefix delegation to provide automatic allocation of "
-"subnet CIDRs. This allows you as the OpenStack administrator to rely on an "
-"external (to the OpenStack Networking service) DHCPv6 server to manage your "
-"project network prefixes."
-msgstr ""
-"Dari rilis Liberty dan seterusnya, OpenStack Networking mendukung IPv6 "
-"prefix delegation. Bagian ini menjelaskan langkah konfigurasi dan alur kerja "
-"yang diperlukan untuk menggunakan IPv6 prefix delegation untuk memberikan "
-"alokasi otomatis subnet CIDR. Hal ini memungkinkan Anda sebagai "
-"administrator OpenStack mengandalkan server DHCPv6 eksternal (ke layanan "
-"OpenStack Networking) untuk mengelola prefix jaringan proyek Anda."
-
-msgid "Function: Firewall"
-msgstr "Function: Firewall"
-
-msgid "Function: Intrusion detection system (IDS)"
-msgstr "Function: Intrusion detection system (IDS)"
-
-msgid ""
-"Fundamentally, SFC routes packets through one or more service functions "
-"instead of conventional routing that routes packets using destination IP "
-"address. Service functions essentially emulate a series of physical network "
-"devices with cables linking them together."
-msgstr ""
-"Pada dasarnya, SFC me-rute paket melalui satu atau lebih fungsi pelayanan "
-"bukan routing konvensional yang me-rute paket menggunakan alamat IP tujuan. "
-"Fungsi layanan pada dasarnya meniru serangkaian perangkat jaringan fisik "
-"dengan kabel yang menghubungkan mereka bersama-sama."
-
-msgid "Future support"
-msgstr "Dukungan di masa depan"
-
-msgid "GRE"
-msgstr "GRE"
-
-msgid "GRE and VXLAN"
-msgstr "GRE dan VXLAN"
-
-msgid "Gateway"
-msgstr "Gateway (gerbang)"
-
-msgid "Gateway (via physical network infrastructure)"
-msgstr "Gateway (melalui infrastruktur jaringan fisik)"
-
-msgid "Gateway on the provider network"
-msgstr "Gateway jaringan provider"
-
-msgid "Generic routing encapsulation (GRE)"
-msgstr "Generic routing encapsulation (GRE)"
-
-msgid ""
-"Generic routing encapsulation (GRE) is a protocol that runs over IP and is "
-"employed when delivery and payload protocols are compatible but payload "
-"addresses are incompatible. For instance, a payload might think it is "
-"running on a datalink layer but it is actually running over a transport "
-"layer using datagram protocol over IP. GRE creates a private point-to-point "
-"connection and works by encapsulating a payload. GRE is a foundation "
-"protocol for other tunnel protocols but the GRE tunnels provide only weak "
-"authentication."
-msgstr ""
-"Generic routing encapsulation (GRE) adalah protokol yang berjalan di atas IP "
-"dan digunakan saat pengiriman dan protokol payload yang kompatibel tetapi "
-"alamat payload tidak kompatibel. Misalnya, payload mungkin diperkirakan akan "
-"berjalan pada lapisan datalink tetapi sebenarnya berjalan di atas lapisan "
-"transport menggunakan protokol datagram over IP. GRE membuat koneksi point-"
-"to-point private dan bekerja dengan encapsulating payload. GRE adalah "
-"protokol dasar untuk protokol terowongan (terowongan) lain tetapi terowongan "
-"GRE hanya menyediakan otentikasi lemah."
-
-msgid "Get Me A Network"
-msgstr "Get Me A Network (dapatkan saya jaringan)"
-
-msgid "Get Network IP address availability for all IPv4 networks:"
-msgstr "Dapatkan ketersediaan alamat IP jaringan untuk semua jaringan IPv4:"
-
-msgid "Get Network IP address availability for all IPv6 networks:"
-msgstr "Dapatkan ketersediaan alamat IP jaringan untuk semua jaringan IPv6:"
-
-msgid "Get Network IP address availability statistics for a specific network:"
-msgstr ""
-"Dapatkan statistik ketersediaan alamat IP jaringan untuk jaringan tertentu:"
-
-msgid "Get list of resources with ``not-tags-any`` filter:"
-msgstr "Dapatkan daftar sumber daya dengan penyaring ``not-tags-any``:"
-
-msgid "Get list of resources with ``not-tags`` filter:"
-msgstr "Dapatkan daftar sumber daya dengan penyaring ``not-tags``:"
-
-msgid "Get list of resources with ``tags-any`` filter:"
-msgstr "Dapatkan daftar sumber daya dengan penyaring ``tags-any``:"
-
-msgid "Get list of resources with ``tags`` filter:"
-msgstr "Dapatkan daftar sumber daya dengan penyaring ``tags``:"
-
-msgid ""
-"Get list of resources with tag filters from networks. The networks are: test-"
-"net1 with \"red\" tag, test-net2 with \"red\" and \"blue\" tags, test-net3 "
-"with \"red\", \"blue\", and \"green\" tags, and test-net4 with \"green\" tag."
-msgstr ""
-"Dapatkan daftar sumber daya dengan filter tag dari jaringan. Jaringannya "
-"adalah: test-net1 dengan tag \"red\", test-net2 dengan tag \"red\" dan "
-"\"blue\", test-net3 dengan tag \"red\", \"blue\", dan \"green\", dan test-"
-"net4 dengan tag \"green\"."
-
-msgid ""
-"Get the ``id`` of the network where you want the SR-IOV port to be created:"
-msgstr ""
-"Dapatkan ``id`` dari jaringan dimana Anda ingin port SR-IOV yang akan dibuat:"
-
-msgid ""
-"Guest instance obtains IPv6 address from OpenStack managed radvd using SLAAC "
-"and optional info from dnsmasq using DHCPv6."
-msgstr ""
-"Guest instance memperoleh alamat IPv6 dari OpenStack dikelola dengan radvd "
-"menggunakan SLAAC dan info opsional dari dnsmasq menggunakan DHCPv6."
-
-msgid ""
-"Guest instance obtains IPv6 address from OpenStack managed radvd using SLAAC."
-msgstr ""
-"Guest instance memperoleh alamat IPv6 dari OpenStack dikelola dengan radvd "
-"menggunakan SLAAC."
-
-msgid ""
-"Guest instance obtains IPv6 address from dnsmasq using DHCPv6 stateful and "
-"optional info from dnsmasq using DHCPv6."
-msgstr ""
-"Guest instance memperoleh alamat IPv6 dari dnsmasq menggunakan DHCPv6 "
-"stateful dan info opsional dari dnsmasq menggunakan DHCPv6."
-
-msgid ""
-"Guest instance obtains IPv6 address from non-OpenStack router using SLAAC."
-msgstr ""
-"Guest instance memperoleh alamat IPv6 dari router non-OpenStack menggunakan "
-"SLAAC."
-
-msgid "HA of DHCP agents"
-msgstr "HA untuk agen DHCP"
-
-msgid ""
-"HA routers are created on availability zones you selected when creating the "
-"router."
-msgstr ""
-"HA router diciptakan pada zona ketersediaan dimana Anda pilih pada saat "
-"pembuatan router."
-
-msgid "Handles metadata, etc."
-msgstr "Tangani metadata, dll"
-
-msgid "Handles physical-virtual network transition"
-msgstr "Tangani transisi jaringan physical-virtual"
-
-msgid "Health monitor"
-msgstr "Health monitor"
-
-msgid "Here is an example of output from :command:`ip route show`:"
-msgstr "Berikut adalah contoh output dari :command:`ip route show`:"
-
-msgid "Here is the router we have used in our demonstration:"
-msgstr "Berikut adalah router yang telah kami digunakan di demonstrasi kami:"
-
-msgid "High availability"
-msgstr "Ketersediaan tinggi"
-
-msgid "High-availability features"
-msgstr "Fitur high-availability"
-
-msgid "High-availability for DHCP"
-msgstr "Ketersediaan tinggi untuk DHCP"
-
-msgid "Horizon support"
-msgstr "Dukungan Horizon"
-
-msgid "Host"
-msgstr "Host"
-
-msgid "Host *A* then sends Ethernet frames to host *B*."
-msgstr "Host *A* kemudian mengirimkan frame Ethernet ke host *B*."
-
-msgid "Host *B* responds with a response like this:"
-msgstr "Host *B* merespon dengan respon seperti ini:"
-
-msgid "HostA"
-msgstr "HostA"
-
-msgid "HostB"
-msgstr "HostB"
-
-msgid ""
-"Hosts connected to a network use the :term:`Dynamic Host Configuration "
-"Protocol (DHCP)` to dynamically obtain IP addresses. A DHCP server hands out "
-"the IP addresses to network hosts, which are the DHCP clients."
-msgstr ""
-"Host terhubung ke jaringan menggunakan :term:`Dynamic Host Configuration "
-"Protocol (DHCP)` untuk mendapatkan alamat IP secara dinamis. Sebuah server "
-"DHCP memberi alamat IP untuk jaringan host, yang merupakan klien DHCP."
-
-msgid "How the 'shared' flag relates to these entries"
-msgstr "Bagaimana 'shared' flag berkaitan dengan entri ini"
-
-msgid "How they work"
-msgstr "Bagaimana mereka bekerja"
-
-msgid ""
-"How those instances communicate across a router to other subnets or the "
-"internet."
-msgstr ""
-"Bagaimana instance ini berkomunikasi melewati router ke subnet lain atau "
-"internet."
-
-msgid "How those instances interact with other OpenStack services."
-msgstr "Bagaimana instance ini berinteraksi dengan layanan OpenStack lainnya."
-
-msgid "How those instances receive an IPv6 address."
-msgstr "Bagaimana contoh menerima alamat IPv6."
-
-msgid "How to disable libvirt networks"
-msgstr "Cara menonaktifkan jaringan libvirt"
-
-msgid "How to enable dual-stack (IPv4 and IPv6 enabled) instances."
-msgstr "Cara mengaktifkan instance dual-stack (IPv4 dan IPv6 diaktifkan)."
-
-msgid ""
-"However, libvirt is capable of providing networking services to the virtual "
-"machines that it manages. In particular, libvirt can be configured to "
-"provide networking functionality akin to a simplified, single-node version "
-"of OpenStack. Users can use libvirt to create layer 2 networks that are "
-"similar to OpenStack Networking's networks, confined to a single node."
-msgstr ""
-"Namun, libvirt mampu memberikan layanan jaringan untuk mesin virtual yang "
-"dikelolanya. Secara khusus, libvirt dapat dikonfigurasi untuk menyediakan "
-"fungsionalitas jaringan mirip dengan jaringan sederhana, versi single-node "
-"dari OpenStack. Pengguna dapat menggunakan libvirt untuk membuat jaringan "
-"layer 2 yang mirip dengan jaringan OpenStack Networking's ini, terbatas pada "
-"node tunggal."
-
-msgid ""
-"However, load balancers deployed onto private or isolated networks need a "
-"floating IP address assigned if they must be accessible to external clients. "
-"To complete this step, you must have a router between the private and public "
-"networks and an available floating IP address."
-msgstr ""
-"Namun, balancers beban yang dikerahkan ke jaringan private atau jaringan "
-"terisolasi membutuhkan alamat IP mengambang yang ditetapkan jika mereka "
-"harus dapat diakses oleh klien eksternal. Untuk menyelesaikan langkah ini, "
-"Anda harus memiliki router antara jaringan private dan publik dan alamat IP "
-"mengambang yang tersedia."
-
-msgid ""
-"IANA maintains a `registry of port numbers `_ for many TCP-"
-"based services, as well as services that use other layer 4 protocols that "
-"employ ports. Registering a TCP port number is not required, but registering "
-"a port number is helpful to avoid collisions with other services. See "
-"`firewalls and default ports `_ in OpenStack Administrator Guide for the "
-"default TCP ports used by various services involved in an OpenStack "
-"deployment."
-msgstr ""
-"IANA memelihara `registry of port numbers `_ untuk "
-"berbagai layanan berbasis TCP, serta layanan yang menggunakan protokol "
-"lapisan 4 lain yang mempekerjakan port. Pendaftaran nomor port TCP tidak "
-"diperlukan, tetapi pendaftaran nomor port adalah membantu untuk menghindari "
-"tabrakan dengan layanan lainnya. Lihat `firewalls and default ports `_ dalam "
-"OpenStack Administrator Guide untuk port TCP default yang digunakan oleh "
-"berbagai layanan yang terlibat dalam pengerahan OpenStack."
-
-msgid "IP"
-msgstr "IP"
-
-msgid "IP address range 192.0.2.0/24 and fd00:192:0:2::/64"
-msgstr "Rentang alamat IP 192.0.2.0/24 dan fd00:192:0:2::/64"
-
-msgid "IP address ranges 203.0.113.0/24 and fd00:203:0:113::/64"
-msgstr "Rentang alamat IP 203.0.113.0/24 dan fd00:203:0:113::/64"
-
-msgid "IP addresses 192.0.2.1 and fd00:192:0:2::1"
-msgstr "Alamat IP 192.0.2.1 dan fd00:192:0:2::1"
-
-msgid "IP addresses 192.0.2.101 and fd00:192:0:2:0::101"
-msgstr "Alamat IP 192.0.2.101 dan fd00:192:0:2:0::101"
-
-msgid "IP addresses 203.0.113.1 and fd00:203:0:113:0::1"
-msgstr "Alamat IP 203.0.113.1 dan fd00:203:0:113:0::1"
-
-msgid "IP addresses 203.0.113.101 and fd00:203:0:113:0::101"
-msgstr "Alamat IP 203.0.113.101 dan fd00:203:0:113:0::101"
-
-msgid ""
-"IP addresses are broken up into two parts: a *network number* and a *host "
-"identifier*. Two hosts are on the same *subnet* if they have the same "
-"network number. Recall that two hosts can only communicate directly over "
-"Ethernet if they are on the same local network. ARP assumes that all "
-"machines that are in the same subnet are on the same local network. Network "
-"administrators must take care when assigning IP addresses and netmasks to "
-"hosts so that any two hosts that are in the same subnet are on the same "
-"local network, otherwise ARP does not work properly."
-msgstr ""
-"Alamat IP yang dipecah menjadi dua bagian: *network number* dan *host "
-"identifier*. Dua host pada *subnet* yang sama jika mereka memiliki jumlah "
-"jaringan yang sama. Ingat bahwa dua host hanya dapat berkomunikasi secara "
-"langsung melintasi Ethernet jika mereka berada di jaringan lokal yang sama. "
-"ARP mengasumsikan bahwa semua mesin yang berada di subnet yang sama berada "
-"di jaringan lokal yang sama. Administrator jaringan harus berhati-hati "
-"ketika menetapkan alamat IP dan netmask ke host sehingga setiap dua host "
-"yang berada di subnet yang sama berada di jaringan lokal yang sama, jika "
-"tidak seperti itu ARP tidak bekerja dengan benar."
-
-msgid "IP availability metrics"
-msgstr "Metrik ketersediaan IP"
-
-msgid "IPAM configuration"
-msgstr "Konfigurasi IPAM"
-
-msgid ""
-"IPv4 self-service networks typically use private IP address ranges (RFC1918) "
-"and interact with provider networks via source NAT on virtual routers. "
-"Floating IP addresses enable access to instances from provider networks via "
-"destination NAT on virtual routers. IPv6 self-service networks always use "
-"public IP address ranges and interact with provider networks via virtual "
-"routers with static routes."
-msgstr ""
-"Jaringan self-service IPv4 biasanya menggunakan rentang alamat IP private "
-"(RFC1918) dan berinteraksi dengan jaringan provider melalui sumber NAT pada "
-"router virtual. Alamat IP mengambang mengaktifkan akses ke instance dari "
-"jaringan provider via NAT tujuan pada router virtual. IPv6 jaringan self-"
-"service selalu menggunakan rentang alamat IP publik dan berinteraksi dengan "
-"jaringan provider melalui router virtual dengan rute statis."
-
-msgid "IPv4: 192.0.2.0/24"
-msgstr "IPv4: 192.0.2.0/24"
-
-msgid "IPv4: 203.0.113.0/24"
-msgstr "IPv4: 203.0.113.0/24"
-
-msgid "IPv6"
-msgstr "IPv6"
-
-msgid "IPv6 addressing"
-msgstr "Alamat IPv6"
-
-msgid ""
-"IPv6 connectivity with self-service networks often requires addition of "
-"static routes to nodes and physical network infrastructure."
-msgstr ""
-"Konektivitas IPv6 dengan jaringan self-service sering membutuhkan penambahan "
-"rute statis untuk node dan infrastruktur jaringan fisik."
-
-msgid ""
-"IPv6 does work when the Distributed Virtual Router functionality is enabled, "
-"but all ingress/egress traffic is via the centralized router (hence, not "
-"distributed). More work is required to fully enable this functionality."
-msgstr ""
-"IPv6 memang bekerja ketika fungsi Distributed Virtual Router diaktifkan, "
-"tetapi semua lalu lintas masuk/keluar (ingress/egress) adalah melalui router "
-"terpusat (oleh karena itu, tidak didistribusikan). Banyak pekerjaan "
-"diperlukan untuk sepenuhnya mengaktifkan fungsi ini."
-
-msgid "IPv6 multicast"
-msgstr "IPv6 multicast"
-
-msgid "IPv6 since OpenStack Networking has no IPv6 floating IPs."
-msgstr "IPv6 sejak OpenStack Networking tidak mempunyai IPv6 floating IP."
-
-msgid ""
-"IPv6 support in conjunction with any out of tree routers, switches, services "
-"or agents whether in physical or virtual form factors."
-msgstr ""
-"Dukungan IPv6 dalam hubungannya dengan router, switch, layanan, atau agen "
-"out-of-tree, baik dalam bentuk fisik maupun virtual."
-
-msgid ""
-"IPv6 supports three different addressing schemes for address configuration "
-"and for providing optional network information."
-msgstr ""
-"IPv6 mendukung tiga skema pengalamatan yang berbeda untuk konfigurasi alamat "
-"dan untuk menyediakan informasi jaringan opsional."
-
-msgid "IPv6: fd00:192:0:2::/64"
-msgstr "IPv6: fd00:192:0:2::/64"
-
-msgid "IPv6: fd00:203:0:113::/64"
-msgstr "IPv6: fd00:203:0:113::/64"
-
-msgid ""
-"If Dashboard is configured to compress static files for better performance "
-"(usually set through ``COMPRESS_OFFLINE`` in ``local_settings.py``), "
-"optimize the static files again:"
-msgstr ""
-"Jika Dashboard dikonfigurasi untuk kompres file statis untuk kinerja yang "
-"lebih baik (biasanya diatur melalui ``COMPRESS_OFFLINE`` dalam "
-"``local_settings.py``), lakukan optimalisasi file statis lagi:"
-
-msgid ""
-"If ``ebrctl`` does not appear in any of the rootwrap files, add this to the "
-"``/etc/nova/rootwrap.d/compute.filters`` file in the ``[Filters]`` section."
-msgstr ""
-"JIka ``ebrctl`` tidak muncul dalam salah satu file rootwrap, tambahkan ini "
-"ke file ``/etc/nova/rootwrap.d/compute.filters`` dalam bagian ``[Filters]``."
-
-msgid ""
-"If ``keepalived`` on the backup router stops receiving *heartbeat* packets, "
-"it assumes failure of the master router and promotes the backup router to "
-"master router by configuring IP addresses on the interfaces in the "
-"``qrouter`` namespace. In environments with more than one backup router, "
-"``keepalived`` on the backup router with the next highest priority promotes "
-"that backup router to master router."
-msgstr ""
-"Jika ``keepalived`` pada router cadangan berhenti menerima paket "
-"*heartbeat*, ia mengasumsikan kegagalan router master dan mempromosikan "
-"router cadangan menjadi router master dengan mengonfigurasi alamat IP pada "
-"antarmuka dalam namespace ``qrouter``. Dalam lingkungan dengan lebih dari "
-"satu router cadangan, ``keepalived`` pada router cadangan dengan prioritas "
-"tertinggi berikutnya mempromosikan router cadangan itu menjadi router master."
-
-msgid ""
-"If a network is marked as external during creation, it now implicitly "
-"creates a wildcard RBAC policy granting everyone access to preserve previous "
-"behavior before this feature was added."
-msgstr ""
-"Jika jaringan ditandai sebagai eksternal selama pembuatan, sekarang secara "
-"implisit hal itu menciptakan kebijakan RBAC wildcard memberikan akses setiap "
-"orang untuk menjaga tindakan sebelumnya sebelum fitur ini ditambahkan."
-
-msgid ""
-"If a service function involves a pair of ports, the first port acts as the "
-"ingress port of the service function and the second port acts as the egress "
-"port. If both ports use the same value, they function as a single virtual "
-"bidirectional port."
-msgstr ""
-"Jika fungsi pelayanan melibatkan sepasang port, port pertama bertindak "
-"sebagai ingress port fungsi pelayanan dan port kedua bertindak sebagai port "
-"egress. Jika kedua port menggunakan nilai yang sama, mereka berfungsi "
-"sebagai port bidirectional virtual tunggal."
-
-msgid ""
-"If all routers lose connectivity simultaneously, the process of selecting a "
-"new master router will be repeated in a round-robin fashion until one or "
-"more routers have their connectivity restored."
-msgstr ""
-"Jika semua router kehilangan konektivitas secara bersamaan, proses pemilihan "
-"router induk baru akan diulang secara round-robin sampai satu atau lebih "
-"router telah dipulihkan konektivitas mereka."
-
-msgid ""
-"If an operator wants to prevent normal users from doing this, the ``"
-"\"create_rbac_policy\":`` entry in ``policy.json`` can be adjusted from ``"
-"\"\"`` to ``\"rule:admin_only\"``."
-msgstr ""
-"Jika operator ingin mencegah pengguna biasa melakukan ini, ``"
-"\"create_rbac_policy\":`` entri dalam ``policy.json`` dapat disesuaikan "
-"dari ``\"\"`` menjadi ``\"rule:admin_only\"``."
-
-msgid ""
-"If installing from source, you must configure a daemon file for the init "
-"system manually."
-msgstr ""
-"Jika menginstal dari source, Anda harus mengkonfigurasi file daemon untuk "
-"sistem init secara manual."
-
-msgid "If necessary, :ref:`configure MTU `."
-msgstr "Jika diperlukan, :ref:`configure MTU `."
-
-msgid ""
-"If projects are trusted to administrate their own QoS policies in your "
-"cloud, neutron's file ``policy.json`` can be modified to allow this."
-msgstr ""
-"Jika proyek dipercaya melakukan administrasi kebijakan QoS sendiri di cloud "
-"Anda, file ``policy.json`` milik neutron ini dapat dimodifikasi untuk "
-"memberi ini."
-
-msgid ""
-"If projects are trusted to create their own policies, check the trusted "
-"projects ``policy.json`` configuration section."
-msgstr ""
-"Jika proyek dipercaya untuk membuat kebijakan mereka sendiri, periksa bagian "
-"konfigurasi ``policy.json`` proyek dipercaya."
-
-msgid ""
-"If that project has ports on the network, the server will prevent the policy "
-"from being deleted until the ports have been deleted:"
-msgstr ""
-"Jika proyek memiliki port pada jaringan, server akan mencegah kebijakan dari "
-"penghapusan sampai port itu telah dihapus:"
-
-msgid ""
-"If that project has ports or networks with the QoS policy applied to them, "
-"the server will not delete the RBAC policy until the QoS policy is no longer "
-"in use:"
-msgstr ""
-"Jika proyek yang memiliki port atau jaringan dengan kebijakan QoS diterapkan "
-"kepada mereka, server tidak akan menghapus kebijakan RBAC sampai kebijakan "
-"QoS tidak lagi digunakan:"
-
-msgid ""
-"If that project has router gateway ports attached to that network, the "
-"server prevents the policy from being deleted until the ports have been "
-"deleted:"
-msgstr ""
-"Jika proyek yang memiliki port gerbang router terpasang ke jaringan itu, "
-"server akan mencegah kebijakan dari penghapusan sampai port telah dihapus:"
-
-msgid ""
-"If the DVR/SNAT backup router stops receiving these packets, it assumes "
-"failure of the master DVR/SNAT router and promotes itself to master router "
-"by configuring IP addresses on the interfaces in the ``snat`` namespace. In "
-"environments with more than one backup router, the rules of VRRP are "
-"followed to select a new master router."
-msgstr ""
-"Jika router backup DVR/SNAT berhenti menerima paket ini, hal ini "
-"mengasumsikan kegagalan router DVR/SNAT master dan mempromosikan diri untuk "
-"menguasai router dengan mengkonfigurasi alamat IP pada antarmuka dalam "
-"namespace ``snat``. Dalam lingkungan dengan lebih dari satu router cadangan, "
-"aturan VRRP diikuti untuk memilih router master baru."
-
-msgid ""
-"If the Open vSwitch agent is being used, set ``extensions`` to ``qos`` in "
-"the ``[agent]`` section of ``/etc/neutron/plugins/ml2/openvswitch_agent."
-"ini``. For example:"
-msgstr ""
-"Jika agen Open vSwitch sedang digunakan, atur ``extensions`` ke ``qos`` "
-"dalam bagian ``[agent]`` dari ``/etc/neutron/plugins/ml2/openvswitch_agent."
-"ini``. Sebagai contoh:"
-
-msgid ""
-"If the OpenStack DNS service is the target external DNS, the ``[designate]`` "
-"section of ``/etc/neutron/neutron.conf`` must define the following "
-"parameters:"
-msgstr ""
-"Jika layanan OpenStack DNS adalah target DNS eksternal, bagian "
-"``[designate]`` dari ``/etc/neutron/neutron.conf`` harus mendefiniskan "
-"parameter berikut:"
-
-msgid ""
-"If the address scopes match between networks then pings and other traffic "
-"route directly through. If the scopes do not match between networks, the "
-"router either drops the traffic or applies NAT to cross scope boundaries."
-msgstr ""
-"Jika lingkup alamat ada kesesuaian antar jaringan maka ping dan rute lalu "
-"lintas lainnya akan melalui langsung. Jika lingkup tidak cocok antar "
-"jaringan, router menjatuhkan lalu lintas ataupun menerapkan NAT untuk "
-"melintasi batas ruang lingkup."
-
-msgid ""
-"If the device defined by the PCI address or ``devname`` corresponds to an SR-"
-"IOV PF, all VFs under the PF will match the entry. Multiple "
-"``pci_passthrough_whitelist`` entries per host are supported."
-msgstr ""
-"Jika perangkat didefinisikan oleh alamat PCI atau ``devname`` sesuai dengan "
-"SR-IOV PF, semua VF bawah PF akan cocok dengan entri. Beberapa entri "
-"``pci_passthrough_whitelist`` per host didukungnya."
-
-msgid ""
-"If the interfaces are down, set them to ``up`` before launching a guest, "
-"otherwise the instance will fail to spawn:"
-msgstr ""
-"Jika interface turun, aturlah mereka ke ``up`` sebelum meluncurkan guest "
-"(tamu), jika tidak, instance akan gagal spawn:"
-
-msgid ""
-"If the policy is shared, the project is able to attach or detach such policy "
-"from its own ports and networks."
-msgstr ""
-"Jika kebijakan ini bersama, proyek ini dapat melekatkan atau melepaskan "
-"kebijakan tersebut dari port dan jaringan itu sendiri."
-
-msgid "If the pool becomes exhausted, load some more prefixes:"
-msgstr "Jika kolam menjadi habis, muatlah beberapa awalan (prefix) yang lebih:"
-
-msgid ""
-"If the prefix delegation server is configured to delegate globally routable "
-"prefixes and setup routes, then any instance with a port on this subnet "
-"should now have external network access."
-msgstr ""
-"Jika server prefix delegation dikonfigurasi untuk mendelegasikan prefiks "
-"routable global dan mengatur rute, maka setiap instance dengan port pada "
-"subnet ini sekarang harus memiliki akses jaringan eksternal."
-
-msgid ""
-"If there is a default, it can be requested by passing ``--use-default-"
-"subnetpool`` instead of ``--subnet-pool SUBNETPOOL``."
-msgstr ""
-"Jika ada default, itu dapat diminta dengan melewati ``--use-default-"
-"subnetpool`` bukannya ``--subnet-pool SUBNETPOOL``."
-
-msgid ""
-"If two switches are to be connected together, and the switches are "
-"configured for VLANs, then the switchports used for cross-connecting the "
-"switches must be configured to allow Ethernet frames from any VLAN to be "
-"forwarded to the other switch. In addition, the sending switch must tag each "
-"Ethernet frame with the VLAN ID so that the receiving switch can ensure that "
-"only hosts on the matching VLAN are eligible to receive the frame."
-msgstr ""
-"Jika dua switch dihubungkan bersama-sama, dan switch dikonfigurasi untuk "
-"VLAN, maka switchport yang digunakan untuk cross-connecting switch harus "
-"dikonfigurasi untuk mengizinkan frame Ethernet dari VLAN apapun untuk "
-"diteruskan ke switch lainnya. Selain itu, switch pengirim harus menandai "
-"(tag) setiap frame Ethernet dengan ID VLAN sehingga switch penerima dapat "
-"memastikan bahwa hanya host pada VLAN yang cocok memenuhi syarat untuk "
-"menerima frame."
-
-msgid ""
-"If you are an admin, you can create a pool which can be accessed by any "
-"regular project. Being a shared resource, there is a quota mechanism to "
-"arbitrate access."
-msgstr ""
-"Jika Anda adalah seorang admin, Anda dapat membuat kolam yang dapat diakses "
-"oleh setiap proyek biasa. Menjadi sumber daya bersama, kolam mempunyai "
-"mekanisme kuota untuk menengahi konflik akses."
-
-msgid ""
-"If you are not using the default dibbler-based driver for prefix delegation, "
-"then you also need to set the driver in ``/etc/neutron/neutron.conf``:"
-msgstr ""
-"Jika Anda tidak menggunakan driver berbasis dibbler default untuk prefix "
-"delegation, maka Anda juga perlu mengatur driver dalam ``/etc/neutron/"
-"neutron.conf``:"
-
-msgid ""
-"If you have access to an OpenStack Kilo or later based neutron, you can play "
-"with this feature now. Give it a try. All of the following commands work "
-"equally as well with IPv6 addresses."
-msgstr ""
-"Jika Anda memiliki akses ke OpenStack Kilo atau later based neutron, Anda "
-"dapat bermain dengan fitur ini sekarang. Cobalah. Semua perintah berikut "
-"bekerja sama juga dengan alamat IPv6."
-
-msgid ""
-"If you have deployed LBaaS v1, **stop the LBaaS v1 agent now**. The v1 and "
-"v2 agents **cannot** run simultaneously."
-msgstr ""
-"Jika Anda telah mengerahkan LBaaS v1, **hentikan agen LBaaS v1 sekarang**. "
-"Agen v1 dan v2 **tidak dapat** berjalan secara bersamaan."
-
-msgid ""
-"If you have existing service providers for other networking service plug-"
-"ins, such as VPNaaS or FWaaS, add the ``service_provider`` line shown above "
-"in the ``[service_providers]`` section as a separate line. These "
-"configuration directives are repeatable and are not comma-separated."
-msgstr ""
-"Jika Anda memiliki penyedia layanan yang ada untuk plug-in layanan jejaring "
-"lainnya, seperti VPNaaS atau FWaaS, tambahkan baris ``service_provider`` "
-"yang ditunjukkan di atas dalam bagian ``[service_providers] `` sebagai baris "
-"terpisah. Perintah konfigurasi ini berulang dan tidak dipisahkan koma."
-
-msgid ""
-"If you use the metadata service, removing the default egress rules denies "
-"access to TCP port 80 on 169.254.169.254, thus preventing instances from "
-"retrieving metadata."
-msgstr ""
-"Jika Anda menggunakan layanan metadata, penghapusan aturan default egress "
-"menolak akses ke TCP port 80 pada 169.254.169.254, sehingga hal itu dapat "
-"mencegah pengambilan metadata."
-
-msgid "Impact and limitations"
-msgstr "Dampak dan keterbatasan"
-
-msgid "Implement routing between segments."
-msgstr "Terapkan routing antara segmen."
-
-msgid ""
-"In *Destination Network Address Translation* (DNAT), the NAT router modifies "
-"the IP address of the destination in IP packet headers."
-msgstr ""
-"Dalam *Destination Network Address Translation* (DNAT), router NAT "
-"memodifikasi alamat IP tujuan ke dalam header paket IP."
-
-msgid ""
-"In *Source Network Address Translation* (SNAT), the NAT router modifies the "
-"IP address of the sender in IP packets. SNAT is commonly used to enable "
-"hosts with *private addresses* to communicate with servers on the public "
-"Internet."
-msgstr ""
-"Dalam *Source Network Address Translation* (SNAT), router NAT memodifikasi "
-"alamat IP dari pengirim di paket IP. SNAT umumnya digunakan untuk "
-"mengaktifkan host dengan *private addresses* untuk berkomunikasi dengan "
-"server di Internet publik."
-
-msgid ""
-"In *one-to-one NAT*, the NAT router maintains a one-to-one mapping between "
-"private IP addresses and public IP addresses. OpenStack uses one-to-one NAT "
-"to implement floating IP addresses."
-msgstr ""
-"Dalam *one-to-one NAT*, router NAT memelihara pemetaan one-to-one antara "
-"alamat IP private dan alamat IP publik. OpenStack menggunakan one-to-one NAT "
-"untuk menerapkan alamat IP mengambang."
-
-msgid ""
-"In :ref:`config-dns-use-case-1`, the externally accessible network must meet "
-"the following requirements:"
-msgstr ""
-"Dalam :ref:`config-dns-use-case-1`, jaringan yang dapat diakses secara "
-"eksternal harus memenuhi persyaratan sebagai berikut:"
-
-msgid ""
-"In Liberty and Mitaka, the IPAM implementation within OpenStack Networking "
-"provided a pluggable and non-pluggable flavor. As of Newton, the non-"
-"pluggable flavor is no longer available. Instead, it is completely replaced "
-"with a reference driver implementation of the pluggable framework. All data "
-"will be automatically migrated during the upgrade process, unless you have "
-"previously configured a pluggable IPAM driver. In that case, no migration is "
-"necessary."
-msgstr ""
-"Di Liberty dan Mitaka, pelaksanaan IPAM dalam OpenStack Networking tersedia "
-"flavor pluggable dan non-pluggable. Pada Newton, rasa non-pluggable tidak "
-"lagi tersedia. Sebaliknya, itu benar-benar diganti dengan implementasi "
-"driver referensi dari kerangka pluggable. Semua data akan dipindahkan secara "
-"otomatis selama proses upgrade, kecuali jika Anda sebelumnya telah "
-"dikonfigurasi driver IPAM pluggable. Dalam hal ini, tidak ada migrasi "
-"diperlukan."
-
-msgid ""
-"In ``/etc/neutron/plugins/ml2/ml2_conf.ini``, add ``qos`` to "
-"``extension_drivers`` in the ``[ml2]`` section. For example:"
-msgstr ""
-"Dalam ``/etc/neutron/plugins/ml2/ml2_conf.ini``, tambahkan ``qos`` ke "
-"``extension_drivers`` dalam bagian ``[ml2]``. Sebagi contoh:"
-
-msgid ""
-"In ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``, add ``qos`` to the "
-"``extensions`` setting in the ``[agent]`` section. For example:"
-msgstr ""
-"Dalam ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``, tambahkan ``qos`` "
-"ke pengaturan ``extensions`` dalam bagian ``[agent]``. Sebagai contoh:"
-
-msgid ""
-"In a deployment where the operator has set up the resources as described "
-"above, they can get their auto-allocated network topology as follows:"
-msgstr ""
-"Dalam penyebaran dimana operator telah menyiapkan sumber daya seperti "
-"dijelaskan di atas, mereka bisa mendapatkan topologi jaringan yang "
-"dialokasikan secara otomatis sebagai berikut:"
-
-msgid ""
-"In a network namespace, the scoped 'identifiers' are network devices; so a "
-"given network device, such as ``eth0``, exists in a particular namespace. "
-"Linux starts up with a default network namespace, so if your operating "
-"system does not do anything special, that is where all the network devices "
-"will be located. But it is also possible to create further non-default "
-"namespaces, and create new devices in those namespaces, or to move an "
-"existing device from one namespace to another."
-msgstr ""
-"Dalam namespace jaringan, scoped 'identifiers' adalah perangkat jaringan; "
-"sehingga perangkat jaringan tertentu, seperti ``eth0``, ada dalam namespace "
-"tertentu. Linux memulai dengan namespace jaringan default, jadi jika sistem "
-"operasi Anda tidak melakukan sesuatu yang istimewa, itulah namespace tempat "
-"semua perangkat jaringan akan berlokasi. Tetapi juga namespace itu mungkin "
-"membuat non-default namespace lebih lanjut, dan menciptakan perangkat baru "
-"pada namespace itu, atau memindahkan perangkat yang ada dari satu namespace "
-"ke namespace yang lain."
-
-msgid ""
-"In an Ethernet network, every host on the network can send a frame directly "
-"to every other host. An Ethernet network also supports broadcasts so that "
-"one host can send a frame to every host on the network by sending to the "
-"special MAC address ``ff:ff:ff:ff:ff:ff``. ARP_ and DHCP_ are two notable "
-"protocols that use Ethernet broadcasts. Because Ethernet networks support "
-"broadcasts, you will sometimes hear an Ethernet network referred to as a "
-"*broadcast domain*."
-msgstr ""
-"Dalam sebuah jaringan Ethernet, setiap host pada jaringan dapat mengirim "
-"frame langsung ke setiap host lainnya. Jaringan Ethernet juga mendukung "
-"siaran sehingga satu host dapat mengirim frame untuk setiap host pada "
-"jaringan dengan mengirimkan ke alamat MAC khusus ``ff:ff:ff:ff:ff:ff``. "
-"ARP_ dan DHCP_ adalah dua protokol penting yang menggunakan siaran Ethernet. "
-"Karena jaringan Ethernet mendukung siaran, Anda kadang-kadang akan mendengar "
-"jaringan Ethernet disebut sebagai *broadcast domain*."
-
-msgid ""
-"In an Ethernet network, the hosts connected to the network communicate by "
-"exchanging *frames*. Every host on an Ethernet network is uniquely "
-"identified by an address called the media access control (MAC) address. In "
-"particular, every virtual machine instance in an OpenStack environment has a "
-"unique MAC address, which is different from the MAC address of the compute "
-"host. A MAC address has 48 bits and is typically represented as a "
-"hexadecimal string, such as ``08:00:27:b9:88:74``. The MAC address is hard-"
-"coded into the NIC by the manufacturer, although modern NICs allow you to "
-"change the MAC address programmatically. In Linux, you can retrieve the MAC "
-"address of a NIC using the :command:`ip` command:"
-msgstr ""
-"Dalam sebuah jaringan Ethernet, host terhubung ke jaringan berkomunikasi "
-"dengan bertukar * frame *. Setiap host di jaringan Ethernet secara unik "
-"diidentifikasi oleh alamat yang disebut media access control (MAC). Secara "
-"khusus, setiap instance mesin virtual di lingkungan OpenStack memiliki "
-"alamat MAC yang unik, yang berbeda dari alamat MAC dari host komputasi. "
-"Sebuah alamat MAC memiliki 48 bit dan biasanya direpresentasikan sebagai "
-"string heksadesimal, seperti ``08:00:27:b9:88:74``. Alamat MAC adalah hard-"
-"coded ke dalam NIC oleh manufacturer, meskipun NIC modern memungkinkan Anda "
-"untuk mengubah alamat MAC secara pemrograman. Di Linux, Anda dapat mengambil "
-"alamat MAC dari NIC menggunakan perintah :command:`ip`"
-
-msgid ""
-"In deployments using DVR, the BGP speaker advertises floating IP addresses "
-"and self-service networks differently. For floating IP addresses, the BGP "
-"speaker advertises the floating IP agent gateway on the corresponding "
-"compute node as the next-hop IP address. For self-service networks using "
-"SNAT, the BGP speaker advertises the DVR SNAT node as the next-hop IP "
-"address."
-msgstr ""
-"Dalam pengerahan penggunaan DVR, BGP speaker menyiaran update dan perubahan "
-"alamat IP mengambang dan jaringan self-service yang berbeda. Untuk alamat IP "
-"mengambang, BGP speaker menyiaran update dan perubahan gateway agen IP "
-"mengambang pada node komputasi yang sesuai sebagai alamat IP next-hop. Untuk "
-"jaringan self-service menggunakan SNAT, BGP speaker menyiaran update dan "
-"perubahan node DVR SNAT sebagai alamat IP next-hop."
-
-msgid ""
-"In existing deployments, check the current database version using the "
-"following command:"
-msgstr ""
-"Dalam pengerahan yang ada, periksa versi database saat menggunakan perintah "
-"berikut:"
-
-msgid ""
-"In future releases, the Networking service may support setting tags for "
-"additional resources."
-msgstr ""
-"Dalam rilis mendatang, layanan Networking dapat mendukung pengaturan tag "
-"untuk sumber daya tambahan."
-
-msgid ""
-"In general, the OpenStack Networking software components that handle layer-3 "
-"operations impact performance and reliability the most. To improve "
-"performance and reliability, provider networks move layer-3 operations to "
-"the physical network infrastructure."
-msgstr ""
-"Secara umum, komponen software OpenStack Networking yang menangani operasi "
-"lapisan-3 berdampak secara nysta pada kinerja dan kehandalan. Untuk "
-"meningkatkan kinerja dan kehandalan, jaringan provider memindahkan operasi "
-"lapisan-3 untuk infrastruktur jaringan fisik."
-
-msgid ""
-"In many cases, operators who are already familiar with virtual networking "
-"architectures that rely on physical network infrastructure for layer-2, "
-"layer-3, or other services can seamlessly deploy the OpenStack Networking "
-"service. In particular, provider networks appeal to operators looking to "
-"migrate from the Compute networking service (nova-network) to the OpenStack "
-"Networking service. Over time, operators can build on this minimal "
-"architecture to enable more cloud networking features."
-msgstr ""
-"Dalam banyak kasus, operator yang sudah akrab dengan arsitektur jaringan "
-"virtual yang mengandalkan infrastruktur jaringan fisik untuk lapisan-2, "
-"lapisan-3, atau layanan lainnya dapat secara mulus mnegerahkan layanan "
-"OpenStack Networking. Secara khusus, jaringan provider menarik bagi operator "
-"yang ingin bermigrasi dari layanan jaringan Compute (nova-network) ke "
-"layanan OpenStack Networking. Seiring waktu, operator dapat membangun "
-"arsitektur minimal ini untuk mengaktifkan fitur jaringan cloud."
-
-msgid ""
-"In most cases, self-service networks use overlay protocols such as VXLAN or "
-"GRE because they can support many more networks than layer-2 segmentation "
-"using VLAN tagging (802.1q). Furthermore, VLANs typically require additional "
-"configuration of physical network infrastructure."
-msgstr ""
-"Dalam kebanyakan kasus, jaringan self-service menggunakan protokol overlay "
-"seperti VXLAN atau GRE karena mereka dapat mendukung lebih banyak jaringan "
-"dari segmentasi lapisan-2 menggunakan VLAN tagging (802.1q). Selanjutnya, "
-"VLAN biasanya membutuhkan konfigurasi infrastruktur jaringan fisik tambahan."
-
-msgid ""
-"In new deployments, you start with an empty database and then upgrade to the "
-"latest database version using the following command:"
-msgstr ""
-"Dalam pengerahan baru, Anda mulai dengan database kosong dan kemudian meng-"
-"upgrade ke versi terbaru database menggunakan perintah berikut:"
-
-msgid ""
-"In one particular use case, the OpenStack deployment resides in a mixed "
-"environment with conventional virtualization and bare-metal hosts that use a "
-"sizable physical network infrastructure. Applications that run inside the "
-"OpenStack deployment might require direct layer-2 access, typically using "
-"VLANs, to applications outside of the deployment."
-msgstr ""
-"Dalam satu kasus penggunaan tertentu, pengerahan OpenStack berada di "
-"lingkungan campuran conventional virtualization dengan bare-metal host yang "
-"menggunakan infrastruktur jaringan fisik yang cukup besar. Aplikasi yang "
-"berjalan di dalam penyebaran OpenStack mungkin memerlukan lapisan-2 akses "
-"langsung, biasanya menggunakan VLAN, untuk aplikasi di luar pengerahan."
-
-msgid ""
-"In order to attach a QoS policy to a network, update an existing network, or "
-"initially create the network attached to the policy."
-msgstr ""
-"Dalam rangka untuk melekatkan kebijakan QoS untuk jaringan, perbarui "
-"jaringan yang ada, atau buat secara awal jaringan yang melekat pada "
-"kebijakan."
-
-msgid ""
-"In order to detach a port from the QoS policy, simply update again the port "
-"configuration."
-msgstr ""
-"Dalam rangka untuk melepaskan port dari kebijakan QoS, hanya update lagi "
-"konfigurasi port."
-
-msgid "In order to enable SR-IOV, the following steps are required:"
-msgstr "Untuk mengaktifkan SR-IOV, langkah-langkah berikut ini diperlukan:"
-
-msgid ""
-"In order to support a wide range of deployment options, the migration "
-"process described here requires a rolling restart of hypervisors. The rate "
-"and timing of specific hypervisor restarts is under the control of the "
-"operator."
-msgstr ""
-"Dalam rangka mendukung berbagai opsi pengerahan, proses migrasi dijelaskan "
-"disini membutuhkan restart hypervisors rolling. Tingkat dan waktu restart "
-"hypervisor tertentu berada di bawah kendali operator."
-
-msgid ""
-"In order to support the widest range of deployer needs, the process "
-"described here is easy to automate but is not already automated. Deployers "
-"should expect to perform multiple manual steps or write some simple scripts "
-"in order to perform this migration."
-msgstr ""
-"Dalam rangka mendukung jangkauan terluas kebutuhan deployer, proses yang "
-"dijelaskan di sini adalah mudah untuk mengotomatisasi tetapi belum otomatis. "
-"Deployers harus berharap untuk melakukan beberapa langkah-langkah manual "
-"atau menulis beberapa script sederhana untuk melakukan migrasi ini."
-
-msgid ""
-"In the OSI model of networking protocols IP occupies the third layer, known "
-"as the network layer. When discussing IP, you will often hear terms such as "
-"*layer 3*, *L3*, and *network layer*."
-msgstr ""
-"Dalam model OSI, IP protokol jaringan menempati lapisan ketiga, yang dikenal "
-"sebagai lapisan jaringan. Ketika mendiskusikan IP, Anda akan sering "
-"mendengar istilah-istilah seperti *layer 3*, *L3*, dan *network layer*."
-
-msgid ""
-"In the `OSI model `_ of networking "
-"protocols, Ethernet occupies the second layer, which is known as the data "
-"link layer. When discussing Ethernet, you will often hear terms such as "
-"*local network*, *layer 2*, *L2*, *link layer* and *data link layer*."
-msgstr ""
-"Dalam `OSI model `_ untuk protokol "
-"jaringan, Ethernet menempati lapisan kedua, yang dikenal sebagai lapisan "
-"data link. Ketika membahas Ethernet, Anda akan sering mendengar istilah-"
-"istilah seperti *local network*, *layer 2*, *L2*, *link layer* dan *data "
-"link layer*."
-
-msgid ""
-"In the ``AGENT`` section of ``l3_agent.ini``, make sure the FWaaS extension "
-"is loaded:"
-msgstr ""
-"Dalam bagian ``AGENT`` dari ``l3_agent.ini``, pastikan ekstensi FWaaS dimuat:"
-
-msgid "In the ``bgp_dragent.ini`` file:"
-msgstr "Dalam file ``bgp_dragent.ini``:"
-
-msgid ""
-"In the ``dhcp_agent.ini`` file, configure one or more DNS resolvers. To "
-"configure more than one DNS resolver, use a comma between each value."
-msgstr ""
-"Dalam file ``dhcp_agent.ini``, lakukan konfigurasi satu atau lebih DNS "
-"resolvers. Untuk mengkonfigurasi lebih dari satu DNS resolver, gunakan koma "
-"di antara setiap nilai."
-
-msgid "In the ``dhcp_agent.ini`` file, configure the DHCP agent:"
-msgstr "Dalam file ``dhcp_agent.ini``, lakukan konfigurasi agen DHCP:"
-
-msgid ""
-"In the ``dhcp_agent.ini`` file, enable advertisement of the DNS resolver(s) "
-"on the host."
-msgstr ""
-"Dalam file ``dhcp_agent.ini``, memungkinkan penyiaran update dan perubahan "
-"DNS resolver pada host."
-
-msgid ""
-"In the ``l3_agent.ini`` file, configure the layer-3 agent to provide SNAT "
-"services."
-msgstr ""
-"Dalam file ``l3_agent.ini``, lakukan konfigurasi agen lapisan-3 untuk "
-"memberikan layanan SNAT"
-
-msgid "In the ``l3_agent.ini`` file, configure the layer-3 agent."
-msgstr "Dalam file ``l3_agent.ini``, lakukan konfigurasi layer-3 agent."
-
-msgid ""
-"In the ``linuxbridge_agent.ini`` file, configure the Linux bridge agent:"
-msgstr ""
-"Dalam file ``linuxbridge_agent.ini``, lakukan konfigurasi Linux bridge agent:"
-
-msgid "In the ``linuxbridge_agent.ini`` file, configure the layer-2 agent."
-msgstr ""
-"Dalam file ``linuxbridge_agent.ini``, lakukan konfigurasi layer-2 agent."
-
-msgid ""
-"In the ``linuxbridge_agent.ini`` file, enable VXLAN support including "
-"layer-2 population."
-msgstr ""
-"Dalam file ``linuxbridge_agent.ini`` , aktifkan dukungan VXLAN termasuk "
-"populasi lapisan-2."
-
-msgid "In the ``macvtap_agent.ini`` file, configure the layer-2 agent."
-msgstr "Dalam file ``macvtap_agent.ini``, lakukan konfigurasi agen layer-2."
-
-msgid "In the ``metadata_agent.ini`` file, configure the metadata agent:"
-msgstr "Dalam file ``metadata_agent.ini``, lakukan konfigurasi agen metadata:"
-
-msgid "In the ``ml2_conf.ini`` file:"
-msgstr "Dalam file ``ml2_conf.ini``:"
-
-msgid "In the ``neutron.conf`` file, configure common options:"
-msgstr "Dalam file ``neutron.conf``, lakukan konfigurasi opsi umum:"
-
-msgid ""
-"In the ``neutron.conf`` file, enable the conventional layer-3 and BGP "
-"dynamic routing service plug-ins:"
-msgstr ""
-"Dalam file ``neutron.conf``, mengaktifkan lapisan-3 konvensional dan BGP "
-"layanan routing dinamis plug-in:"
-
-msgid "In the ``neutron.conf`` file, enable the trunk service plug-in:"
-msgstr "Dalam file ``neutron.conf``, aktifkan trunk service plug-in:"
-
-msgid "In the ``neutron.conf`` file:"
-msgstr "Dalam file ``neutron.conf``:"
-
-msgid "In the ``openswitch_agent.ini`` file, enable distributed routing."
-msgstr "Dalam file ``openswitch_agent.ini``, aktifkan didistribusikan routing."
-
-msgid "In the ``openvswitch_agent.ini`` file, configure the OVS agent:"
-msgstr "Dalam file ``openvswitch_agent.ini``, lakukan konfigurasi agen OVS:"
-
-msgid "In the ``openvswitch_agent.ini`` file, configure the layer-2 agent."
-msgstr ""
-"Dalam file ``openvswitch_agent.ini``, lakukan konfigurasi layer-2 agent."
-
-msgid ""
-"In the ``openvswitch_agent.ini`` file, enable VXLAN support including "
-"layer-2 population."
-msgstr ""
-"Dalam file ``openvswitch_agent.ini``, aktifkan dukungan VXLAN termasuk "
-"layer-2 population."
-
-msgid "In the ``openvswitch_agent.ini`` file:"
-msgstr "Dalam file ``openvswitch_agent.ini``:"
-
-msgid ""
-"In the above configuration, we use ``dhcp_agents_per_network = 1`` for this "
-"demonstration. In usual deployments, we suggest setting "
-"``dhcp_agents_per_network`` to more than one to match the number of DHCP "
-"agents in your deployment. See :ref:`conf-dhcp-agents-per-network`."
-msgstr ""
-"Dalam konfigurasi di atas, kita menggunakan ``dhcp_agents_per_network = 1`` "
-"untuk demonstrasi ini. Dalam pengerahan biasa, kami sarankan pengaturan "
-"``dhcp_agents_per_network`` untuk lebih dari satu untuk mencocokkan jumlah "
-"agen DHCP dalam pengerahan Anda. Lihat :ref:`conf-dhcp-agents-per-network`."
-
-msgid "In the above example notice that:"
-msgstr "Dalam contoh di atas perhatikan bahwa:"
-
-msgid ""
-"In the absence of an upstream RA support, ``ipv6_gateway`` flag can be set "
-"with the external router gateway LLA in the neutron L3 agent configuration "
-"file. This also requires that no subnet is associated with that port."
-msgstr ""
-"Dengan tidak adanya dukungan RA hulu, ``ilag ipv6_gateway`` dapat diatur "
-"dengan LLA router gerbang eksternal dalam file konfigurasi neutron-l3-agent. "
-"Ini juga mensyaratkan bahwa tidak ada subnet dikaitkan dengan port tersebut."
-
-msgid ""
-"In the most simple case, the property can be represented by a simple Python "
-"list defined on the class."
-msgstr ""
-"Dalam kasus yang paling sederhana, properti dapat diwakili oleh daftar "
-"Python sederhana yang didefinisikan di kelas."
-
-msgid ""
-"In the output above the standard ``router:external`` attribute is "
-"``External`` as expected. Now a wildcard policy is visible in the RBAC "
-"policy listings:"
-msgstr ""
-"Pada contoh di atas atribut ``router:external`` standar adalah `` External`` "
-"seperti yang diharapkan. Sekarang kebijakan wildcard terlihat dalam daftar "
-"kebijakan RBAC:"
-
-msgid ""
-"In this case, the DHCP agent offers one or more unique DNS resolvers to "
-"instances via DHCP on each virtual network. You can configure a DNS resolver "
-"when creating or updating a subnet. To configure more than one DNS resolver, "
-"use a comma between each value."
-msgstr ""
-"Dalam case ini, agen DHCP menawarkan satu atau lebih DNS resolver unik untuk "
-"instance melalui DHCP pada setiap jaringan virtual. Anda dapat "
-"mengkonfigurasi DNS resolver saat membuat atau memperbarui subnet. Untuk "
-"mengkonfigurasi lebih dari satu DNS resolver, gunakan koma di antara setiap "
-"nilai."
-
-msgid ""
-"In this case, the DHCP agent offers the DNS resolver(s) in the ``resolv."
-"conf`` file on the host running the DHCP agent via DHCP to instances on all "
-"virtual networks."
-msgstr ""
-"Dalam case ini, agen DHCP menawarkan DNS resolver dalam file ``resolv.conf`` "
-"pada host yang sedang menjalankan agen DHCP melalui DHCP untuk instance di "
-"semua jaringan virtual."
-
-msgid ""
-"In this case, the DHCP agent offers the same DNS resolver(s) to instances "
-"via DHCP on all virtual networks."
-msgstr ""
-"Dalam case ini, agen DHCP menawarkan DNS resolver yang sama untuk instance "
-"melalui DHCP di semua jaringan virtual."
-
-msgid ""
-"In this case, the user is creating ports or booting instances on a network "
-"that is accessible externally. The steps to publish the port in the external "
-"DNS service are the following:"
-msgstr ""
-"Dalam hal ini, pengguna menciptakan port atau booting instance pada jaringan "
-"yang dapat diakses secara eksternal. Langkah-langkah untuk menerbitkan port "
-"di layanan DNS eksternal adalah sebagai berikut:"
-
-msgid ""
-"In this case, we should let the Networking service find these networks. "
-"Obviously, there are no such networks and the service will return an empty "
-"list."
-msgstr ""
-"Dalam hal ini, kita harus membiarkan layanan Networking menemukan jaringan "
-"ini. Jelas, tidak ada jaringan tersebut dan layanan akan kembali daftar "
-"kosong."
-
-msgid ""
-"In this example the port is created manually by the user and then used to "
-"boot an instance. Notice that:"
-msgstr ""
-"Dalam contoh ini port dibuat secara manual oleh pengguna dan kemudian "
-"digunakan untuk boot sebuah instance. Perhatikan bahwa:"
-
-msgid ""
-"In this example, notice that the data is published in the DNS service when "
-"the floating IP is associated to the port."
-msgstr ""
-"Dalam contoh ini, perhatikan bahwa data yang diterbitkan dalam layanan DNS "
-"ketika IP mengambang dikaitkan ke port."
-
-msgid ""
-"In this example, the health monitor removes the server from the pool if it "
-"fails a health check at two five-second intervals. When the server recovers "
-"and begins responding to health checks again, it is added to the pool once "
-"again."
-msgstr ""
-"Dalam contoh ini, monitor kesehatan menghapus server dari kolam jika gagal "
-"cek kesehatan di dua interval lima detik. Ketika server pulih dan mulai "
-"menanggapi pemeriksaan kesehatan lagi, itu akan ditambahkan ke kolam sekali "
-"lagi."
-
-msgid ""
-"In this example, the load balancer uses the round robin algorithm and the "
-"traffic alternates between the web servers on the backend."
-msgstr ""
-"Dalam contoh ini, beban penyeimbang menggunakan algoritma round robin dan "
-"pergantian lalu lintas antara server web pada backend."
-
-msgid ""
-"In this section, the combination of a mechanism driver and an L2 agent is "
-"called 'reference implementation'. The following table lists these "
-"implementations:"
-msgstr ""
-"Pada bagian ini, kombinasi dari driver mekanisme dan agen L2 disebut "
-"'reference implementation'. Tabel berikut berisi implementasi tersebut:"
-
-msgid ""
-"In this use case, the address of a floating IP is published in the external "
-"DNS service in conjunction with the ``dns_name`` of its associated port and "
-"the ``dns_domain`` of the port's network. The steps to execute in this use "
-"case are the following:"
-msgstr ""
-"Dalam use case ini, alamat IP mengambang diterbitkan dalam layanan DNS "
-"eksternal dalam hubungannya dengan ``dns_name`` port yang terkait dan "
-"``dns_domain`` jaringan port. Langkah untuk mengeksekusi use case ini adalah "
-"sebagai berikut:"
-
-msgid ""
-"In this use case, the user assigns ``dns_name`` and ``dns_domain`` "
-"attributes to a floating IP when it is created. The floating IP data becomes "
-"visible in the external DNS service as soon as it is created. The floating "
-"IP can be associated with a port on creation or later on. The following "
-"example shows a user booting an instance and then creating a floating IP "
-"associated to the port allocated for the instance:"
-msgstr ""
-"Dalam use case ini, pengguna memberikan atribut ``dns_name`` dan "
-"``dns_domain`` ke IP mengambang ketika dibuat. Data IP mengambang akan "
-"terlihat dalam layanan DNS eksternal segera setelah itu dibuat. IP "
-"mengambang dapat dikaitkan dengan port ketika pembuatan atau nanti. Contoh "
-"berikut menunjukkan bagaimana pengguna booting instance dan kemudian membuat "
-"IP mengambang terkait ke port yang dialokasikan untuk instance:"
-
-msgid "Input type"
-msgstr "Input type (tipe masukan )"
-
-msgid "Install OVS."
-msgstr "Instal OVS."
-
-msgid "Install the Dashboard panel plug-in:"
-msgstr "Instal plug-in panel Dashboard:"
-
-msgid ""
-"Install the Networking service Linux bridge layer-2 agent and layer-3 agent."
-msgstr ""
-"Instal layanan Networking, Linux bridge layer-2 agent dan layer-3 agent."
-
-msgid "Install the Networking service Linux bridge layer-2 agent."
-msgstr "Instal layanan Networking, Linux bridge layer-2 agent."
-
-msgid "Install the Networking service Macvtap layer-2 agent."
-msgstr "Instal agen ayer-2 Macvtap layanan Networking"
-
-msgid "Install the Networking service OVS layer-2 agent and layer-3 agent."
-msgstr "Install OVS layer-2 agent dan layer-3 agent layanan Networking."
-
-msgid ""
-"Install the Networking service OVS layer-2 agent, DHCP agent, and metadata "
-"agent."
-msgstr ""
-"Instal Networking service OVS layer-2 agent, DHCP agent, dan metadata agent."
-
-msgid ""
-"Install the Networking service components that provide the ``neutron-"
-"server`` service and ML2 plug-in."
-msgstr ""
-"Menginstal komponen layanan Networking yang menyediakan layanan ``neutron-"
-"server`` dan ML2 plug-in."
-
-msgid ""
-"Install the Networking service components that provides the ``neutron-"
-"server`` service and ML2 plug-in."
-msgstr ""
-"Instal komponen layanan Networking yang menyediakan layanan ``neutron-"
-"server`` dan ML2 plug-in."
-
-msgid "Install the Networking service layer-3 agent."
-msgstr "Instal agen lapisan-3 layanan Networking"
-
-msgid "Install the OpenStack Networking layer-3 agent."
-msgstr "Instal agen lapisan-3 OpenStack Networking."
-
-msgid "Install the SR-IOV agent."
-msgstr "Instal agen SR-IOV."
-
-msgid "Install the ``ebrctl`` utility on the compute nodes."
-msgstr "Instal utilitas ``ebrctl`` pada node komputasi."
-
-msgid "Instance 1"
-msgstr "Instance 1"
-
-msgid "Instance 1 resides on compute node 1 and uses provider network 1."
-msgstr ""
-"Instance 1 berada pada komputasi node 1 dan menggunakan jaringan operator 1."
-
-msgid "Instance 1 resides on compute node 1 and uses self-service network 1."
-msgstr ""
-"Instance 1 berada pada compute node 1 dan menggunakan self-service network 1."
-
-msgid "Instance 1 sends a packet to instance 2."
-msgstr "Instance 1 mengirimkan sebuah paket ke instance 2."
-
-msgid "Instance 2"
-msgstr "Instance 2"
-
-msgid "Instance 2 resides on compute node 1 and uses provider network 2."
-msgstr ""
-"Instance 2 berada pada komputasi node 1 dan menggunakan provider network 2."
-
-msgid "Instance 2 resides on compute node 1 and uses self-service network 2."
-msgstr ""
-"Instance 2 berada pad compute node 1dan menggunakan self-service network 2."
-
-msgid "Instance 2 resides on compute node 2 and uses provider network 1."
-msgstr ""
-"Instance 2 berada pada komputasi node 2 dan menggunakan jaringan operator 1."
-
-msgid "Instance 2 resides on compute node 2 and uses self-service network 1."
-msgstr ""
-"Instance 2 berada pada compute node 2 dan menggunakan self-service network 1."
-
-msgid "Instance 3"
-msgstr "Instance 3"
-
-msgid ""
-"Instance migration requires the same values for the "
-"``physical_interface_mapping`` configuration option on each compute node. "
-"For more information, see ``_."
-msgstr ""
-"Migrasi instance membutuhkan nilai yang sama untuk opsi konfigurasi "
-"``physical_interface_mapping`` pada setiap node komputasi. Untuk informasi "
-"lebih lanjut, lihat ``_."
-
-msgid "Instance network interfaces (VIFs)"
-msgstr "Instance network interfaces (VIF)"
-
-msgid ""
-"Instance network traffic on self-service networks using a particular router "
-"only traverses the master instance of that router. Thus, resource "
-"limitations of a particular network node can impact all master instances of "
-"routers on that network node without triggering failover to another network "
-"node. However, you can configure the scheduler to distribute the master "
-"instance of each router uniformly across a pool of network nodes to reduce "
-"the chance of resource contention on any particular network node."
-msgstr ""
-"Lalu lintas jaringan instance di jaringan self-service menggunakan router "
-"tertentu saja melintasi instance master router itu. Dengan demikian, "
-"keterbatasan sumber daya dari node jaringan tertentu dapat berdampak pada "
-"semua instance master router pada jaringan node tanpa memicu failover ke "
-"node jaringan lain. Namun, Anda dapat mengkonfigurasi scheduler untuk "
-"mendistribusikan instance master setiap router merata di kolam node jaringan "
-"untuk mengurangi kemungkinan pertentangan sumber daya pada setiap simpul "
-"jaringan tertentu."
-
-msgid ""
-"Instances communicate via router on the physical network infrastructure."
-msgstr ""
-"Instance berkomunikasi melalui router pada infrastruktur jaringan fisik."
-
-msgid ""
-"Instances on the same network communicate directly between compute nodes "
-"containing those instances."
-msgstr ""
-"Instance pada jaringan yang sama berkomunikasi secara langsung antara node "
-"komputasi yang berisi instance."
-
-msgid ""
-"Instances using a fixed IPv4/IPv6 address or floating IPv4 address "
-"communicate via router on the network node. The self-service networks must "
-"reside on the same router."
-msgstr ""
-"Instance menggunakan alamat IPv4/IPv6 tetap atau alamat IPv4 mengambang "
-"berkomunikasi melalui router pada node jaringan. Jaringan self-service harus "
-"berada pada router yang sama."
-
-msgid ""
-"Instances with a fixed IPv4/IPv6 address or floating IPv4 address on the "
-"same network communicate directly between compute nodes containing those "
-"instances."
-msgstr ""
-"Instance dengan alamat IPv4/IPv6 tetap atau alamat IPv4 mengambang pada "
-"jaringan yang sama berkomunikasi secara langsung antara node komputasi yang "
-"berisi instance."
-
-msgid ""
-"Instances with a fixed IPv4/IPv6 or floating IPv4 address on the same "
-"network communicate directly between compute nodes containing those "
-"instances."
-msgstr ""
-"Instance dengan alamat IPv4 / IPv6 tetap atau alamat IPv4 mengambang pada "
-"jaringan yang sama berkomunikasi secara langsung antara node komputasi yang "
-"berisi instance"
-
-msgid ""
-"Instances with fixed IPv4/IPv6 address or floating IPv4 address on the same "
-"compute node communicate via router on the compute node. Instances on "
-"different compute nodes communicate via an instance of the router on each "
-"compute node."
-msgstr ""
-"Instance dengan alamat IPv4/IPv6 tetap atau alamat IPv4 mengambang pada "
-"node komputasi yang sama berkomunikasi melalui router pada node komputasi. "
-"Instance pada node komputasi yang berbeda berkomunikasi melalui sebuah "
-"instance dari router pada setiap node komputasi."
-
-msgid ""
-"Instead of having the Compute service create the port for the instance, the "
-"user might have created it and assigned a value to its ``dns_name`` "
-"attribute. In this case, the value assigned to the ``dns_name`` attribute "
-"must be equal to the value that Compute service will assign to the "
-"instance's ``hostname``, in this example ``my-vm``. Otherwise, the instance "
-"boot will fail."
-msgstr ""
-"Dari pada memiliki layanan Compute membuat port untuk instance, pengguna "
-"mungkin telah membuatnya dan menugaskan nilai untuk atribut ``dns_name`` "
-"nya. Dalam hal ini, nilai yang diberikan kepada atribut ``dns_name`` harus "
-"sama dengan nilai dimana layanan Compute akan menetapkan ke ``hostname`` "
-"instance nya, dalam contoh ini ``my-vm``. Jika tidak, booting instance akan "
-"gagal."
-
-msgid ""
-"Instead of reading the DB connection from the configuration file(s), you can "
-"use the ``--database-connection`` option:"
-msgstr ""
-"Dari pada membaca koneksi DB dari file konfigurasi, Anda dapat menggunakan "
-"opsi ``--database-connection``:"
-
-msgid ""
-"Integration of the Compute service and the Networking service with an "
-"external DNSaaS (DNS-as-a-Service)."
-msgstr ""
-"Integrasi layanan Compute dan layanan Networking dengan DNSaaS (DNS-as-a-"
-"Service) eksternal. "
-
-msgid "Integration with an external DNS service"
-msgstr "Integrasi dengan layanan DNS eksternal"
-
-msgid "Intel"
-msgstr "Intel"
-
-msgid "Interface on self-service network 1"
-msgstr "Antarmuka pada jaringan self-service 1"
-
-msgid "Interface on self-service network 2"
-msgstr "Antarmuka pada jaringan self-service 2"
-
-msgid ""
-"Internal router ports, that act as default gateway ports for a network, will "
-"share a common port for all IPv6 subnets associated with the network. This "
-"implies that there will be an IPv6 internal router interface with multiple "
-"IPv6 addresses from each of the IPv6 subnets associated with the network and "
-"a separate IPv4 internal router interface for the IPv4 subnet. On the other "
-"hand, external router ports are allowed to have a dual-stack configuration "
-"with both an IPv4 and an IPv6 address assigned to them."
-msgstr ""
-"Port router internal, yang bertindak sebagai port gateway default untuk "
-"jaringan, akan berbagi port umum untuk semua subnet IPv6 terkait dengan "
-"jaringan. Ini berarti bahwa akan ada sebuah antarmuka router internal yang "
-"IPv6 dengan beberapa alamat IPv6 dari masing-masing subnet IPv6 yang terkait "
-"dengan jaringan dan interface router internal IPv4 yang terpisah untuk "
-"subnet IPv4. Di sisi lain, port router eksternal diperbolehkan untuk "
-"memiliki konfigurasi dual-stack dengan IPv4 dan alamat IPv6 yang ditugaskan "
-"kepada mereka."
-
-msgid ""
-"Interruption of VRRP *heartbeat* traffic between network nodes, typically "
-"due to a network interface or physical network infrastructure failure, "
-"triggers a failover. Restarting the layer-3 agent, or failure of it, does "
-"not trigger a failover providing ``keepalived`` continues to operate."
-msgstr ""
-"Gangguanlalu lintas *heartbeat* VRRP antara node jaringan, biasanya karena "
-"antarmuka jaringan atau kegagalan infrastruktur jaringan fisik, memicu "
-"failover. Restart agen lapisan-3, atau kegagalan itu, tidak memicu failover "
-"menyediakan ``keepalived`` terus beroperasi."
-
-msgid "Introduction"
-msgstr "Pengantar"
-
-msgid ""
-"Irrelevant fields have been trimmed from the output of these commands for "
-"brevity."
-msgstr ""
-"Field tidak relevan telah dipangkas dari output dari perintah ini supaya "
-"singkat."
-
-msgid ""
-"Is deployed besides an other mechanism driver and L2 agent such as OVS or "
-"Linux bridge. It offers instances direct access to the network adapter "
-"through a PCI Virtual Function (VF). This gives an instance direct access to "
-"hardware capabilities and high performance networking."
-msgstr ""
-"Dikerahkan bersama driver mekanisme lain dan agen L2 seperti OVS atau "
-"jembatan Linux. Ia menawarkan akses langsung instance ke adaptor jaringan "
-"melalui PCI Virtual Function (VF). Hal ini memberikan sebuah akses langsung "
-"instance ke kekuatan hardware dan jaringan kinerja tinggi."
-
-msgid ""
-"It follows that with these two flags set to ``True`` in the configuration "
-"file, routers created by all users will default to distributed HA routers "
-"(DVR HA)."
-msgstr ""
-"Ini berarti bahwa dengan dua flag ini diatur ke ``True`` dalam file "
-"konfigurasi, router yang dibuat oleh semua pengguna akan menjadi default "
-"untuk router HA (DVR HA) terdistribusi."
-
-msgid "It includes the following components:"
-msgstr "Ini mencakup komponen-komponen berikut:"
-
-msgid ""
-"It is allocated to DHCP agent on HostA. If you want to validate the behavior "
-"through the :command:`dnsmasq` command, you must create a subnet for the "
-"network because the DHCP agent starts the dnsmasq service only if there is a "
-"DHCP."
-msgstr ""
-"Hal ini dialokasikan untuk agen DHCP pada HostA. Jika Anda ingin memvalidasi "
-"perilaku melalui perintah :command:`dnsmasq`, Anda harus membuat subnet "
-"untuk jaringan karena agen DHCP memulai layanan dnsmasq hanya jika ada DHCP."
-
-msgid ""
-"It is common for a packet to hop across multiple routers to reach its final "
-"destination. On a Linux machine, the ``traceroute`` and more recent ``mtr`` "
-"programs prints out the IP address of each router that an IP packet "
-"traverses along its path to its destination."
-msgstr ""
-"Hal ini umum untuk sebuah paket untuk hop di beberapa router untuk mencapai "
-"tujuan akhir. Pada mesin Linux, program ``traceroute`` dan ``mtr`` lebih "
-"baru mencetak alamat IP untuk setiap router dimana paket IP melintasi "
-"sepanjang jalan ke tujuannya."
-
-msgid ""
-"It is important that you select a VLAN range not being used by your current "
-"network infrastructure. For example, if you estimate that your cloud must "
-"support a maximum of 100 projects, pick a VLAN range outside of that value, "
-"such as VLAN 200–299. OpenStack, and all physical network infrastructure "
-"that handles project networks, must then support this VLAN range."
-msgstr ""
-"Adalah penting bahwa Anda memilih rentang VLAN yang tidak digunakan oleh "
-"infrastruktur jaringan Anda saat ini. Misalnya, jika Anda memperkirakan "
-"bahwa cloud Anda harus mendukung maksimal 100 proyek, pilihlah rentang VLAN "
-"yang luar nilai itu, seperti VLAN 200-299. OpenStack, dan semua "
-"infrastruktur jaringan fisik yang menangani jaringan proyek, maka harus "
-"mendukung berbagai VLAN ini."
-
-msgid ""
-"It is positioned as alternative to Open vSwitch or Linux bridge support on "
-"the compute node for internal deployments."
-msgstr ""
-"Hal ini diposisikan sebagai alternatif untuk Open vSwitch atau dukungan "
-"jembatan Linux pada node komputasi untuk pengerahan internal."
-
-msgid "Jumbo frames"
-msgstr "Frame jumbo"
-
-msgid ""
-"Just like with bandwidth limiting, create a policy for DSCP marking rule:"
-msgstr ""
-"Sama seperti dengan bandwidth yang membatasi, buat kebijakan untuk aturan "
-"DSCP marking:"
-
-msgid "Keepalived VRRP health check"
-msgstr "Cek kesehatan Keepalived VRRP health check"
-
-msgid ""
-"Kernel version 3.3, but less than 4.3, does not include *conntrack* support "
-"and requires building the OVS modules."
-msgstr ""
-"Kernel versi 3.3, tetapi kurang dari 4.3, tidak termasuk dukungan *conntrack "
-"* dan membutuhkan pembangunan modul OVS."
-
-msgid "Kernel version 4.3 or newer includes *conntrack* support."
-msgstr "Kernel version 4.3 atau lebih baru termasuk dukungan *conntrack*."
-
-msgid "Known limitations"
-msgstr "Keterbatasan dikenal"
-
-msgid "L2 agent"
-msgstr "Agen L2"
-
-msgid "L2 agents support some important security configurations."
-msgstr "Agen L2 mendukung beberapa konfigurasi keamanan penting."
-
-msgid "L2 population"
-msgstr "Populasi L2"
-
-msgid ""
-"L2 population is a special mechanism driver that optimizes BUM (Broadcast, "
-"unknown destination address, multicast) traffic in the overlay networks "
-"VXLAN and GRE. It needs to be used in conjunction with either the Linux "
-"bridge or the Open vSwitch mechanism driver and cannot be used as standalone "
-"mechanism driver. For more information, see the *Mechanism drivers* section "
-"below."
-msgstr ""
-"Populasi L2 adalah driver mekanisme khusus yang mengoptimalkan lalu lintas "
-"BUM (Broadcast, unknown destination address, multicast) di VXLAN dan GRE "
-"jaringan overlay. Perlu digunakan bersama dengan baik jembatan Linux atau "
-"driver mekanisme Open vSwitch dan tidak dapat digunakan sebagai driver "
-"mekanisme mandiri. Untuk informasi lebih lanjut, lihat bagian bawah "
-"*Mechanism drivers*."
-
-msgid ""
-"L2 population is not listed here, as it is not a standalone mechanism. If "
-"other agents are supported depends on the conjunctive mechanism driver that "
-"is used for binding a port."
-msgstr ""
-"Populasi L2 tidak tercantum di sini, karena itu tidak memiliki mekanisme "
-"mandiri. Jika agen lain didukung tergantung pada driver mekanisme penghubung "
-"yang digunakan untuk mengikat port."
-
-msgid "L3"
-msgstr "L3"
-
-msgid "L3 HA to Legacy"
-msgstr "L3 HA untuk Legacy"
-
-msgid "L3 Metering agent"
-msgstr "Agen metering L3 "
-
-msgid "L3 agent"
-msgstr "Agen L3"
-
-msgid "L3 high availability"
-msgstr "L3 ketersediaan tinggi"
-
-msgid "L3 metering agent"
-msgstr "Agen metering L3"
-
-msgid "LBaaS"
-msgstr "LBaaS"
-
-msgid ""
-"LBaaS v1 was removed in the Newton release. These links provide more details "
-"about how LBaaS v1 works and how to configure it:"
-msgstr ""
-"LBaaS v1 telah dihapus dalam rilis Newton. Link ini memberikan rincian lebih "
-"lanjut tentang bagaimana LBaaS v1 bekerja dan bagaimana mengkonfigurasinya:"
-
-msgid "LBaaS v2 Concepts"
-msgstr "LBaaS v2 Concepts"
-
-msgid ""
-"LBaaS v2 adds the concept of listeners to the LBaaS v1 load balancers. LBaaS "
-"v2 allows you to configure multiple listener ports on a single load balancer "
-"IP address."
-msgstr ""
-"LBaaS v2 menambahkan konsep pendengar ke penyeimbang beban LBaaS v1. LBaaS "
-"v2 memungkinkan Anda untuk mengkonfigurasi beberapa port pendengar pada "
-"alamat IP penyeimbang beban tunggal."
-
-msgid ""
-"LBaaS v2 has multiple implementations via different service plug-ins. The "
-"two most common implementations use either an agent or the Octavia services. "
-"Both implementations use the `LBaaS v2 API `_."
-msgstr ""
-"LBaaS v2 memiliki beberapa implementasi melalui berbagai service plug-ins. "
-"Dua implementasi yang paling umum menggunakan layanan agen ataupun layanan "
-"Octavia. Kedua implementasi menggunakan `LBaaS v2 API `_."
-
-msgid "LBaaS v2 has several new concepts to understand:"
-msgstr "LBaaS v2 memiliki beberapa konsep baru untuk memahami:"
-
-msgid "LBaaS v2 operations"
-msgstr "operasi LBaaS v2"
-
-msgid ""
-"Lacks support for layer-3 high-availability mechanisms such as Virtual "
-"Router Redundancy Protocol (VRRP) and Distributed Virtual Routing (DVR)."
-msgstr ""
-"Kekurangan dukungan untuk mekanisme layer-3 high-availability seperti "
-"Virtual Router Redundancy Protocol (VRRP) dan Distributed Virtual Routing "
-"(DVR)."
-
-msgid ""
-"Lacks support for security groups including basic (sanity) and anti-spoofing "
-"rules."
-msgstr ""
-"Kekurangan dukungan untuk kelompok keamanan termasuk aturan basic (sanity) "
-"dan anti-spoofing."
-
-msgid ""
-"Larger deployments typically deploy the DHCP and metadata agents on a subset "
-"of compute nodes to increase performance and redundancy. However, too many "
-"agents can overwhelm the message bus. Also, to further simplify any "
-"deployment, you can omit the metadata agent and use a configuration drive to "
-"provide metadata to instances."
-msgstr ""
-"Pengerahan yang lebih besar biasanya menggunakan DHCP dan agen metadata pada "
-"subset node komputasi untuk meningkatkan kinerja dan redundansi. Namun, "
-"terlalu banyak agen dapat membanjiri (overwhelm) pengangkutan pesan. Juga, "
-"untuk lebih menyederhanakan penyebaran, Anda dapat menghilangkan agen "
-"metadata dan menggunakan drive konfigurasi untuk memberikan metadata untuk "
-"instance."
-
-msgid ""
-"Launch an instance on a private network and retrieve the neutron port ID "
-"that was allocated. As above, use the ``cirros`` image and ``m1.tiny`` "
-"flavor:"
-msgstr ""
-"Luncurkan sebuah instance pada jaringan private dan mengambil ID port "
-"neutron yang dialokasikan. Seperti di atas, gunakan image ``cirros`` dan "
-"flavor ``m1.tiny``:"
-
-msgid "Launch an instance on the trunk"
-msgstr "Luncurkan instance pada trunk"
-
-msgid ""
-"Launch an instance using the network. For example, using the ``cirros`` "
-"image and ``m1.tiny`` flavor."
-msgstr ""
-"Luncurkan sebuah instance menggunakan jaringan. Misalnya, dengan menggunakan "
-"image ``cirros`` dan flavor ``m1.tiny``."
-
-msgid ""
-"Launch an instance with an interface on the addtional self-service network. "
-"For example, a CirrOS image using flavor ID 1."
-msgstr ""
-"Luncurkan instance dengan sebuah antarmuka pada jaringan self-service "
-"tambahan. Misalnya, image CirrOS menggunakan flavor ID 1."
-
-msgid ""
-"Launch an instance with an interface on the provider network. For example, a "
-"CirrOS image using flavor ID 1."
-msgstr ""
-"Luncurkan sebuah instance dengan sebuah antarmuka pada jaringan provider. "
-"Misalnya, image CirrOS menggunakan flavor ID 1."
-
-msgid ""
-"Launch an instance with an interface on the self-service network. For "
-"example, a CirrOS image using flavor ID 1."
-msgstr ""
-"Luncurkan sebuah instance dengan sebuah antarmuka di jaringan self-service. "
-"Misalnya, image CirrOS menggunakan flavor ID 1."
-
-msgid ""
-"Launch one or more instances. Each instance obtains IP addresses according "
-"to the segment it uses on the particular compute node."
-msgstr ""
-"Luncurkan satu atau lebih instance. Setiap instance memperoleh alamat IP "
-"sesuai dengan segmennya pada node komputasi tertentu."
-
-msgid ""
-"Launch service function instance ``vm1`` using ports ``p1`` and ``p2``, "
-"``vm2`` using ports ``p3`` and ``p4``, and ``vm3`` using ports ``p5`` and "
-"``p6``."
-msgstr ""
-"Peluncuran fungsi pelayanan instance ``vm1`` menggunakan ports ``p1`` dan "
-"``p2``, ``vm2`` menggunakan ports ``p3`` and ``p4``, dan ``vm3`` menggunakan "
-"ports ``p5`` and ``p6``."
-
-msgid ""
-"Launch the instance by specifying ``port-id`` using the value of ``port_id`` "
-"from the trunk details. Launching an instance on a subport is not supported."
-msgstr ""
-"Luncurkan instance dengan menentukan ``port-id`` menggunakan nilai "
-"``port_id`` dari rincian trunk. Peluncuran sebuah instance pada subport "
-"tidak didukung."
-
-msgid ""
-"Launch two instances, ``instance1`` on ``network1`` and ``instance2`` on "
-"``network2``. Associate a floating IP address to both instances."
-msgstr ""
-"Meluncurkan dua instance, ``instance1`` pada ``network1`` and ``instance2`` "
-"pada ``network2``. Mengasosiasikan alamat IP mengambang dengan kedua "
-"instance."
-
-msgid "Launching instances with SR-IOV ports"
-msgstr "Peluncuran instance dengan port SR-IOV"
-
-msgid "Layer 2 (Ethernet and Switching)"
-msgstr "Lapisan 2 (Ethernet dan Switching)"
-
-msgid "Layer 3 (IP and Routing)"
-msgstr "Lapisan 3 (IP dan Routing)"
-
-msgid "Layer-3 agent"
-msgstr "Layer-3 agent (agen lapisan-3)"
-
-msgid "Legacy nova-network to OpenStack Networking (neutron)"
-msgstr "Legacy nova-network ke OpenStack Networking (neutron)"
-
-msgid ""
-"Like TCP, the sockets API is the most common API for writing UDP-based "
-"applications. The sockets API provides a *message-oriented* interface for "
-"writing UDP applications: a programmer sends data over UDP by transmitting a "
-"fixed-sized message. If an application requires retransmissions of lost "
-"packets or a well-defined ordering of received packets, the programmer is "
-"responsible for implementing this functionality in the application code."
-msgstr ""
-"Seperti TCP, soket API adalah API yang paling umum untuk menulis aplikasi "
-"berbasis UDP. Soket API menyediakan antarmuka *message-oriented* untuk "
-"menulis aplikasi UDP: programmer mengirimkan data melalui UDP dengan "
-"mengirimkan pesan berukuran tetap. Jika aplikasi membutuhkan transmisi ulang "
-"paket yang hilang atau memesan paket yang diterima didefinisikan dengan "
-"baik, programmer bertanggung jawab untuk melaksanakan fungsi ini dalam kode "
-"aplikasi."
-
-msgid "Limitations"
-msgstr "Limitations (keterbatasan)"
-
-msgid "Limitations and issues"
-msgstr "Keterbatasan dan masalah"
-
-msgid "Linux Bridge"
-msgstr "Linux Bridge (jembatan Linux)"
-
-msgid "Linux bridge"
-msgstr "Linux bridge (jembatan Linux)"
-
-msgid "Linux bridge & Linux bridge agent"
-msgstr "Agen jembatan Linux & jembatan Linux"
-
-msgid "Linux bridge agent"
-msgstr "Agen jembatan Linux"
-
-msgid "Linux bridge mechanism and Linux bridge agent"
-msgstr "Mekanisme jembatan Linux dan agen jembatan Linux"
-
-msgid "Linux bridge mechanism driver"
-msgstr "Linux bridge mechanism driver (driver mekanisme jembatan linux)"
-
-msgid "Linux bridge: High availability using VRRP"
-msgstr "Linux bridge: High availability menggunakan VRRP"
-
-msgid "Linux bridge: Provider networks"
-msgstr "Linux bridge: Provider networks"
-
-msgid "Linux bridge: Self-service networks"
-msgstr "Linux bridge: Jaringan self-service"
-
-msgid "Linux bridging for implementing a layer 2 network"
-msgstr "Linux bridging untuk implementasi jaringan layer 2"
-
-msgid ""
-"Linux distributions often package older releases of Open vSwitch that can "
-"introduce issues during operation with the Networking service. We recommend "
-"using at least the latest long-term stable (LTS) release of Open vSwitch for "
-"the best experience and support from Open vSwitch. See ``__ for available releases and the `installation "
-"instructions `__ "
-"for"
-msgstr ""
-"Distribusi Linux sering memaket rilis yang lebih tua dari Open vSwitch yang "
-"dapat menimbulkan masalah selama operasi dengan layanan Networking. "
-"Sebaiknya gunakan setidaknya rilis jangka panjang yang stabil (LTS) terbaru "
-"dari Open vSwitch untuk pengalaman terbaik dan dukungan dari Open vSwitch. "
-"Lihat `` __ untuk rilis yang tersedia dan "
-"`installation instructions `__"
-
-msgid "Linux network namespaces"
-msgstr "Namespaces jaringan Linux"
-
-msgid "List DHCP agents that host a specified network:"
-msgstr "Daftar agen DHCP yang menjadi tempat jaringan tertentu:"
-
-msgid "List all agents:"
-msgstr "Daftar semua agen:"
-
-msgid "List the networks hosted by a given DHCP agent:"
-msgstr "Daftar jaringan yang ditempati oleh agen DHCP yang diberikan:"
-
-msgid "Listener"
-msgstr "Listener"
-
-msgid "Live migration is not supported for instances with SR-IOV ports."
-msgstr ""
-"Migrasi langsung (live) tidak didukung untuk instance dengan port SR-IOV."
-
-msgid "Load Balancer as a Service (LBaaS)"
-msgstr "Load Balancer as a Service (LBaaS)"
-
-msgid "Load balancer"
-msgstr "Penyeimbang beban (load balancer)"
-
-msgid "Load balancers"
-msgstr "Load balancers (penyeimbang beban)"
-
-msgid ""
-"Load balancers can be software-based or hardware-based devices that allow "
-"traffic to evenly be distributed across several servers. By distributing the "
-"traffic across multiple servers, it avoids overload of a single server "
-"thereby preventing a single point of failure in the product. This further "
-"improves the performance, network throughput, and response time of the "
-"servers. Load balancers are typically used in a 3-tier architecture. In this "
-"model, a load balancer receives a request from the front-end web server, "
-"which then forwards the request to one of the available back-end database "
-"servers for processing. The response from the database server is passed back "
-"to the web server for further processing."
-msgstr ""
-"Penyeimbang beban dapat menjadi perangkat software-based atau hardware-based "
-"yang memungkinkan lalu lintas terdistribusi secara merata di beberapa "
-"server. Dengan mendistribusikan lalu lintas di beberapa server, penyeimbang "
-"beban menghindari kelebihan beban di sebuah server tunggal sehingga mencegah "
-"satu titik kegagalan dalam produk. Hal ini semakin meningkatkan kinerja, "
-"throughput jaringan, dan waktu respon dari server. Penyeimbang beban "
-"biasanya digunakan dalam arsitektur 3-tier. Dalam model ini, penyeimbang "
-"beban menerima permintaan dari web server front-end, yang kemudian "
-"meneruskan permintaannya ke salah satu server database back-end yang "
-"tersedia untuk diproses. Tanggapan dari server database dilewatkan kembali "
-"ke web server untuk diproses lebih lanjut."
-
-msgid ""
-"Load balancers can listen for requests on multiple ports. Each one of those "
-"ports is specified by a listener."
-msgstr ""
-"Penyeimbang beban dapat mendengarkan permintaan pada beberapa port. Masing-"
-"masing dari port itu ditentukan oleh pendengar (listener)."
-
-msgid ""
-"Load balancers that are deployed on a public or provider network that are "
-"accessible to external clients do not need a floating IP address assigned. "
-"External clients can directly access the virtual IP address (VIP) of those "
-"load balancers."
-msgstr ""
-"Balancers beban yang digunakan pada jaringan publik atau penyedia yang dapat "
-"diakses oleh klien eksternal tidak membutuhkan alamat IP mengambang yang "
-"ditetapkan. Klien eksternal dapat langsung mengakses virtual IP address "
-"(VIP) dari load balancer mereka"
-
-msgid ""
-"Log in to the ``myserver4`` VM, and run ``udhcpc``, ``dhclient`` or other "
-"DHCP client."
-msgstr ""
-"Log in ke ``myserver4`` VM, dan jalankan ``udhcpc``, ``dhclient`` atau klien "
-"DHCP lainnya."
-
-msgid ""
-"Look at the ``availability_zones`` attribute of each resource to confirm in "
-"which zone the resource is hosted:"
-msgstr ""
-"Lihatlah atribut ``availability_zones`` dari setiap sumber untuk "
-"mengkonfirmasi zona dimana sumber disimpan (hosted):"
-
-msgid "ML2 driver support matrix"
-msgstr " Matriks dukungan driver ML2"
-
-msgid "ML2 plug-in"
-msgstr "ML2 plug-in"
-
-msgid "MTU considerations"
-msgstr "Pertimbangan MTU"
-
-msgid "MacVTap"
-msgstr "MacVTap"
-
-msgid "MacVTap & MacVTap agent"
-msgstr "Agen MacVTap & MacVTap"
-
-msgid "MacVTap agent"
-msgstr "Agen MacVTap"
-
-msgid "MacVTap mechanism driver and MacVTap agent"
-msgstr "Driver mekanisme MacVTap dan agen MacVTap"
-
-msgid ""
-"MacVTap offers a direct connection with very little overhead between "
-"instances and down to the adapter. You can use MacVTap agent on the compute "
-"node when you require a network connection that is performance critical. It "
-"does not require specific hardware (like with SRIOV)."
-msgstr ""
-"MacVTap menawarkan koneksi langsung dengan overhead yang sangat kecil antara "
-"instance dan turun sampai adaptor. Anda dapat menggunakan agen MacVTap pada "
-"node komputasi ketika Anda membutuhkan koneksi jaringan kinerja kritis. Ini "
-"tidak memerlukan perangkat keras khusus (seperti dengan SRIOV)."
-
-msgid "Macvtap agent"
-msgstr "agen Macvtap"
-
-msgid "Macvtap mechanism driver"
-msgstr "Driver mekanisme Macvtap"
-
-msgid ""
-"Make Compute REST API read-write again. This means legacy networking DB is "
-"now unused, new changes are now stored in the Networking DB, and no rollback "
-"is possible from here without losing those new changes."
-msgstr ""
-"Buat Compute REST API read-write lagi. Ini berarti legacy networking DB "
-"sekarang tidak terpakai, perubahan baru sekarang disimpan di Networking DB, "
-"dan tidak ada rollback adalah mungkin dari sini tanpa kehilangan perubahan "
-"baru."
-
-msgid "Make sure both DHCP agents hosting ``net2``:"
-msgstr "Pastikan kedua agen DHCP menjadi host ``net2``:"
-
-msgid ""
-"Make sure that subnets on an external network are created from the subnet "
-"pools created above:"
-msgstr ""
-"Pastikan bahwa subnet pada jaringan eksternal diciptakan dari kolam subnet "
-"yang dibuat di atas:"
-
-msgid "Make sure that the router's ``ha`` attribute has changed to ``False``."
-msgstr "Pastikan bahwa atribut router ``ha`` telah berubah menjadi ``False``."
-
-msgid "Make sure that the router's ``ha`` attribute has changed to ``True``."
-msgstr "Pastikan bahwa atribut router ``ha`` telah berubah menjadi ``True``."
-
-msgid "Make the Compute REST API read-only."
-msgstr "Buatlah Compute REST API read-only."
-
-msgid "Make the Networking API read-write and disable legacy networking."
-msgstr "Membuat Networking API read-write dan menonaktifkan legacy networking."
-
-msgid "Managed Configuration Flag = 0"
-msgstr "Managed Configuration Flag = 0"
-
-msgid "Management impact"
-msgstr "Dampak manajemen"
-
-msgid ""
-"Management: Handles API requests from clients and control plane traffic for "
-"OpenStack services including their dependencies."
-msgstr ""
-"Management: Menangani permintaan API dari klien dan control plane traffic "
-"untuk layanan OpenStack termasuk dependensinya.."
-
-msgid "Manages agents"
-msgstr "Kelola agen"
-
-msgid "Managing agents in neutron deployment"
-msgstr "Mengelola agen dalam pengerahan neutron"
-
-msgid "Managing assignment of networks to DHCP agent"
-msgstr "Mengelola tugas jaringan agen DHCP"
-
-msgid "Map segments to compute nodes."
-msgstr "Segmen peta untuk node komputasi"
-
-msgid "Mechanism Driver"
-msgstr "Mechanism Driver (driver mekanisme)"
-
-msgid "Mechanism drivers"
-msgstr "Driver mekanisme"
-
-msgid "Mechanism drivers and L2 agents"
-msgstr "Driver mekanisme dan agen L2"
-
-msgid ""
-"Mechanism drivers can utilize L2 agents (via RPC) and/or interact directly "
-"with external devices or controllers."
-msgstr ""
-"Driver mekanisme dapat memanfaatkan agen L2 (melalui RPC) dan/atau "
-"berinteraksi langsung dengan perangkat eksternal atau controller."
-
-msgid "Mellanox"
-msgstr "Mellanox"
-
-msgid "Member"
-msgstr "Member"
-
-msgid ""
-"Members are servers that serve traffic behind a load balancer. Each member "
-"is specified by the IP address and port that it uses to serve traffic."
-msgstr ""
-"Anggota adalah server yang melayani lalu lintas di belakang penyeimbang "
-"beban. Setiap anggota ditentukan oleh alamat IP dan port yang menggunakannya "
-"untuk melayani lalu lintas."
-
-msgid ""
-"Members may go offline from time to time and health monitors divert traffic "
-"away from members that are not responding properly. Health monitors are "
-"associated with pools."
-msgstr ""
-"Anggota dapat offline dari waktu ke waktu dan monitor kesehatan dapat "
-"mengalihkan lalu lintas dari anggota yang tidak merespon dengan baik. "
-"Monitor kesehatan dihubungankan dengan kolam."
-
-msgid "Messaging queue"
-msgstr "Antrian pesan"
-
-# #-#-#-#-# config_ml2_plug_in.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# intro_os_networking_service.pot (Networking Guide 0.9) #-#-#-#-#
-msgid "Metadata"
-msgstr "Metadata"
-
-msgid "Metadata agent"
-msgstr "Agen Metadata"
-
-msgid ""
-"Migrating a router from distributed only, HA only, or legacy to distributed "
-"HA is not supported at this time. The router must be created as distributed "
-"HA. The reverse direction is also not supported. You cannot reconfigure a "
-"distributed HA router to be only distributed, only HA, or legacy."
-msgstr ""
-"Migrasi router dari hanya didistribusikan, hanya HA, atau legacy ke HA yang "
-"didistribusikan tidak didukung saat ini. Router harus dibuat sebagai HA yang "
-"didistribusikan. Arah sebaliknya juga tidak didukung. Anda tidak dapat "
-"mengkonfigurasi ulang router HA yang didistribusikan ke hanya "
-"didistribusikan, hanya HA, atau legacy (warisan)."
-
-msgid "Migration"
-msgstr "Migrasi"
-
-msgid "Migration Completed!"
-msgstr "Migrasi Selesai!"
-
-msgid "Migration process overview"
-msgstr "ikhtisar proses migrasi"
-
-msgid "Miscellaneous"
-msgstr "Miscellaneous (lain-lain)"
-
-msgid "Modify ``/etc/neutron/policy.json`` policy entries as follows:"
-msgstr ""
-"Modifikasi entri kebijakan ``/etc/neutron/policy.json`` sebagai berikut:"
-
-msgid "Modify the compute nodes with the following components:"
-msgstr "Memodifikasi node komputasi dengan komponen-komponen berikut:"
-
-msgid ""
-"More information about L2 population see the `OpenStack Manuals `__."
-msgstr ""
-"Informasi lebih lanjut tentang populasi L2 lihat `Manual OpenStack`__."
-
-msgid "Multiple ``--config-file`` options can be passed if needed."
-msgstr "Beberapa opsi ``--config-file`` dapat dilewatkan jika diperlukan."
-
-msgid ""
-"Multiple mechanism and type drivers can be used simultaneously to access "
-"different ports of the same virtual network."
-msgstr ""
-"Beberapa driver mekanisme dan tipe dapat digunakan secara bersamaan untuk "
-"mengakses port yang berbeda dari jaringan virtual yang sama."
-
-msgid "Multiple smaller layer-2 networks"
-msgstr "Beberapa jaringan lapisan-2 lebih kecil "
-
-msgid ""
-"Multiple smaller layer-2 networks scale better and shrink failure domains, "
-"but leave network selection to the user. Without additional information, "
-"users cannot easily differentiate these networks."
-msgstr ""
-"Beberapa jaringan lapisan-2 lebih kecil berskala yang lebih baik dan "
-"menyusutkan failure domain, tetapi meninggalkan pilihan jaringan untuk "
-"pengguna. Tanpa informasi tambahan, pengguna tidak dapat dengan mudah "
-"membedakan jaringan ini."
-
-msgid "N/A"
-msgstr "N/A"
-
-msgid "NAT & Floating IPs"
-msgstr "NAT & IP mengambang"
-
-msgid "NO"
-msgstr "NO"
-
-msgid "NO*"
-msgstr "NO* "
-
-msgid "NO**"
-msgstr "NO**"
-
-msgid "Name"
-msgstr "Name"
-
-msgid "Name resolution for instances"
-msgstr "Resolusi nama untuk instance"
-
-msgid "Name: vm1"
-msgstr "Name: vm1"
-
-msgid "Name: vm2"
-msgstr "Name: vm2"
-
-msgid "Name: vm3"
-msgstr "Name: vm3"
-
-msgid "Native Open vSwitch firewall driver"
-msgstr "Driver firewall Open vSwitch asli"
-
-msgid ""
-"Network IP Availability is an information-only API extension that allows a "
-"user or process to determine the number of IP addresses that are consumed "
-"across networks and the allocation pools of their subnets. This extension "
-"was added to neutron in the Mitaka release."
-msgstr ""
-"Network IP Availability merupakan information-only API extension yang "
-"mengizinkan pengguna atau proses untuk menentukan jumlah alamat IP yang "
-"dikonsumsi di seluruh jaringan dan kolam alokasi subnet mereka. Extension "
-"(perpanjangan) ini telah ditambahkan ke neutron dalam rilis Mitaka."
-
-msgid "Network address translation"
-msgstr "Terjemahan alamat Jaringan"
-
-msgid "Network components"
-msgstr "Komponen jaringan"
-
-msgid ""
-"Network devices such as switches and routers can mark traffic so that it is "
-"handled with a higher priority to fulfill the QoS conditions agreed under "
-"the SLA. In other cases, certain network traffic such as Voice over IP "
-"(VoIP) and video streaming needs to be transmitted with minimal bandwidth "
-"constraints. On a system without network QoS management, all traffic will be "
-"transmitted in a \"best-effort\" manner making it impossible to guarantee "
-"service delivery to customers."
-msgstr ""
-"Perangkat jaringan seperti switch dan router bisa menandai lalu lintas "
-"sehingga hal itu ditangani dengan prioritas yang lebih tinggi untuk memenuhi "
-"persetujuan persyaratan QoS di bawah SLA. Dalam kasus lain, lalu lintas "
-"jaringan tertentu seperti Voice over IP (VoIP) dan video streaming perlu "
-"ditransmisikan dengan keterbatasan bandwidth minimal. Pada sistem tanpa "
-"manajemen QoS jaringan, semua lalu lintas akan ditransmisikan dalam cara "
-"\"best-effort\" membuat mustahil untuk menjamin pelayanan kepada pelanggan."
-
-msgid "Network namespaces"
-msgstr "Network namespaces (namespace jaringan)"
-
-msgid "Network node"
-msgstr "Node jaringan"
-
-msgid "Network node 1"
-msgstr "Network node 1"
-
-msgid "Network node 1:"
-msgstr "Network node 1:"
-
-msgid "Network node 2"
-msgstr "Network node 2"
-
-msgid "Network node 2:"
-msgstr "Network node 2:"
-
-msgid "Network nodes"
-msgstr "Network nodes (simpul jaringan)"
-
-msgid "Network or compute nodes"
-msgstr "Jaringan atau node komputasi"
-
-msgid "Network scheduler"
-msgstr "Alat penjadwal jaringan"
-
-msgid "Network traffic flow"
-msgstr "Arus lalu lintas jaringan"
-
-msgid ""
-"Network trunking consists of a service plug-in and a set of drivers that "
-"manage trunks on different layer-2 mechanism drivers. Users can create a "
-"port, associate it with a trunk, and launch an instance on that port. Users "
-"can dynamically attach and detach additional networks without disrupting "
-"operation of the instance."
-msgstr ""
-"Jaringan trunking terdiri dari service plug-in dan satu set driver yang "
-"mengelola trunk di berbagai driver mekanisme lapisan-2. Pengguna dapat "
-"membuat sebuah port, mengasosiasikannya dengan trunk, dan meluncurkan sebuah "
-"instance pada port tersebut. Pengguna dinamis dapat menghubungkan dan "
-"melepaskan jaringan tambahan tanpa mengganggu pengoperasian instance."
-
-msgid "Network type drivers"
-msgstr " Driver tipe jaringan"
-
-msgid ""
-"Network: Contains the OpenStack Networking service layer-3 (routing) "
-"component. High availability options may include additional components."
-msgstr ""
-"Network: Berisi layanan OpenStack Networking komponen lapisan-3 (routing). "
-"Opsi ketersediaan tinggi (high availability) mungkin termasuk komponen "
-"tambahan."
-
-msgid ""
-"Networking allows users to create multiple provider or project networks "
-"using VLAN IDs (802.1Q tagged) that correspond to VLANs present in the "
-"physical network. This allows instances to communicate with each other "
-"across the environment. They can also communicate with dedicated servers, "
-"firewalls, load balancers, and other networking infrastructure on the same "
-"layer 2 VLAN."
-msgstr ""
-"Jaringan mengizinkan pengguna untuk membuat beberapa provider atau proyek "
-"jaringan menggunakan ID VLAN (802.1Q tag) yang sesuai dengan VLAN yang ada "
-"dalam jaringan fisik. Hal ini mengizinkan instance untuk berkomunikasi satu "
-"sama lain di lingkungan. Mereka juga dapat berkomunikasi dengan dedicated "
-"server, firewall, load balancer, dan infrastruktur jaringan lain pada layer "
-"2 VLAN yang sama."
-
-msgid "Networks"
-msgstr "Networks"
-
-msgid "Networks and network interfaces"
-msgstr "Jaringan dan antarmuka jaringan"
-
-msgid ""
-"Networks created before the Mitaka release do not contain explicitly named "
-"address scopes, unless the network contains subnets from a subnet pool that "
-"belongs to a created or updated address scope. The Networking service "
-"preserves backwards compatibility with pre-Mitaka networks through special "
-"address scope properties so that these networks can perform advanced routing:"
-msgstr ""
-"Jaringan yang dibuat sebelum rilis Mitaka tidak mengandung secara eksplisit "
-"lingkup address bernama, kecuali jaringan berisi subnet dari kolam subnet "
-"milik lingkup alamat yang dibuat atau diperbarui. Layanan Networking "
-"mempertahankan kompatibilitas mundur dengan jaringan pra-Mitaka melalui "
-"sifat lingkup alamat khusus sehingga jaringan ini bisa melakukan routing "
-"canggih:"
-
-msgid "Neutron configuration file ``/etc/neutron/neutron.conf``:"
-msgstr "file konfigurasi Neutron ``/etc/neutron/neutron.conf``:"
-
-msgid "Neutron dhcpv6_pd_agent"
-msgstr "Neutron dhcpv6_pd_agent"
-
-msgid ""
-"Neutron project networks that are assigned Global Unicast Address (GUA) "
-"prefixes and addresses don’t require NAT on the neutron router external "
-"gateway port to access the outside world. As a consequence of the lack of "
-"NAT the external router port doesn’t require a GUA to send and receive to "
-"the external networks. This implies a GUA IPv6 subnet prefix is not "
-"necessarily needed for the neutron external network. By default, a IPv6 LLA "
-"associated with the external gateway port can be used for routing purposes. "
-"To handle this scenario, the implementation of router-gateway-set API in "
-"neutron has been modified so that an IPv6 subnet is not required for the "
-"external network that is associated with the neutron router. The LLA address "
-"of the upstream router can be learned in two ways."
-msgstr ""
-"Jaringan proyek neutron yang ditugaskan di alamat dan prefiks Global Unicast "
-"Address (GUA) tidak memerlukan NAT pada gateway port eksternal router "
-"neutron untuk mengakses dunia luar. Sebagai konsekuensi dari kurangnya NAT "
-"port router eksternal tidak memerlukan GUA untuk mengirim dan menerima "
-"dengan jaringan eksternal. Ini berarti subnet prefix GUA IPv6 tidak selalu "
-"diperlukan untuk jaringan eksternal neutron. Secara default, LLA IPv6 "
-"terkait dengan port eksternal gateway dapat digunakan untuk tujuan routing. "
-"Untuk menangani skenario ini, pelaksanaan router-gateway-set API di neutron "
-"telah dimodifikasi sehingga suatu subnet IPv6 tidak diperlukan untuk "
-"jaringan eksternal yang berhubungan dengan router neutron. Alamat LLA dari "
-"router hulu dapat dipelajari dalam dua cara."
-
-msgid ""
-"Neutron routers, by default, will NAT traffic from internal networks to "
-"external networks."
-msgstr ""
-"Router neutron, secara default, akan men-NAT (Network Address Translation) "
-"lalu lintas dari jaringan internal untuk jaringan eksternal."
-
-msgid "Neutron subnets and the IPv6 API attributes"
-msgstr "Subnet neutron dan atribut API IPv6"
-
-msgid "Neutron's Distributed Router feature and IPv6"
-msgstr "Fitur Router Terdistribusi milik Neutron dan IPv6"
-
-msgid ""
-"Next, you'll need to migrate each hypervisor. To do that, follow these "
-"steps:"
-msgstr ""
-"Berikutnya, Anda akan perlu memigrasikan setiap hypervisor. Untuk "
-"melakukannya, ikuti langkah berikut:"
-
-msgid "No"
-msgstr "No"
-
-msgid "No additional configuration required."
-msgstr "Tidak ada konfigurasi tambahan yang diperlukan."
-
-msgid ""
-"No additional configurations required for the mechanism driver. Additional "
-"agent configuration is required. For details, see the related *L2 agent* "
-"section below."
-msgstr ""
-"Tidak ada konfigurasi tambahan yang dibutuhkan untuk driver mekanisme. "
-"Konfigurasi agen tambahan diperlukan. Untuk rincian, lihat bagian bawah *L2 "
-"agent* terkait."
-
-msgid ""
-"No additional configurations required for the mechanism driver. Additional "
-"agent configuration is required. Please see the related section."
-msgstr ""
-"Tidak ada konfigurasi tambahan yang dibutuhkan untuk driver mekanisme. "
-"Konfigurasi agen tambahan diperlukan. Silakan lihat bagian terkait."
-
-msgid "No changes."
-msgstr "Tidak ada perubahan."
-
-msgid "Nodes"
-msgstr "Node (simpul)"
-
-msgid "North-south"
-msgstr "North-south (utara-selatan)"
-
-msgid "North-south scenario 1: Instance with a fixed IP address"
-msgstr "North-south scenario 1: Instance dengan alamat IP tetap"
-
-msgid "North-south scenario 2: Instance with a floating IPv4 address"
-msgstr "North-south scenario 2: Instance dengan alamat IPv4 mengambang"
-
-msgid "North-south scenario: Instance with a fixed IP address"
-msgstr "North-south scenario: Instance dengan alamat IP tetap"
-
-msgid "Not Defined"
-msgstr "Not Defined (tak terdefinisikan)"
-
-msgid "Not currently implemented in the reference implementation."
-msgstr "Saat ini tidak dilaksanakan dalam implementasi referensi."
-
-msgid "Not specified."
-msgstr "Tidak ditentukan."
-
-msgid ""
-"Note that if you are using VLANs on your physical switches to implement "
-"project isolation in your OpenStack cloud, you must ensure that all of your "
-"switchports are configured as trunk ports."
-msgstr ""
-"Perhatikan bahwa jika Anda menggunakan VLAN pada switch fisik Anda untuk "
-"menerapkan isolasi proyek di cloud OpenStack Anda, Anda harus memastikan "
-"bahwa semua switchport Anda dikonfigurasi sebagai trunk port."
-
-msgid "Note that in this use case:"
-msgstr "Perhatikan dalam use case ini:"
-
-msgid ""
-"Nova uses the ``auto allocated topology`` feature with API micro version "
-"2.37 or later. This is because, unlike the neutron feature which was "
-"implemented in the Mitaka release, the integration for nova was completed "
-"during the Newton release cycle. Note that the CLI option ``--nic`` can be "
-"omitted regardless of the microversion used as long as there is no more than "
-"one network available to the project, in which case nova fails with a 400 "
-"error because it does not know which network to use. Furthermore, nova does "
-"not start using the feature, regardless of whether or not a user requests "
-"micro version 2.37 or later, unless all of the ``nova-compute`` services are "
-"running Newton-level code."
-msgstr ""
-"Nova menggunakan fitur ``auto allocated topology`` dengan versi mikro API "
-"2.37 atau yang lebih baru. Ini karena, tidak seperti fitur neutron yang "
-"diimplementasikan dalam rilis Mitaka, integrasi untuk nova selesai selama "
-"siklus pelepasan Newton. Perhatikan bahwa opsi CLI ``--nic`` dapat "
-"dihilangkan terlepas dari mikrofi yang digunakan selama tidak ada lebih dari "
-"satu jaringan yang tersedia untuk proyek ini, dalam hal ini nova gagal "
-"dengan kesalahan 400 karena tidak tahu mana jaringan yang akan digunakan. "
-"Selanjutnya, nova tidak mulai menggunakan fitur ini, terlepas dari apakah "
-"pengguna meminta versi mikro 2.37 atau yang lebih baru, kecuali semua "
-"layanan ``nova-compute`` menjalankan kode tingkat Newton."
-
-msgid ""
-"Now consider the scenario that all of the switchports in the first switch "
-"become occupied, and so the organization buys a second switch and connects "
-"it to the first switch to expand the available number of switchports. The "
-"second switch is also configured to support VLAN IDs 10, 11, and 12. Now "
-"imagine host A connected to switch 1 on a port configured for VLAN ID 10 "
-"sends an Ethernet frame intended for host B connected to switch 2 on a port "
-"configured for VLAN ID 10. When switch 1 forwards the Ethernet frame to "
-"switch 2, it must communicate that the frame is associated with VLAN ID 10."
-msgstr ""
-"Sekarang mempertimbangkan skenario bahwa semua switchport di switch pertama "
-"sedang dipakai, dan perusahaan (organization) membeli switch kedua dan "
-"menghubungkannya ke switch pertama untuk memperluas jumlah yang tersedia "
-"dari switchports. Switch kedua juga dikonfigurasi untuk mendukung VLAN ID "
-"10, 11, dan 12. Sekarang bayangkan bahwa host A terhubung dengan switch 1 "
-"pada port yang dikonfigurasi untuk VLAN ID 10 mengirimkan sebuah frame "
-"Ethernet yang ditujukan untuk host B yang terhubung dengan switch 2 pada "
-"port yang dikonfigurasi untuk VLAN ID 10. Ketika switch 1 meneruskan frame "
-"Ethernet untuk switch 2, switch 1 harus berkomunikasi frame yang berhubungan "
-"dengan VLAN ID 10."
-
-msgid ""
-"Now project ``838030a7bf3c4d04b4b054c0f0b2b17c`` is able to see the network "
-"when running :command:`openstack network list` and :command:`openstack "
-"network show` and can attach router gateway ports to that network. No other "
-"users (other than admins and the owner) are able to see the network."
-msgstr ""
-"Sekarang proyek `` 838030a7bf3c4d04b4b054c0f0b2b17c`` mampu melihat jaringan "
-"ketika berjalan :command:`openstack network list` dan :command:`openstack "
-"network show` dan dapat menghubungkan router gateway port ke jaringan itu. "
-"Tidak ada pengguna lain (selain admin dan pemilik) dapat melihat jaringan."
-
-msgid "Now, use them. It is easy to create a subnet from a pool:"
-msgstr ""
-"Sekarang, silahkan menggunakannya. Sangat mudah untuk membuat subnet dari "
-"kolam:"
-
-msgid "OVS"
-msgstr "OVS"
-
-msgid "OVS 2.4"
-msgstr "OVS 2.4"
-
-msgid "OVS 2.5"
-msgstr "OVS 2.5"
-
-msgid "OVS agent"
-msgstr "Agen OVS"
-
-msgid ""
-"OVS with DPDK, or OVS-DPDK, can be used to provide high-performance "
-"networking between instances on OpenStack compute nodes."
-msgstr ""
-"OVS dengan DPDK, atau OVS-DPDK, dapat digunakan untuk menyediakan jaringan "
-"high-performance antara instance pada node komputasi OpenStack."
-
-msgid "Obtain access to the instance."
-msgstr "Dapatkan akses ke instance."
-
-msgid ""
-"Octavia provides additional capabilities for load balancers, including using "
-"a compute driver to build instances that operate as load balancers. The "
-"`Hands on Lab - Install and Configure OpenStack Octavia `_ session at the OpenStack "
-"Summit in Tokyo provides an overview of Octavia."
-msgstr ""
-"Octavia memberikan kemampuan tambahan untuk penyeimbang beban, meliputi "
-"penggunaan driver komputasi untuk membangun instance yang beroperasi sebagai "
-"penyeimbang beban. Sesi `Hands on Lab - Install and Configure OpenStack "
-"Octavia `_ pada "
-"KTT OpenStack di Tokyo memberikan gambaran tentang Octavia."
-
-msgid "Off"
-msgstr "Off"
-
-msgid ""
-"Offline migration requires all Neutron server instances in the cluster to be "
-"shutdown before you apply any contract scripts."
-msgstr ""
-"Migrasi Offline memerlukan semua instance server Neutron dalam cluster untuk "
-"shutdown sebelum Anda menerapkan script kontrak."
-
-msgid ""
-"Often, an application running on a host with a private IP address will need "
-"to connect to a server on the public Internet. An example is a user who "
-"wants to access a public website such as www.openstack.org. If the IP "
-"packets reach the web server at www.openstack.org with a private IP address "
-"as the source, then the web server cannot send packets back to the sender."
-msgstr ""
-"Seringkali, sebuah aplikasi yang berjalan pada host dengan alamat IP private "
-"akan harus terhubung ke server di Internet publik. Contohnya adalah pengguna "
-"yang ingin mengakses situs publik seperti www.openstack.org. Jika paket IP "
-"mencapai server web di www.openstack.org dengan alamat IP private sebagai "
-"sumber, maka web server tidak dapat mengirim paket kembali ke pengirim."
-
-msgid ""
-"On Ubuntu, modify the ``[fwaas]`` section in the ``/etc/neutron/fwaas_driver."
-"ini`` file instead of ``/etc/neutron/neutron.conf``."
-msgstr ""
-"Pada Ubuntu, memodifikasi bagian ``[fwaas]`` dalam file ``/etc/neutron/"
-"fwaas_driver.ini`` dari pada ``/etc/neutron/neutron.conf``."
-
-msgid ""
-"On Ubuntu, the iptables ruleset that libvirt creates includes the following "
-"rules::"
-msgstr ""
-"Pada Ubuntu, iptables ruleset dimana libvirt menciptakan, ruleset mencakup "
-"aturan berikut ::"
-
-msgid ""
-"On a Linux machine, any of the following commands displays the routing table:"
-msgstr "Pada mesin Linux, semua perintah berikut menampilkan routing table:"
-
-msgid "On compute nodes:"
-msgstr "Pada node komputasi:"
-
-msgid "On each compute node, create the VFs via the PCI SYS interface:"
-msgstr "Pada setiap node komputasi, buatlah VF melalui antarmuka PCI SYS:"
-
-msgid ""
-"On each compute node, verify creation of a ``qrouter`` namespace with the "
-"same ID."
-msgstr ""
-"Pada setiap node komputasi, lakukan verifikasi penciptaan namespace "
-"``qrouter`` dengan ID yang sama."
-
-msgid "On each compute node, verify creation of a second ``qdhcp`` namespace."
-msgstr ""
-"Pada setiap node komputasi, lakukan verifikasi penciptaan namespace "
-"``qdhcp`` kedua."
-
-msgid "On each compute node, verify creation of the ``qdhcp`` namespace."
-msgstr ""
-"Pada setiap node komputasi, lakukan verifikasi penciptaan namespace "
-"``qdhcp``."
-
-msgid ""
-"On each network node, show the IP address of interfaces in the ``qrouter`` "
-"namespace. With the exception of the VRRP interface, only one namespace "
-"belonging to the master router instance contains IP addresses on the "
-"interfaces."
-msgstr ""
-"Pada setiap node jaringan, tampilkan alamat IP dari antarmuka dalam "
-"namespace ``qrouter``. Dengan pengecualian dari interface VRRP, hanya satu "
-"namespace yang menjadi milik instance router utama berisi alamat IP pada "
-"antarmuka."
-
-msgid ""
-"On each network node, verify creation of a ``qrouter`` namespace with the "
-"same ID."
-msgstr ""
-"Pada setiap node jaringan, lakukan verifikasi penciptaan namespace "
-"``qrouter`` dengan ID yang sama."
-
-msgid ""
-"On every controller node running the ``nova-scheduler`` service, add "
-"``PciPassthroughFilter`` to ``scheduler_default_filters`` to enable "
-"``PciPassthroughFilter`` by default. Also ensure "
-"``scheduler_available_filters`` parameter under the ``[DEFAULT]`` section in "
-"``nova.conf`` is set to ``all_filters`` to enable all filters provided by "
-"the Compute service."
-msgstr ""
-"Pada setiap controller node yang menjalankan layanan ``nova-scheduler``, "
-"tambahkan ``PciPassthroughFilter`` ke ``scheduler_default_filters`` untuk "
-"mengaktifkan ``PciPassthroughFilter`` secara default. Juga memastikan "
-"parameter ``scheduler_available_filters`` bawah bagian ``[DEFAULT] `` dalam "
-"``nova.conf`` diatur ke ``all_filters`` untuk mengaktifkan semua filter yang "
-"disediakan oleh layanan Compute."
-
-msgid "On network nodes:"
-msgstr "Pada node jaringan:"
-
-msgid ""
-"On nodes running the Open vSwitch agent, edit the ``openvswitch_agent.ini`` "
-"file and enable the firewall driver."
-msgstr ""
-"Pada node yang menjalankan agen Open vSwitch, edit file ``openvswitch_agent."
-"ini`` dan aktifkan driver firewall."
-
-msgid ""
-"On some PCI devices, observe that when changing the amount of VFs you "
-"receive the error ``Device or resource busy``. In this case, you must first "
-"set ``sriov_numvfs`` to ``0``, then set it to your new value."
-msgstr ""
-"Pada beberapa perangkat PCI, amati bahwa ketika mengubah jumlah VF dimana "
-"Anda menerima kesalahan ``Device or resource busy``. Dalam hal ini, Anda "
-"harus terlebih dahulu menetapkan ``sriov_numvfs`` ke ``0``, kemudian set ke "
-"nilai baru Anda."
-
-msgid ""
-"On the compute node containing the instance, verify creation of the ``fip`` "
-"namespace with the same ID as the provider network."
-msgstr ""
-"Pada node komputasi yang berisi instance, lakukan verifikasi penciptaan "
-"namespace ``fip`` dengan ID yang sama dengan jaringan provider."
-
-msgid ""
-"On the controller node or any host with access to the provider network, "
-"``ping`` the IPv4 and IPv6 addresses of the instance."
-msgstr ""
-"Pada controller node atau host dengan akses ke jaringan provider, lakukan "
-"``ping`` IPv4 dan alamat IPv6 instance."
-
-msgid ""
-"On the controller node or any host with access to the provider network, "
-"``ping`` the IPv6 address of the instance."
-msgstr ""
-"Pada controller node atau host dengan akses ke jaringan provider, lakukan "
-"``ping`` alamat IPv6 dari instance."
-
-msgid ""
-"On the controller node or any host with access to the provider network, "
-"``ping`` the floating IPv4 address of the instance."
-msgstr ""
-"Pada controller node atau host dengan akses ke jaringan provider, lakukan "
-"``ping`` alamat IPv4 dari instance."
-
-msgid ""
-"On the network node with the master router, administratively disable the "
-"overlay network interface."
-msgstr ""
-"Pada node jaringan dengan router utama, secara administratif menonaktifkan "
-"antarmuka jaringan overlay."
-
-msgid "On the network node, verify creation of the ``qrouter`` namespace."
-msgstr ""
-"Pada node jaringan, lakukan verifikasi penciptaan namespace ``qrouter``."
-
-msgid ""
-"On the network node, verify creation of the ``snat`` and ``qrouter`` "
-"namespaces with the same ID."
-msgstr ""
-"Pada node jaringan, lakukan verifikasi penciptaan ``snat`` dan namespace "
-"``qrouter`` dengan ID yang sama."
-
-msgid ""
-"On the original network node in step 2, administratively enable the overlay "
-"network interface. Note that the master router remains on the network node "
-"in step 3."
-msgstr ""
-"Pada node jaringan asli dalam langkah 2, secara administratif aktifkan "
-"antarmuka jaringan overlay. Perhatikan bahwa router utama tetap pada node "
-"jaringan dalam langkah 3."
-
-msgid ""
-"On the other network node, verify promotion of the backup router to master "
-"router by noting addition of IP addresses to the interfaces in the "
-"``qrouter`` namespace."
-msgstr ""
-"Pada node jaringan lainnya, lakukan verifikasi promosi cadangan router untuk "
-"router utama dengan mencatat penambahan alamat IP ke interface dalam "
-"namespace ``qrouter``."
-
-msgid ""
-"Once configuration is complete, you can launch instances with SR-IOV ports."
-msgstr ""
-"Setelah konfigurasi selesai, Anda dapat meluncurkan instance dengan port SR-"
-"IOV."
-
-msgid ""
-"Once starting the migration, south-north connections (instances to internet) "
-"will be severed. New connections will be able to start only when the "
-"migration is complete."
-msgstr ""
-"Setelah memulai migrasi, koneksi selatan-utara (instance ke internet) akan "
-"terputus. Koneksi baru akan dapat memulai hanya ketika migrasi selesai."
-
-msgid ""
-"Once the ``neutron-server`` has been configured and restarted, users will "
-"have functionality that covers three use cases, described in the following "
-"sections. In each of the use cases described below:"
-msgstr ""
-"Setelah ``neutron-server`` telah dikonfigurasi dan di-restart, pengguna akan "
-"memiliki fungsi yang mencakup tiga use case, dijelaskan di bagian berikut. "
-"Dalam setiap use case dijelaskan di bawah:"
-
-msgid ""
-"Once these steps are executed, the port's DNS data will be published in the "
-"external DNS service. This is an example:"
-msgstr ""
-"Setelah langkah-langkah ini dijalankan, data DNS milik port akan diterbitkan "
-"dalam layanan DNS eksternal. Ini adalah contohnya:"
-
-msgid ""
-"Once you have stacked run the command below to start the neutron-pd-agent:"
-msgstr ""
-"Setelah Anda menumpuknya, jalankan perintah di bawah ini untuk memulai "
-"neutron-pd-agent:"
-
-msgid "One BGP agent."
-msgstr "Satu agen BGP."
-
-msgid ""
-"One address scope containing IP address range 203.0.113.0/24 for provider "
-"networks, and IP address ranges 192.0.2.0/25 and 192.0.2.128/25 for self-"
-"service networks."
-msgstr ""
-"Satu lingkup alamat yang berisi kisaran alamat IP 203.0.113.0/24 untuk "
-"jaringan penyedia, dan kisaran alamat IP 192.0.2.0/25 dan 192.0.2.128/25 "
-"untuk jaringan self-service."
-
-msgid "One controller node with the following components:"
-msgstr "Satu node controller dengan komponen-komponen berikut:"
-
-msgid "One provider network using IP address range 203.0.113.0/24."
-msgstr "Satu jaringan provider menggunakan kisaran alamat IP 203.0.113.0/24."
-
-msgid "One-to-one NAT"
-msgstr "One-to-one NAT"
-
-msgid ""
-"Only compute resources can be attached via macvtap. Attaching other "
-"resources like DHCP, Routers and others is not supported. Therefore run "
-"either OVS or linux bridge in VLAN or flat mode on the controller node."
-msgstr ""
-"Hanya sumber komputasi dapat dihubungkan melalui macvtap. Menghubungkan "
-"sumber lain seperti DHCP, Router dan lainnya tidak didukung. Oleh karena itu "
-"jalankan OVS ataupun jembatan linux di VLAN atau modus datar pada controller "
-"node."
-
-msgid ""
-"Only for :ref:`config-dns-use-case-1`, if the port binding extension is "
-"enabled in the Networking service, the Compute service will execute one "
-"additional port update operation when allocating the port for the instance "
-"during the boot process. This may have a noticeable adverse effect in the "
-"performance of the boot process that must be evaluated before adoption of "
-"this use case."
-msgstr ""
-"Hanya untuk :ref:`config-dns-use-case-1`, jika port yang mengikat ekstensi "
-"diaktifkan dalam layanan Networking, layanan Compute akan mengeksekusi "
-"operasi update satu tambahan port ketika pengalokasian port instance selama "
-"proses boot . Ini mungkin memiliki efek yang merugikan nyata dalam kinerja "
-"proses boot yang harus dievaluasi sebelum adopsi dari use case ini."
-
-msgid ""
-"Only provides connectivity to an instance via the compute node on which the "
-"instance resides if the instance resides on a self-service network with a "
-"floating IPv4 address. Instances on self-service networks with only an IPv6 "
-"address or both IPv4 and IPv6 addresses rely on the network node for IPv6 "
-"connectivity."
-msgstr ""
-"Hanya menyediakan konektivitas ke sebuah instance melalui node komputasi "
-"dimana instance berada jika instance berada pada jaringan self-service "
-"dengan alamat IPv4 mengambang. Instance pada jaringan self-service dengan "
-"hanya alamat IPv6 atau kedua alamat IPv4 dan IPv6 tergantung pada node "
-"jaringan untuk konektivitas IPv6."
-
-msgid ""
-"Only supports self-service networks using a router. Provider networks "
-"operate at layer-2 and rely on physical network infrastructure for "
-"redundancy."
-msgstr ""
-"Hanya mendukung jaringan self-service menggunakan router. Jaringan provider "
-"beroperasi pada lapisan-2 dan bergantung pada infrastruktur jaringan fisik "
-"untuk redundansi."
-
-msgid "Open Virtual Network (OVN)"
-msgstr "Open Virtual Network (OVN)"
-
-msgid "Open source"
-msgstr "Open source"
-
-msgid "Open vSwitch"
-msgstr "Open vSwitch"
-
-msgid "Open vSwitch & Open vSwitch agent"
-msgstr "Agen Open vSwitch & Open vSwitch"
-
-msgid "Open vSwitch (OVS)"
-msgstr "Open vSwitch (OVS)"
-
-msgid ""
-"Open vSwitch (OVS) provides support for a Data Plane Development Kit (DPDK) "
-"datapath since OVS 2.2, and a DPDK-backed ``vhost-user`` virtual interface "
-"since OVS 2.4. The DPDK datapath provides lower latency and higher "
-"performance than the standard kernel OVS datapath, while DPDK-backed ``vhost-"
-"user`` interfaces can connect guests to this datapath. For more information "
-"on DPDK, refer to the `DPDK `__ website."
-msgstr ""
-"Open vSwitch (OVS) menyediakan dukungan untuk Data Plane Development Kit "
-"(DPDK) datapath sejak OVS 2.2, dan antarmuka virtual ``vhost-user`` DPDK-"
-"backed sejak OVS 2.4. The DPDK datapath menyediakan latency rendah dan "
-"kinerja tinggi dari OVS datapath kernel standar, sementara interface ``vhost-"
-"user`` DPDK-backed dapat berhubungan guests untuk datapath ini. Untuk "
-"informasi lebih lanjut tentang DPDK, rujuk pada situs `DPDK ` __."
-
-msgid "Open vSwitch agent"
-msgstr "Agen Open vSwitch"
-
-msgid "Open vSwitch agent, Linux bridge agent"
-msgstr "Agen Open vSwitch, Agen jembatan Linux"
-
-msgid "Open vSwitch mechanism and Open vSwitch agent"
-msgstr "Mekanisme Open vSwitch and agen Open vSwitch"
-
-msgid "Open vSwitch mechanism driver"
-msgstr " Driver mekanisme Open vSwitch"
-
-msgid "Open vSwitch with DPDK datapath"
-msgstr "Open vSwitch dengan DPDK datapath"
-
-msgid "Open vSwitch: High availability using DVR"
-msgstr "Open vSwitch: High availability menggunakan DVR"
-
-msgid "Open vSwitch: High availability using VRRP"
-msgstr "Open vSwitch: High availability menggunakan VRRP"
-
-msgid "Open vSwitch: Provider networks"
-msgstr "Open vSwitch: Jaringan provider"
-
-msgid "Open vSwitch: Self-service networks"
-msgstr "Open vSwitch: Jaringan self-service"
-
-msgid "OpenContrail"
-msgstr "OpenContrail"
-
-msgid "OpenDaylight"
-msgstr "OpenDaylight"
-
-msgid ""
-"OpenStack :term:`Compute service (nova)` is used to plug each virtual NIC on "
-"the VM into a particular network."
-msgstr ""
-"OpenStack :term:`Compute service (nova)` digunakan untuk plug setiap NIC "
-"virtual pada VM dalam jaringan tertentu."
-
-msgid ""
-"OpenStack :term:`Dashboard (horizon)` is used by administrators and project "
-"users to create and manage network services through a web-based graphical "
-"interface."
-msgstr ""
-"OpenStack :term:`Dashboard (horizon)` digunakan oleh administrator dan "
-"pengguna proyek untuk membuat dan mengelola layanan jaringan melalui "
-"antarmuka berbasis web grafis."
-
-msgid ""
-"OpenStack :term:`Identity service (keystone)` is used for authentication and "
-"authorization of API requests."
-msgstr ""
-"OpenStack :term:`Identity service (keystone)` digunakan untuk otentikasi dan "
-"otorisasi permintaan API."
-
-msgid "OpenStack Networking"
-msgstr "OpenStack Networking (jaringan OpenStack)"
-
-msgid "OpenStack Networking (neutron) server service and ML2 plug-in."
-msgstr "Layanan server OpenStack Networking (neutron) dan ML2 plug-in."
-
-msgid "OpenStack Networking Guide"
-msgstr "Panduan OpenStack Networking"
-
-msgid ""
-"OpenStack Networking Linux bridge layer-2 agent, DHCP agent, metadata agent, "
-"and any dependencies."
-msgstr ""
-"OpenStack Networking Linux bridge layer-2 agent, DHCP agent, metadata agent, "
-"dan dependensi."
-
-msgid "OpenStack Networking Linux bridge layer-2 agent, layer-3 agent, and any"
-msgstr ""
-"OpenStack Networking Linux bridge layer-2 agent, layer-3 agent, dan apapun"
-
-msgid "OpenStack Networking Macvtap layer-2 agent and any dependencies."
-msgstr "Agen layer-2 Macvtap OpenStack Networking dan dependensi."
-
-msgid ""
-"OpenStack Networking Open vSwitch (OVS) layer-2 agent, DHCP agent, metadata "
-"agent, and any dependencies including OVS."
-msgstr ""
-"OpenStack Networking Open vSwitch (OVS) layer-2 agent, DHCP agent, metadata "
-"agent, dan apapun dependensi termasuk OVS."
-
-msgid ""
-"OpenStack Networking Open vSwitch (OVS) layer-2 agent, layer-3 agent, and "
-"any including OVS."
-msgstr ""
-"OpenStack Networking Open vSwitch (OVS) layer-2 agent, layer-3 agent, dan "
-"apapun termasuk OVS."
-
-msgid ""
-"OpenStack Networking allows you to create and manage network objects, such "
-"as networks, subnets, and ports, which other OpenStack services can use. "
-"Plug-ins can be implemented to accommodate different networking equipment "
-"and software, providing flexibility to OpenStack architecture and deployment."
-msgstr ""
-"OpenStack Networking mengizinkan Anda untuk membuat dan mengelola objek "
-"jaringan, seperti jaringan, subnet, dan port, dimana layanan OpenStack "
-"lainnya dapat menggunakan. Plug-in dapat diimplementasikan untuk "
-"mengakomodasi peralatan jaringan dan perangkat lunak yang berbeda, yang "
-"memberikan fleksibilitas untuk arsitektur dan pengerahan OpenStack."
-
-msgid ""
-"OpenStack Networking consists of the neutron-server, a database for "
-"persistent storage, and any number of plug-in agents, which provide other "
-"services such as interfacing with native Linux networking mechanisms, "
-"external devices, or SDN controllers."
-msgstr ""
-"OpenStack Networking terdiri dari neutron-server, database untuk penyimpanan "
-"persisten, dan sejumlah agen plug-in, yang menyediakan layanan lain seperti "
-"berinteraksi dengan mekanisme jaringan Linux native, perangkat eksternal, "
-"atau SDN controllers."
-
-msgid "OpenStack Networking integrates with various OpenStack components:"
-msgstr "OpenStack Networking terintegrasi dengan berbagai komponen OpenStack:"
-
-msgid ""
-"OpenStack Networking is entirely standalone and can be deployed to a "
-"dedicated host. If your deployment uses a controller host to run centralized "
-"Compute components, you can deploy the Networking server to that specific "
-"host instead."
-msgstr ""
-"OpenStack Networking dapat sepenuhnya mandiri dan dapat digunakan untuk host "
-"yang didedikasikan. Jika pengerahan Anda menggunakan host controller untuk "
-"menjalankan komponen Compute terpusat, Anda dapat mengerahkan server "
-"Networking bukan untuk host tertentu."
-
-msgid ""
-"OpenStack Networking layer-2 (switching) agent, layer-3 agent, and any "
-"dependencies."
-msgstr ""
-"Agen OpenStack Networking layer-2 (switching), agen layer-3, dan dependensi."
-
-msgid ""
-"OpenStack Networking layer-2 agent, DHCP agent, metadata agent, and any "
-"dependencies."
-msgstr ""
-"Agen OpenStack Networking layer-2, agen DHCP, agen metadata, dan dependensi"
-
-msgid ""
-"OpenStack Networking layer-2 agent, layer-3 agent, and any dependencies."
-msgstr "OpenStack Networking layer-2 agent, layer-3 agent, dan dependensi."
-
-msgid "OpenStack Networking plug-in and agents"
-msgstr "Plug-in dan agen OpenStack Networking"
-
-msgid "OpenStack Networking server service and ML2 plug-in."
-msgstr "Layanan server OpenStack Networking dan ML2 plug-in."
-
-msgid ""
-"OpenStack can be setup such that OpenStack Networking directly provides RA, "
-"DHCP relay and DHCPv6 address and optional information for their networks or "
-"this can be delegated to external routers and services based on the drivers "
-"that are in use. There are two neutron subnet attributes - ``ipv6_ra_mode`` "
-"and ``ipv6_address_mode`` – that determine how IPv6 addressing and network "
-"information is provided to project instances:"
-msgstr ""
-"OpenStack bisa diatur sedemikian rupa sehingga OpenStack Networking langsung "
-"memberikan RA, penyiaran DHCP dan alamat DHCPv6 dan informasi opsional untuk "
-"jaringan mereka atau hal ini dapat didelegasikan kepada router dan layanan "
-"eksternal didasarkan pada driver yang sedang digunakan. Ada atribut dua "
-"subnet neutron - `` ipv6_ra_mode`` dan `` ipv6_address_mode`` - yang "
-"menentukan bagaimana IPv6 dan informasi jaringan disediakan untuk instance "
-"proyek:"
-
-msgid "OpenStack control & management network considerations"
-msgstr "Pertimbangan jaringan manajemen dan kontrol OpenStack"
-
-msgid ""
-"OpenStack control communication between servers and services over an IPv6 "
-"network."
-msgstr ""
-"OpenStack mengkontrol komunikasi antara server dan layanan melalui jaringan "
-"IPv6."
-
-msgid "OpenStack controller host - controlnode"
-msgstr "OpenStack controller host - controlnode"
-
-msgid ""
-"OpenStack currently doesn't support the privacy extensions defined by RFC "
-"4941. The interface identifier and DUID used must be directly derived from "
-"the MAC as described in RFC 2373. The compute hosts must not be setup to "
-"utilize the privacy extensions when generating their interface identifier."
-msgstr ""
-"OpenStack saat ini tidak mendukung ekstensi privasi yang didefinisikan oleh "
-"RFC 4941. Interface identifier dan DUID yang digunakan harus langsung "
-"berasal dari MAC seperti yang dijelaskan dalam RFC 2373. Host komputasi "
-"tidak harus mengatur untuk memanfaatkan ekstensi privasi saat membuat "
-"identifier interface identifier mereka ."
-
-msgid ""
-"OpenStack uses DNAT to route packets from instances to the OpenStack "
-"metadata service. Applications running inside of instances access the "
-"OpenStack metadata service by making HTTP GET requests to a web server with "
-"IP address 169.254.169.254. In an OpenStack deployment, there is no host "
-"with this IP address. Instead, OpenStack uses DNAT to change the destination "
-"IP of these packets so they reach the network interface that a metadata "
-"service is listening on."
-msgstr ""
-"OpenStack menggunakan DNAT untuk me-rute paket dari instance ke layanan "
-"metadata OpenStack. Aplikasi yang berjalan di dalam instance mengakses "
-"layanan metadata OpenStack dengan membuat permintaan HTTP GET ke server web "
-"dengan alamat IP 169.254.169.254. Dalam pengerahan OpenStack, tidak ada host "
-"dengan alamat IP ini. Sebaliknya, OpenStack menggunakan DNAT untuk mengubah "
-"IP tujuan dari paket tersebut sehingga mereka mencapai antarmuka jaringan "
-"dimana layanan metadata mendengarkannya."
-
-msgid ""
-"OpenStack uses SNAT to enable applications running inside of instances to "
-"connect out to the public Internet."
-msgstr ""
-"OpenStack menggunakan SNAT untuk mengaktifkan aplikasi yang berjalan dalam "
-"instance untuk menghubungkan ke Internet publik."
-
-msgid ""
-"OpenStack uses a third-party program called `dnsmasq `_ to implement the DHCP server. Dnsmasq writes to "
-"the syslog, where you can observe the DHCP request and replies::"
-msgstr ""
-"OpenStack menggunakan program pihak ketiga yang disebut `dnsmasq `_ untuk menjalankan server DHCP. Dnsmasq "
-"menulis ke syslog, dimana Anda dapat mengamati permintaan dan balasan DHCP::"
-
-msgid "Operation"
-msgstr "Operation (operasi)"
-
-msgid "Operation with Distributed Virtual Routers (DVR)"
-msgstr "Operasi dengan Distributed Virtual Routers (DVR)"
-
-msgid "Operational OpenStack Identity (keystone) service."
-msgstr "Layanan OpenStack Identity (keystone) operasional."
-
-msgid "Operational OpenStack Image Service (glance)."
-msgstr "Layanan OpenStack Image (glance) operasional."
-
-msgid ""
-"Operational SQL server with databases necessary for each OpenStack service."
-msgstr ""
-"Server SQL operasional dengan database yang diperlukan untuk setiap layanan "
-"OpenStack."
-
-msgid ""
-"Operational hypervisor components of the OpenStack Compute (nova) service "
-"with appropriate configuration to use the Networking service."
-msgstr ""
-"Komponen hypervisor operasional dari layanan OpenStack Compute (nova) dengan "
-"konfigurasi yang sesuai untuk menggunakan layanan Networking."
-
-msgid ""
-"Operational management components of the OpenStack Compute (nova) service "
-"with appropriate configuration to use the Networking service."
-msgstr ""
-"Komponen manajemen operasional dari layanan OpenStack Compute (nova) dengan "
-"konfigurasi yang sesuai untuk menggunakan layanan Networking."
-
-msgid "Operational message queue service."
-msgstr "Layanan antrian pesan operasional."
-
-msgid "Operations"
-msgstr "Operations (operasi)"
-
-msgid "Operations impact"
-msgstr "Dampak operasi"
-
-msgid ""
-"Operators (and users with admin role) can get the auto-allocated topology "
-"for a project by specifying the project ID:"
-msgstr ""
-"Operator (dan pengguna dengan peran admin) bisa mendapatkan topologi auto-"
-"allocated untuk proyek dengan menentukan ID proyek:"
-
-msgid ""
-"Optionally, create another subnet on the network with a different service "
-"type. For example, the ``compute:foo`` arbitrary service type."
-msgstr ""
-"Secara opsional, buat subnet lain di jaringan dengan tipe layanan yang "
-"berbeda. Sebagai contoh, tipe layanan apapun ``compute:foo`` "
-
-msgid ""
-"Optionally, enable IPv4 access from external networks such as the Internet "
-"to the instance."
-msgstr ""
-"Secara opsional, aktifkan akses IPv4 dari jaringan eksternal seperti "
-"Internet untuk instance."
-
-msgid ""
-"Optionally, set the needed ``notification_drivers`` in the ``[qos]`` section "
-"in ``/etc/neutron/neutron.conf`` (``message_queue`` is the default)."
-msgstr ""
-"Secara opsional, atur yang dibutuhkan ``notification_drivers`` dalam bagian "
-"``[qos]`` dalam ``/etc/neutron/neutron.conf`` (``message_queue`` menjadi "
-"default)."
-
-msgid "Or to run in headless mode:"
-msgstr "Atau untuk berjalan dalam modus tanpa kepala (headless mode):"
-
-msgid "Other Configuration Flag = 0"
-msgstr "Other Configuration Flag = 0"
-
-msgid "Other Configuration Flag = 1"
-msgstr "Other Configuration Flag = 1"
-
-msgid "Other features such as BGP dynamic routing"
-msgstr "Fitur lainnya seperti routing dinamis BGP"
-
-msgid ""
-"Other networks including provider networks and flat or VLAN self-service "
-"networks assume the value of the ``global_physnet_mtu`` option."
-msgstr ""
-"Jaringan lain termasuk jaringan penyedia dan jaringan datar atau self-"
-"service VLAN yang mengasumsikan nilai opsi ``global_physnet_mtu``."
-
-msgid "Overlay (tunnel) protocols"
-msgstr "Protokol overlay (tunnel)"
-
-msgid ""
-"Overlay: Handles self-service networks using an overlay protocol such as "
-"VXLAN or GRE."
-msgstr ""
-"Overlay: Menangani jaringan self-service menggunakan protokol overlay "
-"seperti VXLAN atau GRE."
-
-msgid "Overview"
-msgstr "Iktisar"
-
-msgid ""
-"PCI ``vendor_id`` and ``product_id`` as displayed by the Linux utility "
-"``lspci``."
-msgstr ""
-"PCI ``vendor_id`` dan ``product_id`` seperti yang ditampilkan oleh utilitas "
-"Linux ``lspci``."
-
-msgid ""
-"PCI address: The address uses the same syntax as in ``lspci`` and an "
-"asterisk (*) can be used to match anything."
-msgstr ""
-"Alamat PCI: alamat menggunakan sintaks yang sama seperti dalam ``lspci`` dan "
-"tanda bintang (*) dapat digunakan untuk mencocokkan apa pun."
-
-msgid ""
-"PCI-SIG Single Root I/O Virtualization and Sharing (SR-IOV) functionality is "
-"available in OpenStack since the Juno release. The SR-IOV specification "
-"defines a standardized mechanism to virtualize PCIe devices. This mechanism "
-"can virtualize a single PCIe Ethernet controller to appear as multiple PCIe "
-"devices. Each device can be directly assigned to an instance, bypassing the "
-"hypervisor and virtual switch layer. As a result, users are able to achieve "
-"low latency and near-line wire speed."
-msgstr ""
-"PCI-SIG Single Root I/O Virtualization dan Sharing (SR-IOV) secara "
-"fungsional tersedia dalam OpenStack sejak rilis Juno. Spesifikasi SR-IOV "
-"mendefinisikan mekanisme standar untuk virtualisasi perangkat PCIe. "
-"Mekanisme ini dapat virtualisasi PCIe Ethernet controller tunggal akan "
-"terlihat sebagai beberapa perangkat PCIe. Setiap perangkat dapat langsung "
-"ditugaskan ke sebuah instance, denganmelewati hypervisor dan virtual switch "
-"layer. Akibatnya, pengguna dapat mencapai latency rendah dan kecepatan kawat "
-"near-line."
-
-msgid "PF"
-msgstr "PF"
-
-msgid "Performance considerations"
-msgstr "Pertimbangan kinerja"
-
-msgid "Performance impact"
-msgstr "Dampak kinerja"
-
-msgid "Persist created VFs on reboot:"
-msgstr "Pertahankan VF yang dibuat ketika reboot:"
-
-msgid ""
-"Physical Function. The physical Ethernet controller that supports SR-IOV."
-msgstr "Physical Function. Physical Ethernet controller yang mendukung SR-IOV."
-
-msgid "Physical Network"
-msgstr "Physical Network (jaringan fisik)"
-
-msgid "Placement API: 1.1"
-msgstr "Placement API: 1.1"
-
-msgid "Plug-ins"
-msgstr "Plug-ins"
-
-msgid ""
-"Plugs and unplugs ports, creates networks or subnets, and provides IP "
-"addressing. The chosen plug-in and agents differ depending on the vendor and "
-"technologies used in the particular cloud. It is important to mention that "
-"only one plug-in can be used at a time."
-msgstr ""
-"Port plugs dan unplugs, menciptakan jaringan atau subnet, dan menyediakan "
-"alamat IP. Plug-in dan agen yang dipilih berbeda tergantung pada vendor dan "
-"teknologi yang digunakan di cloud tertentu. Hal ini penting untuk "
-"menyebutkan bahwa hanya satu plug-in dapat digunakan pada satu waktu."
-
-msgid "Pool"
-msgstr "Pool"
-
-msgid "Populate the database."
-msgstr "Mengisi database."
-
-msgid "Port chain"
-msgstr "Port chain (rantai port)"
-
-msgid "Port pair"
-msgstr "Port pair (pasangan port)"
-
-msgid "Port pair group"
-msgstr "Port pair group (kelompok pasangan port)"
-
-msgid "Port pair: [p1, p2]"
-msgstr "Port pair: [p1, p2]"
-
-msgid "Port pair: [p3, p4]"
-msgstr "Port pair: [p3, p4]"
-
-msgid "Port pair: [p5, p6]"
-msgstr "Port pair: [p5, p6]"
-
-msgid "Ports"
-msgstr "Port-Port"
-
-msgid "Ports can be created with a policy attached to them too."
-msgstr "Port dapat dibuat dengan kebijakan yang menyertainya juga."
-
-msgid ""
-"Ports with the device owner ``network:dhcp`` are exempt from the above IPAM "
-"logic for subnets with ``dhcp_enabled`` set to ``True``. This preserves the "
-"existing automatic DHCP port creation behaviour for DHCP-enabled subnets."
-msgstr ""
-"Port dengan pemilik perangkat ``network:dhcp`` dibebaskan dari logika IPAM "
-"di atas untuk subnet dengan ``dhcp_enabled`` diatur ke `` True``. Ini "
-"mempertahankan perilaku penciptaan port DHCP otomatis yang ada untuk subnet "
-"DHCP-enabled."
-
-msgid ""
-"Pre-Mitaka address scopes are not visible through the API. You cannot list "
-"address scopes or show details. Scopes exist implicitly as a catch-all for "
-"addresses that are not explicitly scoped."
-msgstr ""
-"Pre-Mitaka lingkup alamat tidak terlihat melalui API. Anda tidak bisa "
-"mendaftar lingkup alamat atau menunjukkan detail. Lingkup ada secara "
-"implisit sebagai catch-all untuk alamat yang tidak secara eksplisit "
-"terlingkup."
-
-msgid "Prefix advertisement"
-msgstr "Penyiaran update dan perubahan awal"
-
-msgid "Prefix delegation"
-msgstr "Penyerahan awalan (prefix delegation)"
-
-msgid ""
-"Prefix delegation became available in the Liberty release, it is not "
-"available in the Kilo release. HA and DVR routers are not currently "
-"supported by this feature."
-msgstr ""
-"Prefix delegation menjadi tersedia dalam rilis Liberty, tidak tersedia dalam "
-"rilis Kilo. Router HA dan DVR saat ini tidak didukung oleh fitur ini."
-
-msgid "Prerequisites"
-msgstr "Prasyarat"
-
-msgid "Prerequisites for demonstration"
-msgstr "Prasyarat untuk demonstrasi"
-
-msgid ""
-"Prerequisites, typically hardware requirements, generally increase with each "
-"building block. Each building block depends on proper deployment and "
-"operation of prior building blocks. For example, the first building block "
-"(provider networks) only requires one controller and two compute nodes, the "
-"second building block (self-service networks) adds a network node, and the "
-"high-availability building blocks typically add a second network node for a "
-"total of five nodes. Each building block could also require additional "
-"infrastructure or changes to existing infrastructure such as networks."
-msgstr ""
-"Prasyarat, biasanya kebutuhan hardware, umumnya meningkat dengan setiap blok "
-"bangunan. Setiap blok bangunan tergantung pada pengerahan yang tepat dan "
-"pengoperasian blok bangunan sebelumnya. Misalnya, blok bangunan pertama "
-"(jaringan provider) hanya membutuhkan satu controller dan dua node "
-"komputasi, blok bangunan kedua (jaringan self-service) menambahkan node "
-"jaringan, dan blok bangunan ketersediaan tinggi secara khusus menambahkan "
-"node jaringan kedua untuk sebanyak lima node. Setiap blok bangunan juga bisa "
-"memerlukan infrastruktur tambahan atau perubahan infrastruktur yang ada "
-"seperti jaringan."
-
-msgid "Preventing regular users from sharing objects with each other"
-msgstr "Pencegahan pengguna biasa berbagi objek dengan pengguna lainnya"
-
-msgid ""
-"Previously, a user had to configure a range of networking resources to boot "
-"a server and get access to the Internet. For example, the following steps "
-"are required:"
-msgstr ""
-"Sebelumnya, pengguna harus mengkonfigurasi berbagai sumber daya jaringan "
-"untuk boot server dan mendapatkan akses ke Internet. Misalnya, langkah "
-"berikut ini diperlukan:"
-
-msgid ""
-"Project ``b87b2fc13e0248a4a031d38e06dc191d`` will now be able to see the "
-"network when running :command:`openstack network list` and :command:"
-"`openstack network show` and will also be able to create ports on that "
-"network. No other users (other than admins and the owner) will be able to "
-"see the network."
-msgstr ""
-"Proyek ``b87b2fc13e0248a4a031d38e06dc191d`` sekarang akan dapat melihat "
-"jaringan ketika menjalankan :command:`openstack network list` dan :command:"
-"`openstack network show` dan juga akan dapat membuat port pada jaringan itu. "
-"Tidak ada pengguna lain (selain admin dan pemilik) akan dapat melihat "
-"jaringan."
-
-msgid ""
-"Project ``be98b82f8fdf46b696e9e01cebc33fd9`` will now be able to see the QoS "
-"policy when running :command:`openstack network qos policy list` and :"
-"command:`openstack network qos policy show` and will also be able to bind it "
-"to its ports or networks. No other users (other than admins and the owner) "
-"will be able to see the QoS policy."
-msgstr ""
-"Proyek ``be98b82f8fdf46b696e9e01cebc33fd9`` sekarang akan dapat melihat "
-"kebijakan QoS ketika menjalankan :command:`openstack network qos policy "
-"list` dan :command:`openstack network qos policy show` dan juga akan dapat "
-"mengikat ke port atau jaringan. Tidak ada pengguna lain (selain admin dan "
-"pemilik) akan dapat melihat kebijakan QoS."
-
-msgid ""
-"Project network configurations are made in the ``/etc/neutron/plugins/ml2/"
-"ml2_conf.ini`` configuration file on the neutron server:"
-msgstr ""
-"Konfigurasi jaringan proyek dibuat di file konfigurasi ``/etc/neutron/"
-"plugins/ml2/ml2_conf.ini`` pada server neutron:"
-
-msgid "Project network considerations"
-msgstr "Pertimbangan jaringan proyek"
-
-msgid "Project network types"
-msgstr "Tipe jaringan proyek"
-
-msgid ""
-"Project networks provide connectivity to instances for a particular project. "
-"Regular (non-privileged) users can manage project networks within the "
-"allocation that an administrator or operator defines for them. More "
-"information about project and provider networks see :doc:`intro-os-"
-"networking` or the `OpenStack Administrator Guide `__."
-msgstr ""
-"Jaringan proyek menyediakan konektivitas untuk instance untuk proyek "
-"tertentu. Pengguna reguler (non-istimewa) dapat mengelola jaringan proyek "
-"dalam alokasi dimana administrator atau operator mendefinisikan untuk "
-"mereka. Informasi lebih lanjut tentang proyek dan jaringan provider lihat :"
-"doc:`intro-os-networking` atau `OpenStack Administrator Guide `__."
-
-msgid "Project resources created by auto-allocation"
-msgstr "Sumber daya proyek yang dibuat oleh auto-allocation"
-
-msgid "Proprietary (vendor)"
-msgstr "Proprietary (vendor)"
-
-msgid "Provider (public/external) networks using IPv4 and IPv6"
-msgstr "Jaringan (public/external) provider menggunakan IPv4 dan IPv6"
-
-msgid "Provider network (VLAN)"
-msgstr "Jaringan operator (VLAN)"
-
-msgid "Provider network 1 (VLAN)"
-msgstr "Jaringan provider 1 (VLAN)"
-
-msgid "Provider network 1:"
-msgstr "Provider network (jaringan provider) 1:"
-
-msgid "Provider network 2 (VLAN)"
-msgstr "Jaringan provider 2 (VLAN)"
-
-msgid "Provider network 2:"
-msgstr "Provider network (jaringan provider) 2:"
-
-msgid "Provider network types"
-msgstr "Tipe jaringan operator (provider)"
-
-msgid "Provider networks"
-msgstr "Jaringan provider"
-
-msgid ""
-"Provider networks generally offer simplicity, performance, and reliability "
-"at the cost of flexibility. By default only administrators can create or "
-"update provider networks because they require configuration of physical "
-"network infrastructure. It is possible to change the user who is allowed to "
-"create or update provider networks with the following parameters of ``policy."
-"json``:"
-msgstr ""
-"Jaringan provider umumnya menawarkan kesederhanaan, kinerja, dan kehandalan "
-"pada biaya fleksibilitas. Secara default hanya administrator dapat membuat "
-"atau memperbarui jaringan provider karena mereka memerlukan konfigurasi "
-"infrastruktur jaringan fisik. Hal ini dimungkinkan untuk mengubah pengguna "
-"yang diizinkan untuk membuat atau memperbarui jaringan provider dengan "
-"parameter berikut `` policy.json``:"
-
-msgid ""
-"Provider networks offer layer-2 connectivity to instances with optional "
-"support for DHCP and metadata services. These networks connect, or map, to "
-"existing layer-2 networks in the data center, typically using VLAN (802.1q) "
-"tagging to identify and separate them."
-msgstr ""
-"Jaringan provider menawarkan layer-2 connectivity untuk instance dengan "
-"dukungan opsional untuk DHCP dan layanan metadata. Jaringan ini "
-"menghubungkan, atau memetakan, untuk layer-2 network yang ada di pusat "
-"data, biasanya menggunakan VLAN (802.1q) tagging untuk mengidentifikasi dan "
-"memisahkan mereka."
-
-msgid ""
-"Provider networks provide connectivity like project networks. But only "
-"administrative (privileged) users can manage those networks because they "
-"interface with the physical network infrastructure. More information about "
-"provider networks see :doc:`intro-os-networking` or the `OpenStack "
-"Administrator Guide `__."
-msgstr ""
-"Jaringan provider menyediakan konektivitas seperti jaringan proyek. Tetapi "
-"hanya pengguna administratif (istimewa) dapat mengelola jaringan mereka "
-"karena mereka berhadapan dengan infrastruktur jaringan fisik. Informasi "
-"lebih lanjut tentang jaringan provider lihat :doc:`intro-os-networking` atau "
-"`OpenStack Administrator Guide `__."
-
-msgid ""
-"Provider: Connects virtual and physical networks at layer-2. Typically uses "
-"physical network infrastructure for switching/routing traffic to external "
-"networks such as the Internet."
-msgstr ""
-"Provider: Menghubungkan jaringan virtual dan fisik pada lapisan-2. Biasanya "
-"menggunakan infrastruktur jaringan fisik untuk lalu lintas switching/routing "
-"untuk jaringan eksternal seperti Internet."
-
-msgid "Provides API, manages database, etc."
-msgstr "Sediakan API, kelola database, dll"
-
-msgid "Provides layer 2/3 connectivity to instances"
-msgstr "Sediakan konektivitas lapisan 2/3 untuk instance"
-
-msgid "QEMU 2.1.0"
-msgstr "QEMU 2.1.0"
-
-msgid "QEMU 2.5"
-msgstr "QEMU 2.5"
-
-msgid "QLogic"
-msgstr "QLogic"
-
-msgid ""
-"QoS currently works with ml2 only (SR-IOV, Open vSwitch, and linuxbridge are "
-"drivers that are enabled for QoS in Mitaka release)."
-msgstr ""
-"QoS saat ini bekerja dengan ML2 saja (SR-IOV, Open vSwitch, dan linuxbridge "
-"adalah driver yang diaktifkan untuk QoS dalam rilis Mitaka)."
-
-msgid ""
-"QoS is an advanced service plug-in. QoS is decoupled from the rest of the "
-"OpenStack Networking code on multiple levels and it is available through the "
-"ml2 extension driver."
-msgstr ""
-"QoS adalah plug-in layanan canggih. QoS dipisahkan dari sisa kode OpenStack "
-"Networking pada beberapa tingkat dan tersedia melalui driver ekstensi ml2."
-
-msgid ""
-"QoS is defined as the ability to guarantee certain network requirements like "
-"bandwidth, latency, jitter, and reliability in order to satisfy a Service "
-"Level Agreement (SLA) between an application provider and end users."
-msgstr ""
-"QoS didefinisikan sebagai kemampuan untuk menjamin kebutuhan jaringan "
-"tertentu seperti bandwidth, latency, jitter, dan kehandalan untuk memenuhi "
-"Service Level Agreement (SLA) antara penyedia aplikasi dan pengguna akhir "
-"(end user)."
-
-msgid ""
-"QoS policies are only created by admins with the default ``policy.json``. "
-"Therefore, you should have the cloud operator set them up on behalf of the "
-"cloud projects."
-msgstr ""
-"Kebijakan QoS hanya dibuat oleh admin dengan ``policy.json`` default. Oleh "
-"karena itu, Anda harus memiliki operator cloud mengaturnya atas nama proyek "
-"cloud."
-
-msgid "Quality of Service (QoS)"
-msgstr "Quality of Service (QoS)"
-
-msgid "Quotas"
-msgstr "Quotas (kuota)"
-
-msgid ""
-"Quotas are available for limiting the number of load balancers and load "
-"balancer pools. By default, both quotas are set to 10."
-msgstr ""
-"Kuota tersedia untuk membatasi jumlah penyeimbang beban dan kolam "
-"penyeimbang beban. Secara default, kedua kuota itu ditetapkan 10."
-
-msgid "RO"
-msgstr "RO"
-
-msgid "RW(POST only)"
-msgstr "RW(POST only)"
-
-msgid "Rack"
-msgstr "Rack"
-
-msgid "Re-enable the hypervisor."
-msgstr "Aktifkan kembali hypervisor."
-
-msgid ""
-"Reboot the hypervisor (or run \"smart\" live transition tool if available)."
-msgstr ""
-"Reboot hypervisor (atau menjalankan alat transisi hidup \"smart\" jika "
-"tersedia)."
-
-msgid "Reference Implementation"
-msgstr "Reference Implementation"
-
-msgid "Reference implementations"
-msgstr "Implementasi referensi"
-
-msgid "Reference implementations and other agents"
-msgstr "implementasi referensi dan agen lainnya"
-
-msgid "References"
-msgstr "Referensi"
-
-msgid "Referencing a subnet pool during subnet creation"
-msgstr "Mengacu kolam subnet selama pembuatan subnet"
-
-msgid ""
-"Regardless of address scopes, the floating IPs can be pinged from the "
-"external network:"
-msgstr ""
-"Terlepas dari lingkup alamat, IP mengambang dapat ping dari jaringan "
-"eksternal:"
-
-msgid "Regular port creation permissions on networks (since Liberty)."
-msgstr "Izin pembuatan port regular di jaringan (sejak Liberty)."
-
-msgid "Remove a network from a specified DHCP agent."
-msgstr "Hapus jaringan dari agen DHCP ditentukan."
-
-msgid "Remove a tag from a resource:"
-msgstr "Hapus tag dari sumber daya:"
-
-msgid "Rename the default segment to ``segment1``."
-msgstr "Mengubah nama segmen default ke ``segment1``."
-
-msgid ""
-"Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable "
-"from all virtual networks. For example:"
-msgstr ""
-"Ganti ``DNS_RESOLVER`` dengan alamat IP dari DNS resolver yang dapat "
-"dijangkau dari semua jaringan virtual. Sebagai contoh:"
-
-msgid ""
-"Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable "
-"from the virtual network and ``SUBNET_ID_OR_NAME`` with the UUID or name of "
-"the subnet. For example, using the ``selfservice`` subnet:"
-msgstr ""
-"Ganti ``DNS_RESOLVER`` dengan alamat IP dari DNS resolver yang dapat dicapai "
-"dari jaringan virtual dan ``SUBNET_ID_OR_NAME`` dengan UUID atau nama "
-"subnet. Misalnya, menggunakan subnet ``selfservice``:"
-
-msgid ""
-"Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable "
-"from the virtual network. For example:"
-msgstr ""
-"Ganti ``DNS_RESOLVER`` dengan alamat IP dari DNS resolver yang dapat dicapai "
-"dari jaringan virtual. Sebagai contoh:"
-
-msgid ""
-"Replace ``INTERFACE_DRIVER`` with the interface driver that the layer-2 "
-"agent in your environment uses. For example, ``openvswitch`` for Open "
-"vSwitch or ``linuxbridge`` for Linux bridge."
-msgstr ""
-"Ganti ``INTERFACE_DRIVER`` dengan driver antarmuka dimana agen layer-2 di "
-"lingkungan Anda menggunakannya. Sebagai contoh, ``openvswitch`` untuk Open "
-"vSwitch atau ``linuxbridge`` for Linux bridge."
-
-msgid ""
-"Replace ``LOCAL_AS`` with an appropriate local autonomous system number. The "
-"example configuration uses AS 1234."
-msgstr ""
-"Ganti ``LOCAL_AS`` dengan nomor sistem otonomi lokal yang tepat. Konfigurasi "
-"Contoh menggunakan AS 1234."
-
-msgid ""
-"Replace ``MACVTAP_INTERFACE`` with the name of the underlying interface that "
-"handles Macvtap mechanism driver interfaces. If using a prerequisite "
-"deployment example, replace ``MACVTAP_INTERFACE`` with the name of the "
-"underlying interface that handles overlay networks. For example, ``eth1``."
-msgstr ""
-"Ganti `` MACVTAP_INTERFACE`` dengan nama interface yang mendasari yang "
-"menangani antarmuka driver mekanisme Macvtap. Jika menggunakan contoh "
-"penyebaran prasyarat, ganti `` MACVTAP_INTERFACE`` dengan nama interface "
-"yang mendasari yang menangani jaringan overlay. Misalnya, ``eth1``."
-
-msgid ""
-"Replace ``MIN_VXLAN_ID`` and ``MAX_VXLAN_ID`` with VXLAN ID minimum and "
-"maximum values suitable for your environment."
-msgstr ""
-"Gantilah ``MIN_VXLAN_ID`` dan ``MAX_VXLAN_ID`` dengan nilai minimum dan "
-"maksimum VXLAN ID yang cocok dengan lingkungan Anda."
-
-msgid ""
-"Replace ``NETWORK_ID`` with the ID of the additional self-service network."
-msgstr "Gantilah ``NETWORK_ID`` dengan ID dari jaringan self-service tambahan"
-
-msgid "Replace ``NETWORK_ID`` with the ID of the provider network."
-msgstr "Gantilah ``NETWORK_ID`` dengan ID dari jaringan provider."
-
-msgid "Replace ``NETWORK_ID`` with the ID of the self-service network."
-msgstr "Ganti ``NETWORK_ID`` dengan ID jaringan self-service."
-
-msgid ""
-"Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the "
-"interface that handles VXLAN overlays for self-service networks."
-msgstr ""
-"Gantilah ``OVERLAY_INTERFACE_IP_ADDRESS`` dengan alamat IP dari antarmuka "
-"yang menangani VXLAN overlay untuk jaringan self-service."
-
-msgid ""
-"Replace ``P1_ID``, ``P2_ID``, ``P3_ID``, ``P4_ID``, ``P5_ID``, and ``P6_ID`` "
-"with the UUIDs of the respective ports."
-msgstr ""
-"Gantikan ``P1_ID``, ``P2_ID``, ``P3_ID``, ``P4_ID``, ``P5_ID``, dan "
-"``P6_ID`` dengan UUID dari port masing-masing."
-
-msgid "Replace ``PROJECT_ID`` with the project ID."
-msgstr "Ganti ``PROJECT_ID`` dengan ID proyek."
-
-msgid ""
-"Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface "
-"that handles provider networks. For example, ``eth1``."
-msgstr ""
-"Gantilah ``PROVIDER_INTERFACE`` dengan nama interface yang mendasarinya "
-"yang menangani jaringan penyedia. Sebagai contoh,``eth1``."
-
-msgid ""
-"Replace ``REMOTE_AS`` with an appropriate remote autonomous system number. "
-"The example configuration uses AS 4321 which triggers EBGP peering."
-msgstr ""
-"Ganti `` REMOTE_AS`` dengan nomor sistem otonom jarak jauh yang sesuai. "
-"Konfigurasi contoh menggunakan AS 4321 yang memicu EBGP peering."
-
-msgid ""
-"Replace ``ROUTER_ID`` with a suitable unique 32-bit number, typically an "
-"IPv4 address on the host running the agent. For example, 192.0.2.2."
-msgstr ""
-"Ganti ``ROUTER_ID`` dengan nomor 32-bit unik dan cocok, biasanya alamat IPv4 "
-"dari host menjalankan agen. Misalnya, 192.0.2.2."
-
-msgid ""
-"Replace ``TUNNEL_INTERFACE_IP_ADDRESS`` with the IP address of the interface "
-"that handles VXLAN project networks."
-msgstr ""
-"Ganti ``TUNNEL_INTERFACE_IP_ADDRESS`` dengan alamat IP dari antarmuka yang "
-"menangani jaringan proyek VXLAN."
-
-msgid ""
-"Replace ``VNI_START`` and ``VNI_END`` with appropriate numerical values."
-msgstr ""
-"Gantilah ``VNI_START`` dan ``VNI_END`` dengan nilai-nilai numerik yang "
-"sesuai."
-
-msgid "Replace all tags on the resource:"
-msgstr "Ganti semua tag pada sumber daya:"
-
-msgid "Required"
-msgstr "Required"
-
-msgid "Required extensions"
-msgstr "Ekstensi yang diperlukan"
-
-msgid "Requirements"
-msgstr "Requirements"
-
-msgid "Resource"
-msgstr "Resource"
-
-msgid "Resource purge"
-msgstr "Pembersihan sumber daya"
-
-msgid "Resource tags"
-msgstr "Tag sumber daya"
-
-msgid "Resources"
-msgstr "Resources (sumber daya)"
-
-msgid "Restart Apache to activate the new panel:"
-msgstr "Restart Apache untuk mengaktifkan panel baru:"
-
-msgid ""
-"Restart the Network service to activate the new configuration. You are now "
-"ready to create and manage load balancers with Octavia."
-msgstr ""
-"Restart layanan jaringan untuk mengaktifkan konfigurasi baru. Anda sekarang "
-"siap untuk membuat dan mengelola penyeimbang beban dengan Octavia."
-
-msgid ""
-"Restart the Network service to activate the new configuration. You are now "
-"ready to create load balancers with the LBaaS v2 agent."
-msgstr ""
-"Restart layanan jaringan untuk mengaktifkan konfigurasi baru. Anda sekarang "
-"siap untuk membuat penyeimbang beban dengan agen v2 LBaaS."
-
-msgid ""
-"Restart the ``neutron-l3-agent`` and ``neutron-server`` services to apply "
-"the settings."
-msgstr ""
-"Restart layanan ``neutron-l3-agent`` dan ``neutron-server`` untuk menerapkan "
-"pengaturan."
-
-msgid "Restart the ``neutron-server`` service."
-msgstr "Restart layanan ``neutron-server``."
-
-msgid "Restart the ``nova-compute`` service for the changes to go into effect."
-msgstr "Restart layanan ``nova-compute`` agar perubahan berlaku."
-
-msgid "Restart the ``nova-scheduler`` service."
-msgstr "Restart layanan ``nova-scheduler``."
-
-msgid "Restart the following services:"
-msgstr "Restart layanan berikut:"
-
-msgid "Result"
-msgstr "Result"
-
-msgid "Retrieving load balancer statistics"
-msgstr "Pengambilan statistik penyeimbang beban"
-
-msgid "Return traffic follows similar steps in reverse."
-msgstr "Lalu lintas balik mengikuti langkah yang sama secara terbalik."
-
-msgid ""
-"Return traffic follows similar steps in reverse. However, without a floating "
-"IPv4 address, hosts on the provider or external networks cannot originate "
-"connections to instances on the self-service network."
-msgstr ""
-"Lalu lintas balik mengikuti langkah yang sama secara terbalik. Namun, tanpa "
-"alamat IPv4 mengambang, host pada provider atau eksternal jaringan tidak "
-"dapat berasal koneksi untuk instance pada jaringan self-service."
-
-msgid "Role-Based Access Control (RBAC)"
-msgstr "Role-Based Access Control (RBAC)"
-
-msgid "Routed provider networks"
-msgstr "jaringan penyedia (provider) yang diarahkan"
-
-msgid ""
-"Routed provider networks imply that compute nodes reside on different "
-"segments. The operator must ensure that every compute host that is supposed "
-"to participate in a router provider network has direct connectivity to one "
-"of its segments."
-msgstr ""
-"Jaringan provider Routed menyiratkan bahwa komputasi node berada pada segmen "
-"yang berbeda. Operator harus memastikan bahwa setiap komputasi host yang "
-"seharusnya berpartisipasi dalam jaringan provider router telah memiliki "
-"konektivitas langsung ke salah satu segmen tersebut."
-
-msgid ""
-"Routed provider networks offer layer-3 connectivity to instances. These "
-"networks map to existing layer-3 networks in the data center. More "
-"specifically, the network maps to multiple layer-2 segments, each of which "
-"is essentially a provider network. Each has a router gateway attached to it "
-"which routes traffic between them and externally. The Networking service "
-"does not provide the routing."
-msgstr ""
-"Jaringan provider yang di-rute menawarkan lapisan-3 konektivitas untuk "
-"instance. Jaringan ini memetakan jaringan lapisan-3 yang ada di pusat data. "
-"Lebih khusus, jaringan memetakan ke beberapa segmen lapisan-2, masing-masing "
-"yang pada dasarnya adalah jaringan provider. Masing-masing jaringan memiliki "
-"router gateway yang terhubung ke yang me-rute lalu lintas antara jaringan "
-"mereka dan secara eksternal. Layanan Networking tidak menyediakan routing."
-
-msgid ""
-"Routed provider networks offer performance at scale that is difficult to "
-"achieve with a plain provider network at the expense of guaranteed layer-2 "
-"connectivity."
-msgstr ""
-"Jaringan provider yang di-rute menawarkan kinerja di skala yang sulit "
-"dicapai dengan jaringan provider polos (plain) pada tingkat ongkos "
-"konektivitas lapisan-2 yang terjamin."
-
-msgid ""
-"Routed provider networks require additional prerequisites over conventional "
-"provider networks. We recommend using the following procedure:"
-msgstr ""
-"Jaringan penyedia dialihkan (routed) memerlukan prasyarat tambahan melalui "
-"jaringan penyedia konvensional. Sebaiknya, kita gunakan prosedur berikut:"
-
-msgid "Router 1 contains IP addresses 203.0.113.11 and 192.0.2.1"
-msgstr "Router 1 berisi IP addresses 203.0.113.11 dan 192.0.2.1"
-
-msgid "Router 2 contains IP addresses 203.0.113.12 and 192.0.2.129"
-msgstr "Router 2 berisi IP addresses 203.0.113.12 dan 192.0.2.129"
-
-msgid "Router 3 contains IP addresses 203.0.113.13 and 198.51.100.1"
-msgstr "Router 3 berisi IP addresses 203.0.113.13 dan 198.51.100.1"
-
-msgid "Router advertisements"
-msgstr "Penyiaran update dan perubahan router"
-
-msgid "Router interfaces"
-msgstr "Antarmuka router"
-
-msgid "Router scheduler"
-msgstr "Alat penjadwal router"
-
-msgid "Router support"
-msgstr "Dukungan router"
-
-# #-#-#-#-# intro_networking_components.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# intro_os_networking_overview.pot (Networking Guide 0.9)
-# #-#-#-#-#
-# #-#-#-#-# scenario_legacy_lb.pot (Networking Guide 0.9) #-#-#-#-#
-# #-#-#-#-# scenario_legacy_ovs.pot (Networking Guide 0.9) #-#-#-#-#
-msgid "Routers"
-msgstr "Routers"
-
-msgid ""
-"Routers are special devices that enable packets to travel from one layer-3 "
-"network to another. Routers enable communication between two nodes on "
-"different layer-3 networks that are not directly connected to each other. "
-"Routers operate at layer-3 in the networking model. They route the traffic "
-"based on the destination IP address in the packet header."
-msgstr ""
-"Router adalah perangkat khusus yang mengaktifkan paket untuk perjalanan dari "
-"satu jaringan lapisan-3 ke yang lain. Router mengaktifkan komunikasi antara "
-"dua node pada berbagai jaringan lapisan-3 yang tidak langsung terhubung satu "
-"sama lain. Router beroperasi pada lapisan-3 dalam model jaringan. Mereka me-"
-"rute lalu lintas berdasarkan alamat IP tujuan dalam header paket."
-
-msgid ""
-"Routers provide virtual layer-3 services such as routing and NAT between "
-"self-service and provider networks or among self-service networks belonging "
-"to a project. The Networking service uses a layer-3 agent to manage routers "
-"via namespaces."
-msgstr ""
-"Router menyediakan layanan lapisan-3 virtual seperti routing dan NAT antara "
-"self-service dan jaringan provider atau antar jaringan self-service milik "
-"proyek. Layanan Networking menggunakan agen lapisan-3 untuk mengelola router "
-"melalui namespace."
-
-msgid ""
-"Routing between self-service networks occurs on the compute node containing "
-"the instance sending the packet. In this scenario, routing occurs on compute "
-"node 1 for packets from instance 1 to instance 2 and on compute node 2 for "
-"packets from instance 2 to instance 1."
-msgstr ""
-"Routing antara jaringan self-service terjadi pada node komputasi yang berisi "
-"instance yang sedang mengirim paket. Dalam skenario ini, routing terjadi "
-"pada komputasi node 1 untuk paket dari instance 1 ke instance 2 dan pada "
-"komputasi node 2 untuk paket dari instance 2 ke instance 1."
-
-msgid "Routing directly to a project network from an external network."
-msgstr "Routing langsung ke jaringan proyek dari jaringan eksternal."
-
-msgid "Routing services"
-msgstr "Layanan routing"
-
-msgid "Routing with address scopes for non-privileged users"
-msgstr "Routing dengan lingkup alamat untuk pengguna non-istimewa"
-
-msgid "Rule modification"
-msgstr "Modifikasi aturan"
-
-msgid "Run InfiniBand subnet managers to enable InfiniBand fabric."
-msgstr ""
-"Jalankan manajer subnet InfiniBand untuk mengaktifkan InfiniBand fabric."
-
-msgid "Run ``udhcpc`` in the VM; it cannot get the wanted IP."
-msgstr ""
-"Jalankan ``udhcpc`` di VM; ini tidak bisa mendapatkan IP yang diinginkan."
-
-msgid ""
-"Run a DB dump/restore tool that creates Networking data structures "
-"representing current legacy networking config."
-msgstr ""
-"Jalankan alat dump/restore DB yang menciptakan struktur data Networking yang "
-"mewakili konfigurasi jaringan legacy saat ini."
-
-msgid "Run a DHCP client in VM to see if it can get the wanted IP."
-msgstr ""
-"Jalankan klien DHCP di VM untuk melihat apakah ia bisa mendapatkan IP yang "
-"diinginkan."
-
-msgid "Run the ``neutron-lbaas`` database migration:"
-msgstr "Jalankan migrasi database ``neutron-lbaas``:"
-
-msgid "Runs ``nova-compute``, the Neutron L2 agent and DHCP agent"
-msgstr "Jalankan ``nova-compute``, agen Neutron L2 dan agen DHCP"
-
-msgid ""
-"Runs the Networking, Identity, and Compute services that are required to "
-"deploy VMs. The node must have at least one network interface that is "
-"connected to the Management Network. Note that ``nova-network`` should not "
-"be running because it is replaced by Neutron."
-msgstr ""
-"Jalankan layanan Networking, Identity, dan Compute yang diperlukan untuk "
-"pengerahan VM. Node harus memiliki setidaknya satu antarmuka jaringan yang "
-"terhubung ke Management Network. Perhatikan bahwa ``nova-network`` tidak "
-"harus berjalan karena digantikan oleh Neutron."
-
-msgid ""
-"SFC performs load balancing/distribution over the additional service "
-"functions in the port pair group."
-msgstr ""
-"SFC melakukan load balancing/distribution terhadap fungsi layanan tambahan "
-"pada kelompok pasangan port."
-
-msgid ""
-"SFC steers traffic matching the additional flow classifier to the port pair "
-"groups in the port chain."
-msgstr ""
-"SFC mengarahkan lalu lintas yang cocok dengan klassifier aliran tambahan "
-"untuk kelompok pasangan port dalam rantai port."
-
-msgid "SLAAC"
-msgstr "SLAAC"
-
-msgid "SNAT"
-msgstr "SNAT"
-
-msgid ""
-"SNAT high availability is implemented in a manner similar to the :ref:"
-"`deploy-lb-ha-vrrp` and :ref:`deploy-ovs-ha-vrrp` examples where "
-"``keepalived`` uses VRRP to provide quick failover of SNAT services."
-msgstr ""
-"SNAT ketersediaan tinggi diimplementasikan dengan cara yang sama dengan "
-"contoh :ref:`deploy-lb-ha-vrrp` dan :ref:`deploy-ovs-ha-vrrp` dimana "
-"``keepalived`` menggunakan VRRP untuk memberikan failover cepat layanan SNAT."
-
-msgid ""
-"SNAT solves this problem by modifying the source IP address to an IP address "
-"that is routable on the public Internet. There are different variations of "
-"SNAT; in the form that OpenStack deployments use, a NAT router on the path "
-"between the sender and receiver replaces the packet's source IP address with "
-"the router's public IP address. The router also modifies the source TCP or "
-"UDP port to another value, and the router maintains a record of the sender's "
-"true IP address and port, as well as the modified IP address and port."
-msgstr ""
-"SNAT memecahkan masalah ini dengan memodifikasi alamat IP sumber ke alamat "
-"IP yang routable di Internet publik. Ada variasi yang berbeda dari SNAT; "
-"dalam bentuk dimana pengerahan OpenStack menggunakan, router NAT di path "
-"antara pengirim dan penerima menggantikan alamat IP sumber paket dengan "
-"alamat IP publik router. Router juga memodifikasi TCP sumber atau port UDP "
-"ke nilai lain, dan router memelihara catatan alamat IP yang benar dari "
-"pengirim dan port, serta alamat IP yang dimodifikasi dan port."
-
-msgid "SR-IOV"
-msgstr "SR-IOV"
-
-msgid "SR-IOV agent"
-msgstr "Agen SR-IOV"
-
-msgid ""
-"SR-IOV features may require a specific NIC driver version, depending on the "
-"vendor. Intel NICs, for example, require ixgbe version 4.4.6 or greater, and "
-"ixgbevf version 3.2.2 or greater."
-msgstr ""
-"Fitur-IOV SR mungkin memerlukan versi driver NIC tertentu, tergantung pada "
-"vendor. Intel NIC, misalnya, memerlukan ixgbe versi 4.4.6 atau lebih besar, "
-"dan ixgbevf versi 3.2.2 atau lebih besar."
-
-msgid ""
-"SR-IOV is not integrated into the OpenStack Dashboard (horizon). Users must "
-"use the CLI or API to configure SR-IOV interfaces."
-msgstr ""
-"SR-IOV tidak terintegrasi ke dalam OpenStack Dashboard (horizon). Pengguna "
-"harus menggunakan CLI atau API untuk mengkonfigurasi antarmuka SR-IOV."
-
-msgid "SR-IOV with InfiniBand"
-msgstr "SR-IOV dengan InfiniBand"
-
-msgid "SRIOV"
-msgstr "SRIOV"
-
-msgid "SRIOV & SRIOV nic switch agent"
-msgstr "Agen SRIOV & SRIOV nic switch"
-
-msgid "SRIOV Nic Switch agent"
-msgstr "Agen SRIOV Nic Switch"
-
-msgid "SRIOV mechanism driver and SRIOV NIC switch agent"
-msgstr "Driver mekanisme SRIOV dan agen switch SRIOV NIC"
-
-msgid "SRIOV nic switch agent"
-msgstr "Agen SRIOV nic switch"
-
-msgid "Same as HostA"
-msgstr "Sama dengan HostA"
-
-msgid "Schedule BGP speaker to multiple agents."
-msgstr "Jadwalkan BGP speaker ke beberapa agen."
-
-msgid "Schedule the BGP speaker to an agent"
-msgstr "Jadwalkan speaker BGP ke agen"
-
-msgid ""
-"Second, associate the created policy with an existing neutron port. In order "
-"to do this, user extracts the port id to be associated to the already "
-"created policy. In the next example, we will assign the ``bw-limiter`` "
-"policy to the VM with IP address ``192.0.2.1``."
-msgstr ""
-"Kedua, kaitkan kebijakan yang dibuat dengan port neutron yang ada. Untuk "
-"melakukan ini, pengguna mengekstrak id port yang akan dikaitkan dengan "
-"kebijakan yang sudah dibuat. Pada contoh berikut, kita akan menetapkan "
-"kebijakan ``bw-limiter`` ke VM dengan alamat IP ``192.0.2.1``."
-
-msgid ""
-"Second, subnet pools can manage addresses across projects. The addresses are "
-"guaranteed not to overlap. If the addresses come from an externally routable "
-"pool then you know that all of the projects have addresses which are "
-"*routable* and unique. This can be useful in the following scenarios."
-msgstr ""
-"Kedua, kolam subnet dapat mengelola alamat di seluruh proyek. Alamat dijamin "
-"tidak tumpang tindih. Jika alamat berasal dari kolam eksternal routable maka "
-"Anda tahu bahwa semua proyek memiliki alamat yang *routable* dan unik. Hal "
-"ini dapat berguna dalam skenario berikut."
-
-msgid "Second, the router floating IP agent gateway external port:"
-msgstr "Kedua, port eksternal gerbang agen IP mengambang router:"
-
-msgid "Security"
-msgstr "Keamanan"
-
-msgid "Security Groups"
-msgstr "Grup keamanan"
-
-msgid "Security considerations"
-msgstr "Pertimbangan keamanan"
-
-msgid ""
-"Security group rules (10) on the provider bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (10) pada provider bridge menangani firewall dan "
-"pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules (13) on the self-service bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (13) pada self-service bridge menangani firewall "
-"dan pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules (14) on the provider bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (14) dari provider bridge menangani firewall dan "
-"pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules (14) on the security group bridge handle firewalling "
-"and connection tracking for the packet."
-msgstr ""
-"Security group rules (14) pada security group bridge menangani firewall dan "
-"pelacakan koneksi paket."
-
-msgid ""
-"Security group rules (17) on the security group bridge handle firewalling "
-"and connection tracking for the packet."
-msgstr ""
-"Security group rules (17) pada security group bridge menangani firewall dan "
-"pelacakan koneksi paket."
-
-msgid ""
-"Security group rules (18) on the security group bridge handle firewalling "
-"and connection tracking for the packet."
-msgstr ""
-"Security group rules (18) pada security group bridge menangani firewall dan "
-"koneksi pelacakan paket."
-
-msgid ""
-"Security group rules (18) on the self-service bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (18) pada self-service bridge menangani firewall "
-"dan pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules (21) on the security group bridge handle firewalling "
-"and connection tracking for the packet."
-msgstr ""
-"Security group rules (21) dari security group bridge menangani firewall dan "
-"pelacakan koneksi paket."
-
-msgid ""
-"Security group rules (22) on the security group bridge handle firewalling "
-"and connection tracking for the packet."
-msgstr ""
-"Security group rules (22) pada security group bridge menangani firewall dan "
-"pelacakan koneksi paket."
-
-msgid ""
-"Security group rules (3) on the provider bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (3) dari jembatan provider menangani firewall dan "
-"pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules (3) on the security group bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Security group rules (3) pada security group bridge menangani firewall dan "
-"pelacakan koneksi paket."
-
-msgid ""
-"Security group rules (3) on the self-service bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (3) pada self-service bridge menangani firewall dan "
-"pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules (30) on the security group bridge handle firewalling "
-"and connection tracking for the packet."
-msgstr ""
-"Security group rules (30) pada security group bridge menangani firewall dan "
-"pelacakan koneksi paket."
-
-msgid ""
-"Security group rules (9) on the self-service bridge handle firewalling and "
-"connection tracking for the packet."
-msgstr ""
-"Aturan kelompok keamanan (9) pada self-service bridge menangani firewall dan "
-"pelacakan koneksi untuk paket."
-
-msgid ""
-"Security group rules are stateful. Thus, allowing ingress TCP port 22 for "
-"secure shell automatically creates rules that allow return egress traffic "
-"and ICMP error messages involving those TCP connections."
-msgstr ""
-"Aturan kelompok keamanan menjadi stateful (tegas). Dengan demikian, "
-"mengizinkan ingress TCP port 22 untuk secure shell secara otomatis membuat "
-"aturan yang mengizinkan lalu lintas egress kembali dan pesan kesalahan ICMP "
-"yang melibatkan koneksi TCP."
-
-msgid "Security groups"
-msgstr "Kelompok keamanan"
-
-msgid ""
-"Security groups are not supported when using SR-IOV, thus, the firewall "
-"driver must be disabled. This can be done in the ``neutron.conf`` file."
-msgstr ""
-"Kelompok keamanan tidak didukung saat penggunaan SR-IOV, dengan demikian, "
-"driver firewall harus dinonaktifkan. Hal ini dapat dilakukan dalam file "
-"``neutron.conf``."
-
-msgid ""
-"Security groups provide a container for virtual firewall rules that control "
-"ingress (inbound to instances) and egress (outbound from instances) network "
-"traffic at the port level. Security groups use a default deny policy and "
-"only contain rules that allow specific traffic. Each port can reference one "
-"or more security groups in an additive fashion. The firewall driver "
-"translates security group rules to a configuration for the underlying packet "
-"filtering technology such as ``iptables``."
-msgstr ""
-"Kelompok keamanan menyediakan kontainer untuk aturan virtual firewall yang "
-"mengontrol ingress (inbound ke instance) dan egress (outbound dari instance) "
-"lalu lintas jaringan di tingkat port. Kelompok keamanan menggunakan default "
-"menyangkal kebijakan dan hanya berisi aturan yang mengizinkan lalu lintas "
-"tertentu. Setiap port dapat referensi satu atau lebih kelompok keamanan "
-"dalam mode aditif. Driver firewall menerjemahkan aturan kelompok keamanan "
-"untuk konfigurasi teknologi penyaringan paket yang mendasarinya seperti "
-"``iptables``."
-
-msgid ""
-"See :ref:`config-dns-int-ext-serv` for detailed instructions on how to "
-"create the externally accessible network."
-msgstr ""
-"Lihat :ref:`config-dns-int-ext-serv` untuk petunjuk rinci tentang cara "
-"membuat jaringan yang dapat diakses secara eksternal."
-
-msgid ""
-"See :ref:`config-dns-performance-considerations` for an explanation of the "
-"potential performance impact associated with this use case."
-msgstr ""
-"Lihat :ref:`config-dns-performance-considerations` untuk penjelasan potensi "
-"dampak kinerja yang terkait dengan kasus ini digunakan."
-
-msgid "See :ref:`config-routed-provider-networks` for more information."
-msgstr ""
-"Lihat :ref:`config-routed-provider-networks` untuk informasi lebih lanjut."
-
-msgid "See :ref:`config-subnet-pools` for more information."
-msgstr "Lihat :ref:`config-subnet-pools` untuk informasi lebih lanjut."
-
-msgid ""
-"See `bugs `__ for "
-"more information."
-msgstr ""
-"Lihat `bugs `__ "
-"untuk informasi lebih lanjut."
-
-msgid ""
-"See the `Installation Tutorials and Guides `_ "
-"and `Configuration Reference `_ for your "
-"OpenStack release to obtain the appropriate additional configuration for the "
-"``[DEFAULT]``, ``[database]``, ``[keystone_authtoken]``, ``[nova]``, and "
-"``[agent]`` sections."
-msgstr ""
-"Lihat `Installation Tutorials and Guides `_ dan "
-"`Configuration Reference `_ untuk rilis "
-"OpenStack Anda untuk mendapatkan konfigurasi tambahan yang sesuai untuk "
-"bagian ``[DEFAULT]``, ``[database]``, ``[keystone_authtoken]``, ``[nova]``, "
-"and ``[agent]``."
-
-msgid ""
-"See the `developer documentation `_ for more information."
-msgstr ""
-"Lihat `developer documentation `_ untuk informasi lebih lanjut."
-
-msgid "Segment"
-msgstr "Segment (segmen)"
-
-msgid "Segmentation ID"
-msgstr "Segmentation ID (ID segmentasi)"
-
-msgid "Segmentation type"
-msgstr "Segmentation type (tipe segmentasi)"
-
-msgid ""
-"Select the driver that manages virtual interfaces in ``/etc/neutron/"
-"lbaas_agent.ini``:"
-msgstr ""
-"Pilih driver yang mengelola antarmuka virtual dalam ``/etc/neutron/"
-"lbaas_agent.ini``:"
-
-msgid ""
-"Self-service (project/private/internal) networks including routers using "
-"IPv4 and IPv6"
-msgstr ""
-"Jaringan (proyek/private/internal) self-service meliputi routers menggunakan "
-"IPv4 dan IPv6"
-
-msgid "Self-service network 1 (VXLAN)"
-msgstr "Jaringan self-service 1 (VXLAN)"
-
-msgid "Self-service network 2 (VXLAN)"
-msgstr "Jaringan self-service 2 (VXLAN)"
-
-msgid ""
-"Self-service network 3 uses a unique IP address range 198.51.100.0/24 to "
-"demonstrate that the BGP speaker does not advertise prefixes outside of "
-"address scopes."
-msgstr ""
-"Self-service network 3 menggunakan range alamat IP yang unik 198.51.100.0/24 "
-"untuk menunjukkan bahwa BGP speaker tidak mengiklankan prefiks di luar "
-"lingkup alamat."
-
-msgid "Self-service networks"
-msgstr "Jaringan self-service"
-
-msgid ""
-"Self-service networks 1 and 2 use IP address ranges inside of the address "
-"scope."
-msgstr ""
-"Jaringan self-service 1 dan 2 menggunakan berkisar alamat IP dalam lingkup "
-"alamat."
-
-msgid ""
-"Self-service networks primarily enable general (non-privileged) projects to "
-"manage networks without involving administrators. These networks are "
-"entirely virtual and require virtual routers to interact with provider and "
-"external networks such as the Internet. Self-service networks also usually "
-"provide DHCP and metadata services to instances."
-msgstr ""
-"Jaringan self-service terutama mengaktifkan secara umum proyek (non-"
-"privileged) untuk mengelola jaringan tanpa melibatkan administrator. "
-"Jaringan ini sepenuhnya virtual dan membutuhkan router virtual untuk "
-"berinteraksi dengan provider dan jaringan eksternal seperti Internet. "
-"Jaringan self-service juga biasanya menyediakan DHCP dan layanan metadata "
-"untuk instance."
-
-msgid "Self-service networks:"
-msgstr "Self-service networks (jaringan swalayan):"
-
-msgid "Self-service router"
-msgstr "Self-service router"
-
-msgid ""
-"Separate firewall rule IDs or names with spaces. The order in which you "
-"specify the rules is important."
-msgstr ""
-"Pisahkan ID aturan firewall atau nama dengan spasi. Urutan menjadi penting, "
-"dimana Anda menentukan aturan urutan itu."
-
-msgid "Server"
-msgstr "Server"
-
-msgid "Service and component hierarchy"
-msgstr "Layanan dan hirarki komponen"
-
-msgid "Service function chaining"
-msgstr "Service function chaining (SFC)"
-
-msgid "Service subnets"
-msgstr "Subnet layanan"
-
-msgid ""
-"Service subnets enable operators to define valid port types for each subnet "
-"on a network without limiting networks to one subnet or manually creating "
-"ports with a specific subnet ID. Using this feature, operators can ensure "
-"that ports for instances and router interfaces, for example, always use "
-"different subnets."
-msgstr ""
-"Subnet layanan mengaktifkan operator untuk menentukan tipe port yang valid "
-"untuk setiap subnet pada jaringan tanpa membatasi jaringan ke satu subnet "
-"atau secara manual membuat port dengan ID subnet tertentu. Menggunakan fitur "
-"ini, operator dapat memastikan bahwa port untuk instance dan antarmuka "
-"router, misalnya, selalu menggunakan subnet yang berbeda."
-
-msgid "Services"
-msgstr "Layanan-layanan"
-
-msgid ""
-"Set ``AZAwareWeightScheduler`` to ``network_scheduler_driver`` in ``/etc/"
-"neutron/neutron.conf`` so that the Networking service schedules a network "
-"according to the availability zone:"
-msgstr ""
-"Atur ``AZAwareWeightScheduler`` ke ``network_scheduler_driver`` dalam ``/etc/"
-"neutron/neutron.conf`` sehingga layanan Networking menjadwal jaringan sesuai "
-"dengan zona ketersediaan:"
-
-msgid ""
-"Set ``AZLeastRoutersScheduler`` to ``router_scheduler_driver`` in file ``/"
-"etc/neutron/neutron.conf`` so that the Networking service schedules a router "
-"according to the availability zone:"
-msgstr ""
-"Atur ``AZLeastRoutersScheduler`` ke ``router_scheduler_driver`` dalam file "
-"``/etc/neutron/neutron.conf`` sehingga layanan Networking menjadwal router "
-"sesuai dengan zona ketersediaan:"
-
-msgid ""
-"Set the \"has_transitioned\" flag in the Compute hypervisor database/config."
-msgstr "Atur flag \"has_transitioned\" di Compute hypervisor database/config."
-
-msgid "Set the ``ha`` attribute of the router to ``True``."
-msgstr "Atur attribut router ``ha`` ke ``True``."
-
-msgid ""
-"Set the admin_state_up to ``False``. This will severe south-north "
-"connections until admin_state_up is set to ``True`` again."
-msgstr ""
-"Atur admin_state_up ke ``False``. Ini akan ada koneksi selatan-utara yang "
-"parah sampai admin_state_up diatur ke ``True`` kembali."
-
-msgid ""
-"Set the admin_state_up to ``True``. After this, south-north connections can "
-"start."
-msgstr ""
-"Atur admin_state_up ke ``True``. Setelah ini, koneksi selatan-utara dapat "
-"mulai."
-
-msgid ""
-"Set the external gateway for the router, which will create an interface and "
-"allocate an IP address on demo-ext-net:"
-msgstr ""
-"Atur gateway eksternal untuk router, yang akan membuat sebuah antarmuka dan "
-"mengalokasikan alamat IP pada demo-ext-net:"
-
-msgid ""
-"Set the following configuration options in file ``/etc/neutron/neutron."
-"conf`` so that you get DHCP high availability."
-msgstr ""
-"Atur opsi konfigurasi berikut dalam file ``/etc/neutron/neutron.conf`` "
-"sehingga Anda mendapatkan DHCP ketersediaan tinggi."
-
-msgid ""
-"Set the following configuration options in file ``/etc/neutron/neutron."
-"conf`` so that you get L3 high availability."
-msgstr ""
-"Atur opsi konfigurasi berikut dalam file ``/etc/neutron/neutron.conf`` "
-"sehingga Anda mendapatkan L3 ketersediaan tinggi."
-
-msgid "Set up a default external network"
-msgstr "Mengatur jaringan eksternal standar"
-
-msgid ""
-"Setting ``ipv6_ra_mode`` to ``slaac`` will result in OpenStack Networking "
-"routers being configured to send RA packets, when they are created. This "
-"results in the following values set for the address configuration flags in "
-"the RA messages:"
-msgstr ""
-"Pengaturan ``ipv6_ra_mode`` untuk ``slaac`` akan menghasilkan router "
-"OpenStack Networking yang dikonfigurasi untuk mengirim paket RA, ketika "
-"mereka diciptakan. Hal ini menyebabkan nilai berikut ditetapkan untuk flag "
-"konfigurasi alamat dalam pesan RA:"
-
-msgid "Setting quotas for LBaaS v2"
-msgstr "Penetapan kuota untuk LBaaS v2"
-
-msgid ""
-"Setting up an external network is described in `OpenStack Administrator "
-"Guide `_. Assuming the external network to be used for the auto-allocation "
-"feature is named ``public``, make it the ``default`` external network with "
-"the following command:"
-msgstr ""
-"Menyiapkan jaringan eksternal dijelaskan di `OpenStack Administrator Guide "
-"`_. "
-"Dengan asumsi jaringan eksternal yang akan digunakan untuk fitur alokasi "
-"otomatis dinamakan ``public``, jadikan jaringan eksternal ``default`` dengan "
-"perintah berikut:"
-
-msgid "Sharing a QoS policy with specific projects"
-msgstr "Berbagi kebijakan QoS dengan proyek tertentu"
-
-msgid "Sharing a network with specific projects"
-msgstr "Berbagi jaringan dengan proyek tertentu"
-
-msgid ""
-"Sharing an object with a specific project is accomplished by creating a "
-"policy entry that permits the target project the ``access_as_shared`` action "
-"on that object."
-msgstr ""
-"Berbagi sebuah objek dengan proyek tertentu dilakukan dengan pembuatan entri "
-"kebijakan yang mengizinkan proyek target aksi `` access_as_shared`` pada "
-"objek itu."
-
-msgid "Sharing an object with specific projects"
-msgstr "Berbagi obyek dengan proyek tertentu"
-
-msgid "Show agent details."
-msgstr "Tampilkan detail agen."
-
-msgid "Show available dynamic routing agents."
-msgstr "Tampilkan agen routing dinamis yang tersedia."
-
-msgid "Show trunk details to get the ``port_id`` of the trunk."
-msgstr "Tampilkan detail trunk untuk mendapatkan ``port_id`` dari trunk."
-
-msgid ""
-"Similar to :ref:`deploy-ovs-selfservice-networktrafficflow-ns1`, except the "
-"router namespace on the network node becomes the SNAT namespace. The network "
-"node still contains the router namespace, but it serves no purpose in this "
-"case."
-msgstr ""
-"Mirip dengan :ref:`deploy-ovs-selfservice-networktrafficflow-ns1`, kecuali "
-"namespace router pada node jaringan menjadi namespace SNAT. Node jaringan "
-"masih mengandung namespace router, tetapi node jaringan ini melayani tanpa "
-"tujuan dalam hal ini."
-
-msgid ""
-"Similar to conventional networking, layer-2 (switching) handles transit of "
-"traffic between ports on the same segment and layer-3 (routing) handles "
-"transit of traffic between segments."
-msgstr ""
-"Mirip dengan jaringan konvensional, lapisan-2 (switching) menangani transit "
-"lalu lintas antara port pada segmen yang sama dan lapisan-3 (routing) "
-"menangani transit lalu lintas antara segmen."
-
-msgid ""
-"Similar to legacy HA routers, DVR/SNAT HA routers provide a quick fail over "
-"of the SNAT service to a backup DVR/SNAT router on an l3-agent running on a "
-"different node."
-msgstr ""
-"Mirip dengan legacy HA router, DVR/SNAT HA router menyediakan penanganan "
-"kegagalan (failover) cepat layanan SNAT ke router DVR/SNAT backup pada l3-"
-"agent yang berjalan pada node yang berbeda."
-
-msgid ""
-"Similar to the classic scenario, all network traffic on a project network "
-"that requires routing actively traverses only one network node regardless of "
-"the quantity of network nodes providing HA for the router. Therefore, this "
-"high-availability implementation primarily addresses failure situations "
-"instead of bandwidth constraints that limit performance. However, it "
-"supports random distribution of routers on different network nodes to reduce "
-"the chances of bandwidth constraints and to improve scaling."
-msgstr ""
-"Serupa dengan skenario klasik, semua lalu lintas jaringan pada jaringan "
-"proyek yang memerlukan routing yang aktif hanya melintasi satu node jaringan "
-"terlepas dari kuantitas node jaringan menyediakan HA untuk router. Oleh "
-"karena itu, pelaksanaan ketersediaan tinggi ini terutama membahas situasi "
-"kegagalan bukan kendala bandwidth yang membatasi kinerja. Namun, hal itu "
-"mendukung distribusi router acak pada node jaringan yang berbeda untuk "
-"mengurangi kemungkinan kendala bandwidth dan untuk meningkatkan skala."
-
-msgid ""
-"Similar to the self-service deployment example, this configuration supports "
-"multiple VXLAN self-service networks. After enabling high-availability, all "
-"additional routers use VRRP. The following procedure creates an additional "
-"self-service network and router. The Networking service also supports adding "
-"high-availability to existing routers. However, the procedure requires "
-"administratively disabling and enabling each router which temporarily "
-"interrupts network connectivity for self-service networks with interfaces on "
-"that router."
-msgstr ""
-"Serupa dengan contoh pengerahan self-service, konfigurasi ini mendukung "
-"beberapa VXLAN jaringan self-service. Setelah mengaktifkan high-"
-"availability, semua router tambahan menggunakan VRRP. Prosedur berikut "
-"membuat jaringan self-service tambahan dan router. Layanan Networking juga "
-"mendukung penambahan high-availability untuk router yang ada. Namun, "
-"prosedur ini secara administratif membutuhkan penonaktifkan dan pengaktifkan "
-"setiap router yang sementara mengganggu konektivitas jaringan untuk jaringan "
-"self-service dengan interface pada router itu."
-
-msgid ""
-"Similar to the self-service deployment example, this configuration supports "
-"multiple VXLAN self-service networks. After enabling high-availability, all "
-"additional routers use distributed routing. The following procedure creates "
-"an additional self-service network and router. The Networking service also "
-"supports adding distributed routing to existing routers."
-msgstr ""
-"Serupa dengan contoh pengerahan self-service, konfigurasi ini mendukung "
-"beberapa jaringan self-service VXLAN. Setelah mengaktifkan ketersediaan "
-"tinggi, semua router tambahan menggunakan routing terdistribusikan. Prosedur "
-"berikut membuat jaringan self-service tambahan dan router. Layanan "
-"Networking juga mendukung penambahan routing terdistribusikan untuk router "
-"yang ada."
-
-msgid ""
-"Similarly, when the ``l3_ha = True`` flag is configured, routers created by "
-"all users default to HA."
-msgstr ""
-"Demikian pula, ketika flag ``l3_ha = True`` dikonfigurasi, router yang "
-"dibuat oleh semua pengguna menjadi default untuk HA."
-
-msgid ""
-"Since Liberty, Networking maintains two parallel Alembic migration branches."
-msgstr ""
-"Sejak Liberty, Networking mempertahankan dua cabang migrasi Alembic yang "
-"paralel."
-
-msgid "Single large layer-2 network"
-msgstr "Jaringan lapisan-2 besar tunggal"
-
-msgid ""
-"Single large layer-2 networks become complex at scale and involve "
-"significant failure domains."
-msgstr ""
-"Jaringan lapisan-2 besar tunggal menjadi kompleks pada skala dan melibatkan "
-"failure domain yang signifikan."
-
-msgid "Single stack IPv6 project networking"
-msgstr "Satu tumpukan (stack) jaringan proyek IPv6"
-
-msgid ""
-"Some underlying physical network architectures contain a unique layer-2 "
-"network for overlay networks using protocols such as VXLAN and GRE."
-msgstr ""
-"Beberapa arsitektur jaringan fisik yang mendasarinya memuat jaringan "
-"lapisan-2 yang unik untuk jaringan overlay menggunakan protokol seperti "
-"VXLAN dan GRE."
-
-msgid ""
-"Some underlying physical network architectures contain multiple layer-2 "
-"networks with different MTU values. You can configure each flat or VLAN "
-"provider network in the bridge or interface mapping options of the layer-2 "
-"agent to reference a unique MTU value."
-msgstr ""
-"Beberapa arsitektur jaringan fisik yang mendasari berisi beberapa jaringan "
-"lapisan-2 dengan nilai-nilai MTU yang berbeda. Anda dapat mengkonfigurasi "
-"setiap jaringan penyedia VLAN atau datar di jembatan atau opsi pemetaan "
-"antarmuka dari agen lapisan-2 untuk referensi nilai MTU yang unik."
-
-msgid "Source a regular (non-administrative) project credentials."
-msgstr "Dapatkan sumber kredensial proyek (non-administratif) biasa."
-
-msgid ""
-"Source the administrative project credentials and list the enabled "
-"extensions."
-msgstr ""
-"Dapatkan sumber kredensial proyek administrasi dan daftar ekstensi yang "
-"diaktifkan."
-
-msgid "Source the administrative project credentials."
-msgstr "Dapatkan sumber kredensial proyek administrasi."
-
-msgid "Source the credentials of the project that owns the ``net1`` network."
-msgstr "Sumber kepercayaan dari proyek yang memiliki jaringan ``net1``."
-
-msgid ""
-"Source the necessary project credentials. The administrative project can "
-"delete resources for all other projects. A regular project can delete its "
-"own network resources and those belonging to other projects for which it has "
-"sufficient access."
-msgstr ""
-"Dapatkan sumber kredensial proyek yang diperlukan. Proyek administrasi dapat "
-"menghapus sumber daya untuk semua proyek lainnya. Sebuah proyek yang teratur "
-"dapat menghapus sumber daya jaringan sendiri dan sumber daya yang termasuk "
-"proyek lain yang memiliki akses yang memadai."
-
-msgid "Specialized"
-msgstr "Specialized"
-
-msgid "Start DHCP agent on HostB. The VM gets the wanted IP again."
-msgstr "Mulailah agen DHCP pada HostB. VM mendapatkan yang diinginkan IP lagi."
-
-msgid ""
-"Start by creating a listener, attaching a pool, and then adding members:"
-msgstr ""
-"Mulailah dengan membuat pendengar, melekatkan kolam, dan kemudian "
-"menambahkan anggota:"
-
-msgid ""
-"Start by creating a load balancer on a network. In this example, the "
-"``private`` network is an isolated network with two web server instances:"
-msgstr ""
-"Mulailah dengan pembuatan penyeimbang beban pada jaringan. Dalam contoh ini, "
-"jaringan ``private`` merupakan jaringan terisolasi dengan dua instance web "
-"server:"
-
-msgid ""
-"Start neutron-server in intended final config, except with REST API "
-"restricted to read-write only by nova-api."
-msgstr ""
-"Mulailah neutron-server di config akhir yang ditujukan, kecuali dengan REST "
-"API yang terbatas untuk hanya read-write dengan nova-api."
-
-msgid "Start the LBaaS v2 agent:"
-msgstr "Mulai agen LBaaS v2:"
-
-msgid "Start the following services:"
-msgstr "Mulai layanan berikut:"
-
-msgid ""
-"Starting with the Liberty release, OpenStack Networking includes a pluggable "
-"interface for the IP Address Management (IPAM) function. This interface "
-"creates a driver framework for the allocation and de-allocation of subnets "
-"and IP addresses, enabling the integration of alternate IPAM implementations "
-"or third-party IP Address Management systems."
-msgstr ""
-"Dimulai dari rilis Liberty, OpenStack Networking mencakup antarmuka "
-"pluggable untuk fungsi IP Address Manajemen (IPAM). Interface ini "
-"menciptakan kerangka kerja driver untuk alokasi dan de-alokasi subnet dan "
-"alamat IP, yang memungkinkan integrasi implementasi IPAM alternatif atau "
-"pihak ketiga sistem IP Address Manajemen."
-
-msgid "Stateless Address Auto Configuration (SLAAC)"
-msgstr "Stateless Address Auto Configuration (SLAAC)"
-
-msgid ""
-"Stop the DHCP agent on HostA. Besides stopping the ``neutron-dhcp-agent`` "
-"binary, you must stop the ``dnsmasq`` processes."
-msgstr ""
-"Hentikan agen DHCP pada HostA. Selain penghentian binary ``neutron-dhcp-"
-"agent``, Anda harus menghentikan proses ``dnsmasq``."
-
-msgid "Stop the DHCP agent on HostB too."
-msgstr "Hhentikan agen DHCP pada HostB juga."
-
-msgid "Subnet pools"
-msgstr "Subnet pools (kolam subnet)"
-
-msgid ""
-"Subnet pools have a quota system which is a little bit different than other "
-"quotas in Neutron. Other quotas in Neutron count discrete instances of an "
-"object against a quota. Each time you create something like a router, "
-"network, or a port, it uses one from your total quota."
-msgstr ""
-"Kolam subnet memiliki sistem kuota yang sedikit berbeda dari kuota lainnya "
-"di Neutron. Kuota lainnya di Neutron menghitung instance diskrit suatu obyek "
-"terhadap kuota. Setiap kali Anda membuat sesuatu seperti router, jaringan, "
-"atau port, menggunakan salah satu dari jumlah kuota."
-
-msgid ""
-"Subnet pools have been made available since the Kilo release. It is a simple "
-"feature that has the potential to improve your workflow considerably. It "
-"also provides a building block from which other new features will be built "
-"in to OpenStack Networking."
-msgstr ""
-"Kolam subnet telah tersedia sejak rilis Kilo. Ini adalah fitur sederhana "
-"yang memiliki potensi untuk meningkatkan alur kerja Anda secara mencukupi. "
-"Ini juga menyediakan blok bangunan dimana fitur baru lainnya akan dibangun "
-"untuk OpenStack Networking."
-
-msgid "Subnets"
-msgstr "Subnet-subnet"
-
-msgid "Subnets and ARP"
-msgstr "Subnets dan ARP"
-
-msgid ""
-"Subports can be added to a trunk in two ways: creating the trunk with "
-"subports or adding subports to an existing trunk."
-msgstr ""
-"Subports dapat ditambahkan ke sebuah trunk dengan dua cara: menciptakan "
-"trunk dengan subports atau menambahkan subports ke trunk yang ada."
-
-msgid "Supported Ethernet controllers"
-msgstr "Ethernet controller yang didukung"
-
-msgid "Supported QoS rule types"
-msgstr " Tipe aturan Quality of Service (QoS) yang didukung"
-
-msgid "Supported objects for sharing with specific projects"
-msgstr "Objek yang didukung untuk berbagi dengan proyek tertentu"
-
-msgid "Supports L2 firewalling (VM ports)"
-msgstr "Mendukung L2 firewall (port VM)"
-
-msgid "Supports L3 firewalling for router ports"
-msgstr "Mendukung L3 firewall untuk port router"
-
-msgid "Supports L3 firewalling for routers"
-msgstr "Mendukung L3 firewall untuk router"
-
-msgid ""
-"Supports only instance ports. Ports for DHCP and layer-3 (routing) services "
-"must use another mechanism driver such as Linux bridge or Open vSwitch (OVS)."
-msgstr ""
-"Hanya mendukung instance port. Port untuk layanan DHCP dan layer-3 "
-"(routing) harus menggunakan driver mekanisme lain seperti jembatan Linux "
-"atau Open vSwitch (OVS)."
-
-msgid "Supports only untagged (flat) and tagged (VLAN) networks."
-msgstr "Hanya mendukung jaringan untagged (flat) dan tagged (VLAN)."
-
-msgid "Switches"
-msgstr "Switches"
-
-msgid ""
-"Switches are Multi-Input Multi-Output (MIMO) devices that enable packets to "
-"travel from one node to another. Switches connect hosts that belong to the "
-"same layer-2 network. Switches enable forwarding of the packet received on "
-"one port (input) to another port (output) so that they reach the desired "
-"destination node. Switches operate at layer-2 in the networking model. They "
-"forward the traffic based on the destination Ethernet address in the packet "
-"header."
-msgstr ""
-"Switch adalah perangkat Multi-Input Multi-Output (MIMO) yang mengaktifkan "
-"paket untuk perjalanan dari satu node ke yang lain. Switch menghubungkan "
-"host yang tergabung dalam jaringan lapisan-2 sama. Switch mengaktifkan "
-"penerusan paket yang diterima pada satu port (input) ke port lain (output) "
-"sehingga mereka mencapai node tujuan yang diinginkan. Switch beroperasi pada "
-"lapisan-2 dalam model jaringan. Mereka meneruskan lalu lintas berdasarkan "
-"alamat Ethernet tujuan dalam header paket."
-
-msgid "TCP/UDP/ICMP"
-msgstr "TCP/UDP/ICMP"
-
-msgid "TODO"
-msgstr "TODO"
-
-msgid "Term"
-msgstr "Term"
-
-msgid ""
-"Test IPv4 and IPv6 connectivity to the Internet or other external network."
-msgstr ""
-"Lakukan uji konektivitas IPv4 dan IPv6 ke Internet atau jaringan eksternal "
-"lainnya."
-
-msgid ""
-"The *Internet Control Message Protocol* (ICMP) is a protocol used for "
-"sending control messages over an IP network. For example, a router that "
-"receives an IP packet may send an ICMP packet back to the source if there is "
-"no route in the router's routing table that corresponds to the destination "
-"address (ICMP code 1, destination host unreachable) or if the IP packet is "
-"too large for the router to handle (ICMP code 4, fragmentation required and "
-"\"don't fragment\" flag is set)."
-msgstr ""
-"*Internet Control Message Protocol* (ICMP) adalah protokol yang digunakan "
-"untuk mengirimkan pesan kontrol melintasi jaringan IP. Sebagai contoh, "
-"sebuah router yang menerima paket IP dapat mengirim paket ICMP kembali ke "
-"sumber jika tidak ada rute dalam tabel routing router yang sesuai dengan "
-"alamat tujuan (ICMP code 1, host tujuan tidak terjangkau) atau jika paket IP "
-"terlalu besar untuk router untuk menangani (ICMP code 4, fragmentasi "
-"dibutuhkan dan flag \"don't fragment\" diatur)."
-
-msgid ""
-"The *Transmission Control Protocol* (TCP) is the most commonly used layer 4 "
-"protocol in networked applications. TCP is a *connection-oriented* protocol: "
-"it uses a client-server model where a client connects to a server, where "
-"*server* refers to the application that receives connections. The typical "
-"interaction in a TCP-based application proceeds as follows:"
-msgstr ""
-"*Transmission Control Protocol* (TCP) adalah protokol yang paling umum "
-"digunakan lapisan 4 dalam aplikasi jaringan. TCP adalah protokol *connection-"
-"oriented*: menggunakan model client-server dimana klien terhubung ke server, "
-"dimana *server * mengacu pada aplikasi yang menerima koneksi. Interaksi yang "
-"khas dalam aplikasi berbasis TCP mengikuti sebagai berikut:"
-
-msgid ""
-"The *User Datagram Protocol* (UDP) is another layer 4 protocol that is the "
-"basis of several well-known networking protocols. UDP is a *connectionless* "
-"protocol: two applications that communicate over UDP do not need to "
-"establish a connection before exchanging data. UDP is also an *unreliable* "
-"protocol. The operating system does not attempt to retransmit or even detect "
-"lost UDP packets. The operating system also does not provide any guarantee "
-"that the receiving application sees the UDP packets in the same order that "
-"they were sent in."
-msgstr ""
-"*User Datagram Protocol* (UDP) adalah satu protokol lapisan 4 lain yang "
-"merupakan dasar dari beberapa protokol jaringan yang terkenal. UDP adalah "
-"protokol *connectionless*: dua aplikasi yang berkomunikasi melalui UDP tidak "
-"perlu membuat sambungan sebelum bertukar data. UDP juga protokol "
-"*unreliable*. Sistem operasi tidak melakukan pengiriman ulang atau bahkan "
-"mendeteksi paket UDP hilang. Sistem operasi juga tidak memberikan jaminan "
-"bahwa aplikasi penerima melihat paket UDP dalam urutan yang sama ketika "
-"mereka dikirim."
-
-msgid ""
-"The *max_l3_agents_per_router* determine the number of backup DVR/SNAT "
-"routers which will be instantiated."
-msgstr ""
-"*max_l3_agents_per_router* menentukan jumlah cadangan router DVR/SNAT yang "
-"akan dipakai."
-
-msgid ""
-"The :command:`openstack network agent show` command shows details for a "
-"specified agent:"
-msgstr ""
-"Perintah :command:`openstack network agent show` menunjukkan rincian untuk "
-"agen tertentu:"
-
-msgid ""
-"The :command:`ping` and :command:`mtr` Linux command-line tools are two "
-"examples of network utilities that use ICMP."
-msgstr ""
-"Linux command-line tool :command:`ping` dan :command:`mtr` adalah dua "
-"contoh dari jaringan utilitas yang menggunakan ICMP."
-
-msgid ""
-"The API allows searching/filtering of the ``GET /v2.0/networks`` API. The "
-"following query parameters are supported:"
-msgstr ""
-"API mengizinkan searching/filtering dari ``GET /v2.0/networks`` API. "
-"Parameter permintaan berikut ini didukung:"
-
-msgid ""
-"The BGP speaker advertises the next-hop IP address for eligible self-service "
-"networks and floating IP addresses for instances using those networks."
-msgstr ""
-"Speaker BGP menyiarkan update dan perubahan alamat IP next-hop untuk "
-"jaringan self-service yang memenuhi syarat dan alamat IP mengambang untuk "
-"instance menggunakan jaringan tersebut."
-
-msgid ""
-"The BGP speaker associates with the external network that provides a gateway "
-"on the router."
-msgstr ""
-"BGP speaker berassosiasi dengan jaringan eksternal yang menyediakan gateway "
-"pada router."
-
-msgid ""
-"The BGP speaker has the ``advertise_floating_ip_host_routes`` attribute set "
-"to ``True``."
-msgstr ""
-"BGP speaker memiliki ``advertise_floating_ip_host_routes`` atribut diatur ke "
-"``True``."
-
-msgid ""
-"The BGP speaker has the ``advertise_tenant_networks`` attribute set to "
-"``True``."
-msgstr ""
-"BGP speaker memiliki ``advertise_tenant_networks`` atribut diatur ke "
-"``True``."
-
-msgid ""
-"The Compute REST API is available throughout the entire process, although "
-"there is a brief period where it is made read-only during a database "
-"migration. The Networking REST API will need to expose (to nova-api) all "
-"details necessary for reconstructing the information previously held in the "
-"legacy networking database."
-msgstr ""
-"Compute REST API tersedia di sepnajang seluruh proses, meskipun ada periode "
-"singkat dimana itu dibuat read-only selama migrasi database. Networking REST "
-"API memerlukan untuk mengekspos (ke nova-api) semua rincian yang diperlukan "
-"untuk merekonstruksi informasi yang sebelumnya diadakan di database jaringan "
-"legacy."
-
-msgid ""
-"The DHCP agent is responsible for :term:`DHCP ` and RADVD (Router Advertisement Daemon) services. It "
-"requires a running L2 agent on the same node."
-msgstr ""
-"Agen DHCP bertanggung jawab untuk :term:`DHCP ` dan layanan RADVD (Router Advertisement Daemon). Hal ini "
-"membutuhkan agen L2 berjalan pada node yang sama."
-
-msgid ""
-"The DHCP agent provides an appropriate MTU value to instances using IPv4, "
-"while the L3 agent provides an appropriate MTU value to instances using "
-"IPv6. IPv6 uses RA via the L3 agent because the DHCP agent only supports "
-"IPv4. Instances using IPv4 and IPv6 should obtain the same MTU value "
-"regardless of method."
-msgstr ""
-"Agen DHCP memberikan nilai MTU yang tepat untuk instance penggunaan IPv4, "
-"sementara agen L3 memberikan nilai MTU yang tepat untuk instance penggunaan "
-"IPv6. IPv6 menggunakan RA melalui agen L3 karena agen DHCP hanya mendukung "
-"IPv4. Instance yang menggunakan IPv4 dan IPv6 harus mendapatkan nilai MTU "
-"yang sama terlepas dari metode."
-
-msgid ""
-"The Dashboard panels for managing LBaaS v2 are available starting with the "
-"Mitaka release."
-msgstr ""
-"Panel Dashboard untuk mengelola LBaaS v2 tersedia mulai dari rilis Mitaka."
-
-msgid ""
-"The DevStack documentation offers a `simple method to deploy Octavia "
-"`_ and test the service with redundant load balancer instances. If you "
-"already have Octavia installed and configured within your environment, you "
-"can configure the Network service to use Octavia:"
-msgstr ""
-"Dokumentasi DevStack menawarkan `simple method to deploy Octavia `_ "
-"dan menguji layanan dengan instance penyeimbang beban redundant "
-"(berlebihan). Jika Anda sudah memiliki Octavia terinstal dan dikonfigurasi "
-"dalam lingkungan Anda, Anda dapat mengkonfigurasi layanan jaringan untuk "
-"menggunakan Octavia:"
-
-msgid ""
-"The Firewall-as-a-Service (FWaaS) API is an experimental API that enables "
-"early adopters and vendors to test their networking implementations."
-msgstr ""
-"Firewall-as-a-Service (FWaaS) API adalah API eksperimental yang mengaktifkan "
-"pengadopsi awal dan vendor untuk menguji implementasi jaringan mereka."
-
-msgid ""
-"The Firewall-as-a-Service (FWaaS) plug-in applies firewalls to OpenStack "
-"objects such as projects, routers, and router ports."
-msgstr ""
-"Firewall-as-a-Service (FWaaS) plug-in memberlakukan firewall untuk objek "
-"OpenStack seperti proyek, router, dan port router."
-
-msgid ""
-"The ID returned by this command is a network which can be used for booting a "
-"VM."
-msgstr ""
-"ID yang dikembalikan oleh perintah ini adalah jaringan yang dapat digunakan "
-"untuk booting VM."
-
-msgid ""
-"The IP address allocation pool starting at ``.11`` improves clarity of the "
-"diagrams. You can safely omit it."
-msgstr ""
-"Kolam alokasi alamat IP mulai ``.11`` meningkatkan kejelasan diagram. Anda "
-"dapat dengan aman menghilangkan itu."
-
-msgid ""
-"The IPv4 address resides in a private IP address range (RFC1918). Thus, the "
-"Networking service performs source network address translation (SNAT) for "
-"the instance to access external networks such as the Internet. Access from "
-"external networks such as the Internet to the instance requires a floating "
-"IPv4 address. The Networking service performs destination network address "
-"translation (DNAT) from the floating IPv4 address to the instance IPv4 "
-"address on the self-service network. On the other hand, the Networking "
-"service architecture for IPv6 lacks support for NAT due to the significantly "
-"larger address space and complexity of NAT. Thus, floating IP addresses do "
-"not exist for IPv6 and the Networking service only performs routing for IPv6 "
-"subnets on self-service networks. In other words, you cannot rely on NAT to "
-"\"hide\" instances with IPv4 and IPv6 addresses or only IPv6 addresses and "
-"must properly implement security groups to restrict access."
-msgstr ""
-"Alamat IPv4 berada dalam kisaran alamat IP pribadi (RFC1918). Dengan "
-"demikian, layanan Networking melakukan penterjemahan alamat jaringan sumber "
-"(SNAT) instance untuk mengakses jaringan eksternal seperti Internet. Akses "
-"dari jaringan eksternal seperti Internet untuk instance memerlukan alamat "
-"IPv4 mengambang. Layanan Networking melakukan penterjemahan alamat jaringan "
-"tujuan (DNAT) dari alamat IPv4 mengambang ke alamat IPv4 instance pada "
-"jaringan self-service . Di sisi lain, arsitektur layanan Networking untuk "
-"IPv6 tidak memiliki dukungan untuk NAT karena ruang alamat lebih besar "
-"(larger address space) secara nyata dan kompleksitas NAT. Dengan demikian, "
-"alamat IP mengambang tidak ada untuk IPv6 dan layanan Networking hanya "
-"melakukan routing untuk subnet IPv6 pada jaringan self-service. Dengan kata "
-"lain, Anda tidak dapat mengandalkan NAT untuk \"hide\" instance dengan "
-"alamat IPv4 dan IPv6 atau hanya alamat IPv6 dan harus menerapkan secara "
-"benar kelompok keamanan untuk membatasi akses."
-
-msgid ""
-"The Internet Protocol (IP) specifies how to route packets between hosts that "
-"are connected to different local networks. IP relies on special network "
-"hosts called *routers* or *gateways*. A router is a host that is connected "
-"to at least two local networks and can forward IP packets from one local "
-"network to another. A router has multiple IP addresses: one for each of the "
-"networks it is connected to."
-msgstr ""
-"Internet Protocol (IP) menentukan bagaimana rute paket antara host yang "
-"terhubung ke jaringan lokal yang berbeda. IP bergantung pada host jaringan "
-"khusus yang disebut *routers* atau *gateways*. Sebuah router adalah host "
-"yang terhubung ke setidaknya dua jaringan lokal dan dapat meneruskan paket "
-"IP dari satu jaringan lokal ke yang lain. Sebuah router memiliki beberapa "
-"alamat IP: satu untuk masing-masing jaringan dimana router terhubung."
-
-msgid ""
-"The L3 agent offers advanced layer 3 services, like virtual Routers and "
-"Floating IPs. It requires an L2 agent running in parallel."
-msgstr ""
-"Agen L3 menawarkan layanan lapisan 3 lanjutan, seperti Router virtual dan IP "
-"mengambang. Hal ini membutuhkan agen L2 berjalan secara paralel."
-
-msgid ""
-"The L3 metering agent enables layer3 traffic metering. It requires a running "
-"L3 agent on the same node."
-msgstr ""
-"Agen metering L3 mengaktifkan metering lalu lintas layer3. Hal ini "
-"membutuhkan agen L3 berjalan pada node yang sama."
-
-msgid ""
-"The LBaaS v2 agent collects four types of statistics for each load balancer "
-"every six seconds. Users can query these statistics with the :command:"
-"`neutron lbaas-loadbalancer-stats` command:"
-msgstr ""
-"Agen LBaaS v2 mengumpulkan empat jenis statistik untuk setiap load balancer "
-"setiap enam detik. Pengguna dapat query statistik ini dengan perintah :"
-"command:`neutron lbaas-loadbalancer-stats`:"
-
-msgid ""
-"The Linux bridge agent configures Linux bridges to realize L2 networks for "
-"OpenStack resources."
-msgstr ""
-"Agen jembatan Linux mengkonfigurasi jembatan Linux untuk mewujudkan jaringan "
-"L2 untuk sumber daya OpenStack."
-
-msgid ""
-"The Linux bridge agent lacks support for other overlay protocols such as GRE "
-"and Geneve."
-msgstr ""
-"Agen jembatan Linux memiliki dukungan untuk protokol overlay lain seperti "
-"GRE dan Geneve."
-
-msgid ""
-"The Linux bridge mechanism driver uses only Linux bridges and ``veth`` pairs "
-"as interconnection devices. A layer-2 agent manages Linux bridges on each "
-"compute node and any other node that provides layer-3 (routing), DHCP, "
-"metadata, or other network services."
-msgstr ""
-"Driver mekanisme jembatan Linux hanya menggunakan jembatan Linux dan pasang "
-"``veth`` sebagai perangkat interkoneksi. Agen lapisan-2 mengelola jembatan "
-"Linux pada setiap node komputasi dan node lain yang menyediakan lapisan-3 "
-"(routing), DHCP, metadata, atau layanan jaringan lainnya."
-
-msgid ""
-"The Load-Balancer-as-a-Service (LBaaS) API provisions and configures load "
-"balancers. The reference implementation is based on the HAProxy software "
-"load balancer."
-msgstr ""
-"Load-Balancer-as-a-Service (LBaaS) API menyediakan dan mengkonfigurasi "
-"penyeimbang beban. Implementasi referensi didasarkan pada penyeimbang beban "
-"perangkat lunak HAProxy."
-
-msgid "The ML2 plug-in supports trunking with the following mechanism drivers:"
-msgstr ""
-"ML2 plug-in mendukung trunking dengan driver mekanisme sebagai berikut:"
-
-msgid ""
-"The MacVTap agent uses kernel MacVTap devices for realizing L2 networks for "
-"OpenStack instances. Network attachments for other resources like routers, "
-"DHCP, and so on are not supported."
-msgstr ""
-"Agen MacVTap menggunakan perangkat MacVTap kernel untuk mewujudkan jaringan "
-"L2 untuk instance OpenStack. Network attachment untuk sumber daya lain "
-"seperti router, DHCP, dan sebagainya tidak didukung."
-
-msgid ""
-"The Macvtap mechanism driver for the ML2 plug-in generally increases network "
-"performance of instances."
-msgstr ""
-"Driver mekanisme Macvtap untuk ML2 plug-in umumnya meningkatkan kinerja "
-"jaringan instance."
-
-msgid ""
-"The Macvtap mechanism driver only applies to compute nodes. Otherwise, the "
-"environment resembles the prerequisite deployment example."
-msgstr ""
-"Driver mekanisme Macvtap hanya berlaku untuk komputasi node. Jika tidak, "
-"lingkungan menyerupai contoh prasyarat pengerahan."
-
-msgid ""
-"The Metadata agent allows instances to access cloud-init meta data and user "
-"data via the network. It requires a running L2 agent on the same node."
-msgstr ""
-"Agen Metadata memungkinkan instance untuk mengakses data meta cloud-init dan "
-"data pengguna melalui jaringan. Hal ini membutuhkan agen L2 berjalan pada "
-"node yang sama."
-
-msgid ""
-"The Modular Layer 2 (ML2) neutron plug-in is a framework allowing OpenStack "
-"Networking to simultaneously use the variety of layer 2 networking "
-"technologies found in complex real-world data centers. The ML2 framework "
-"distinguishes between the two kinds of drivers that can be configured:"
-msgstr ""
-"Modular Layer 2 (ML2) neutron plug-in adalah kerangka yang memungkinkan "
-"OpenStack Networking untuk secara bersamaan menggunakan berbagai teknologi "
-"jaringan lapisan 2 yang ditemukan di pusat data real-world yang kompleks. "
-"Kerangka ML2 membedakan antara dua jenis driver yang dapat dikonfigurasi:"
-
-msgid ""
-"The Networking REST API is publicly read-only until after the migration is "
-"complete. During the migration, Networking REST API is read-write only to "
-"nova-api, and changes to Networking are only allowed via nova-api."
-msgstr ""
-"Networking REST API menjadi read-only publik sampai setelah migrasi selesai. "
-"Selama migrasi, Networking REST API menjadi read-write hanya untuk nova-api, "
-"dan perubahan Networking hanya diperbolehkan melalui nova-api."
-
-msgid ""
-"The Networking client requires a protocol value. If the rule is protocol "
-"agnostic, you can use the ``any`` value."
-msgstr ""
-"Klien Networking membutuhkan nilai protokol. Jika aturan ini adalah protokol "
-"agnostik, Anda dapat menggunakan nilai ``any``."
-
-msgid ""
-"The Networking service does not provide layer-3 services between segments. "
-"Instead, it relies on physical network infrastructure to route subnets. "
-"Thus, both the Networking service and physical network infrastructure must "
-"contain configuration for routed provider networks, similar to conventional "
-"provider networks. In the future, implementation of dynamic routing "
-"protocols may ease configuration of routed networks."
-msgstr ""
-"Layanan Networking tidak menyediakan layanan lapisan-3 antar segmen. "
-"Sebaliknya, hal itu bergantung pada infrastruktur jaringan fisik untuk rute "
-"subnet. Dengan demikian, layanan Networking dan infrastruktur jaringan fisik "
-"harus berisi konfigurasi untuk jaringan penyedia dialihkan (routed), mirip "
-"dengan jaringan penyedia konvensional. Di masa depan, pelaksanaan protokol "
-"routing dinamis dapat meringankan konfigurasi jaringan dialihkan (routed)."
-
-msgid ""
-"The Networking service does not provision routing among segments. The "
-"operator must implement routing among segments of a provider network. Each "
-"subnet on a segment must contain the gateway address of the router interface "
-"on that particular subnet. For example:"
-msgstr ""
-"Layanan Networking tidak penyediaan routing antara segmen. Operator harus "
-"mengimplementasikan routing antara segmen di jaringan penyedia. Setiap "
-"subnet di segmen harus berisi alamat gateway dari antarmuka router pada "
-"subnet tertentu itu. Sebagai contoh:"
-
-msgid ""
-"The Networking service enables users to control the name assigned to ports "
-"by the internal DNS. To enable this functionality, do the following:"
-msgstr ""
-"Layanan Networking memungkinkan pengguna untuk mengontrol nama ditugaskan "
-"untuk port oleh DNS internal. Untuk mengaktifkan fungsi ini, lakukan hal "
-"berikut:"
-
-msgid ""
-"The Networking service implements routers using a layer-3 agent that "
-"typically resides at least one network node. Contrary to provider networks "
-"that connect instances to the physical network infrastructure at layer-2, "
-"self-service networks must traverse a layer-3 agent. Thus, oversubscription "
-"or failure of a layer-3 agent or network node can impact a significant "
-"quantity of self-service networks and instances using them. Consider "
-"implementing one or more high-availability features to increase redundancy "
-"and performance of self-service networks."
-msgstr ""
-"Layanan Networking menerapkan router yang menggunakan agen lapisan-3 yang "
-"biasanya berada pada setidaknya satu node jaringan. Bertentangan dengan "
-"jaringan provider yang menghubungkan instance untuk infrastruktur jaringan "
-"fisik pada lapisan-2, jaringan self-service harus melintasi agen lapisan-3. "
-"Dengan demikian, kelebihan permintaan atau kegagalan dari agen lapisan-3 "
-"atau node jaringan dapat berdampak pada jumlah yang signifikan dari jaringan "
-"self-service dan instance yang menggunakan mereka. Mempertimbangkan untuk "
-"menerapkan satu atau lebih fitur ketersediaan tinggi untuk meningkatkan "
-"redundansi dan kinerja jaringan swalayan."
-
-msgid "The Networking service internal DNS resolution"
-msgstr "Layanan Networking resolusi DNS internal"
-
-msgid ""
-"The Networking service offers a load balancer feature called \"LBaaS v2\" "
-"through the ``neutron-lbaas`` service plug-in."
-msgstr ""
-"Layanan Networking menawarkan fitur penyeimbang beban (load balancer) "
-"disebut \"LBaaS v2\" melalui plug-in layanan ``neutron-lbaas``."
-
-msgid ""
-"The Networking service offers several methods to configure name resolution "
-"(DNS) for instances. Most deployments should implement case 1 or 2. Case 3 "
-"requires security considerations to prevent leaking internal DNS information "
-"to instances."
-msgstr ""
-"Layanan Networking menawarkan beberapa metode untuk mengkonfigurasi resolusi "
-"nama (DNS) untuk instance. Kebanyakan pengerahan harus menerapkan case 1 "
-"atau 2. Case 3 membutuhkan pertimbangan keamanan untuk mencegah kebocoran "
-"informasi DNS internal untuk instance."
-
-msgid ""
-"The Networking service only references the underlying physical network MTU. "
-"Changing the underlying physical network device MTU requires configuration "
-"of physical network devices such as switches and routers."
-msgstr ""
-"Layanan Networking hanya mereferensi MTU jaringan fisik yang mendasarinya. "
-"Perubahan MTU perangkat jaringan fisik yang mendasarinya memerlukan "
-"konfigurasi perangkat jaringan fisik seperti switch dan router."
-
-msgid ""
-"The Networking service provides a purge mechanism to delete the following "
-"network resources for a project:"
-msgstr ""
-"Layanan Networking menyediakan mekanisme pembersihan untuk menghapus sumber "
-"daya jaringan berikut untuk proyek:"
-
-msgid ""
-"The Networking service schedules a network to one of the agents within the "
-"selected zone as with ``WeightScheduler``. In this case, scheduler refers to "
-"``dhcp_load_type`` as well."
-msgstr ""
-"Layanan Networking menjadwal jaringan ke salah satu agen dalam zona terpilih "
-"sebagai dengan ``WeightScheduler``. Dalam hal ini, scheduler mengacu juga "
-"``dhcp_load_type``."
-
-msgid ""
-"The Networking service schedules a router to one of the agents within the "
-"selected zone as with ``LeastRouterScheduler``."
-msgstr ""
-"Layanan Networking menjadwal router untuk salah satu agen dalam zona "
-"terpilih sebagai dengan ``LeastRouterScheduler``."
-
-msgid ""
-"The Networking service supports the following underlying physical network "
-"architectures. Case 1 refers to the most common architecture. In general, "
-"architectures should avoid cases 2 and 3."
-msgstr ""
-"Layanan Networking mendukung arsitektur jaringan fisik yang mendasari "
-"berikutnya. Case 1 mengacu pada arsitektur yang paling umum. Secara umum, "
-"arsitektur harus menghindari case 2 dan 3."
-
-msgid ""
-"The Networking service supports underlying physical networks using jumbo "
-"frames and also enables instances to use jumbo frames minus any overlay "
-"protocol overhead. For example, an underlying physical network with a 9000-"
-"byte MTU yields a 8950-byte MTU for instances using a VXLAN network with "
-"IPv4 endpoints. Using IPv6 endpoints for overlay networks adds 20 bytes of "
-"overhead for any protocol."
-msgstr ""
-"Layanan Networking mendukung jaringan fisik yang mendasarinya menggunakan "
-"frame jumbo dan juga mengaktifkan instance menggunakan frame jumbo dikurangi "
-"overhead protokol overlay. Sebagai contoh, sebuah jaringan fisik yang "
-"mendasarinya dengan MTU 9000-byte menghasilkan MTU 8950-byte untuk instance "
-"menggunakan jaringan VXLAN dengan endpoint IPv4. Penggunaan endpoint IPv6 "
-"untuk jaringan overlay menambahkan 20 byte overhead untuk setiap protokol."
-
-msgid ""
-"The Networking service uses the MTU of the underlying physical network to "
-"calculate the MTU for virtual network components including instance network "
-"interfaces. By default, it assumes a standard 1500-byte MTU for the "
-"underlying physical network."
-msgstr ""
-"Layanan Networking menggunakan MTU dari jaringan fisik yang mendasarinya "
-"untuk menghitung MTU untuk komponen jaringan virtual termasuk antarmuka "
-"jaringan instance. Secara default, hal ini diasumsikan MTU 1500-byte "
-"standard untuk jaringan fisik yang mendasari."
-
-msgid ""
-"The Networking service uses the layer-3 agent to provide router "
-"advertisement. Provider networks rely on physical network infrastructure for "
-"layer-3 services rather than the layer-3 agent. Thus, the physical network "
-"infrastructure must provide router advertisement on provider networks for "
-"proper operation of IPv6."
-msgstr ""
-"Layanan Networking menggunakan agen lapisan-3 untuk memberikan router "
-"advertisement. Jaringan penyedia mengandalkan infrastruktur jaringan fisik "
-"untuk lapisan layer-3 daripada agen layer-3. Dengan demikian, infrastruktur "
-"jaringan fisik harus menyediakan router advertisement pada jaringan provider "
-"untuk operasi IPv6 yang tepat."
-
-msgid ""
-"The Networking service, code-named neutron, provides an API that lets you "
-"define network connectivity and addressing in the cloud. The Networking "
-"service enables operators to leverage different networking technologies to "
-"power their cloud networking. The Networking service also provides an API to "
-"configure and manage a variety of network services ranging from L3 "
-"forwarding and :term:`NAT ` to load "
-"balancing, perimeter firewalls, and virtual private networks."
-msgstr ""
-"Layanan Networking, code-named neutron, menyediakan API yang memungkinkan "
-"Anda menentukan konektivitas jaringan dan menangani di cloud. Layanan "
-"Networking mengaktifkan para operator untuk memanfaatkan teknologi jaringan "
-"yang berbeda untuk memperdayakan jaringan cloud mereka. Layanan Networking "
-"juga menyediakan API untuk mengkonfigurasi dan mengelola berbagai layanan "
-"jaringan mulai dari L3 forwarding dan :term:`NAT ` sampai load balancing, perimeter firewall, dan jaringan "
-"private virtual."
-
-msgid ""
-"The OVS integration bridge ``int-br-provider`` patch port (19) forwards the "
-"packet to the OVS provider bridge ``phy-br-provider`` patch port (20)."
-msgstr ""
-"OVS integration bridge ``int-br-provider`` patch port (19) meneruskan paket "
-"ke OVS provider bridge ``phy-br-provider`` patch port (20)."
-
-msgid ""
-"The OVS integration bridge ``int-br-provider`` patch port (6) forwards the "
-"packet to the OVS provider bridge ``phy-br-provider`` patch port (7)."
-msgstr ""
-"OVS integration bridge ``int-br-provider`` patch port (6) meneruskan paket "
-"ke OVS provider bridge ``phy-br-provider`` patch port (7)."
-
-msgid ""
-"The OVS integration bridge ``patch-tun`` patch port (10) forwards the packet "
-"to the OVS tunnel bridge ``patch-int`` patch port (11)."
-msgstr ""
-"OVS integration bridge ``patch-tun`` patch port (10) meneruskan paket ke OVS "
-"tunnel bridge ``patch-int`` patch port (11)."
-
-msgid ""
-"The OVS integration bridge ``patch-tun`` patch port (19) forwards the packet "
-"to the OVS tunnel bridge ``patch-int`` patch port (20)."
-msgstr ""
-"OVS integration bridge ``patch-tun`` patch port (19) meneruskan paket ke OVS "
-"tunnel bridge ``patch-int`` patch port (20)."
-
-msgid ""
-"The OVS integration bridge ``patch-tun`` patch port (6) forwards the packet "
-"to the OVS tunnel bridge ``patch-int`` patch port (7)."
-msgstr ""
-"OVS integration bridge ``patch-tun`` patch port (6) meneruskan paket ke OVS "
-"tunnel bridge ``patch-int`` patch port (7)."
-
-msgid ""
-"The OVS integration bridge ``patch-tun`` port (10) forwards the packet to "
-"the OVS tunnel bridge ``patch-int`` port (11)."
-msgstr ""
-"OVS integration bridge ``patch-tun`` port (10) meneruskan paket ke OVS "
-"tunnel bridge ``patch-int`` port (11)."
-
-msgid "The OVS integration bridge adds an internal VLAN tag to the packet."
-msgstr "OVS integration bridge menambahkan tag VLAN internal ke paket."
-
-msgid "The OVS integration bridge adds the internal VLAN tag to the packet."
-msgstr "OVS integration bridge menambahkan internal VLAN tag ke paket."
-
-msgid ""
-"The OVS integration bridge exchanges the internal VLAN tag for an internal "
-"tunnel ID."
-msgstr ""
-"OVS integration bridge menukarkan internal VLAN tag ke internal tunnel ID."
-
-msgid ""
-"The OVS integration bridge patch port (6) forwards the packet to the OVS "
-"tunnel bridge patch port (7)."
-msgstr ""
-"OVS integration bridge patch port (6) meneruskan paket ke OVS tunnel bridge "
-"patch port (7)."
-
-msgid ""
-"The OVS integration bridge port for self-service network 1 (15) removes the "
-"internal VLAN tag and forwards the packet to the self-service network 1 "
-"interface (16) in the router namespace."
-msgstr ""
-"OVS integration bridge port untuk jaringan self-service 1 (15) menghapus "
-"internal VLAN tag dan meneruskan paket ke jaringan self-service 1 antarmuka "
-"(16) dalam namespace router."
-
-msgid ""
-"The OVS integration bridge port for self-service network 1 (6) removes the "
-"internal VLAN tag and forwards the packet to the self-service network 1 "
-"interface in the distributed router namespace (6)."
-msgstr ""
-"OVS integration bridge port untuk self-service network 1 (6) menghapus "
-"internal VLAN tag dan meneruskan paket ke self-service network 1 interface "
-"di distributed router namespace (6)."
-
-msgid ""
-"The OVS integration bridge port for the provider network (6) removes the "
-"internal VLAN tag and forwards the packet to the provider network interface "
-"(6) in the router namespace."
-msgstr ""
-"OVS integration bridge port untuk the provider network (6) menghapus "
-"internal VLAN tag dan meneruskan paket ke provider network interface (6) "
-"dalam router namespace."
-
-msgid ""
-"The OVS integration bridge port for the provider network (6) removes the "
-"internal VLAN tag and forwards the packet to the provider network interface "
-"(7) in the floating IP namespace. This interface responds to any ARP "
-"requests for the instance floating IPv4 address."
-msgstr ""
-"OVS integration bridge port untuk provider network (6) menghapus internal "
-"VLAN tagdan meneruskan paket ke provider network interface (7) dalam "
-"namespace IP mengambang. Interface ini merespon setiap permintaan ARP untuk "
-"alamat IPv4 mengambang instance."
-
-msgid ""
-"The OVS integration bridge port for the self-service network (15) removes "
-"the internal VLAN tag and forwards the packet to the self-service network "
-"interface (16) in the router namespace."
-msgstr ""
-"OVS integration bridge port untuk the self-service network (15) menghaps "
-"internal VLAN tag dan meneruskan paket ke self-service network interface "
-"(16) dalam router namespace."
-
-msgid ""
-"The OVS integration bridge removes the internal VLAN tag from the packet."
-msgstr "OVS integration bridge menghapus tag VLAN internal dari paket."
-
-msgid ""
-"The OVS integration bridge security group port (12) forwards the packet to "
-"the security group bridge OVS port (13) via ``veth`` pair."
-msgstr ""
-"OVS integration bridge security group port (12) meneruskan paket ke security "
-"group bridge OVS port (13) melalui pasangan ``veth``."
-
-msgid ""
-"The OVS integration bridge security group port (15) forwards the packet to "
-"the security group bridge OVS port (16) via ``veth`` pair."
-msgstr ""
-"OVS integration bridge security group port (15) meneruskan paket ke security "
-"group bridge OVS port (16) melalui pasangan ``veth``."
-
-msgid ""
-"The OVS integration bridge security group port (16) forwards the packet to "
-"the security group bridge OVS port (17)."
-msgstr ""
-"OVS integration bridge security group port (16) menerskan paket ke security "
-"group bridge OVS port (17)."
-
-msgid ""
-"The OVS integration bridge security group port (19) forwards the packet to "
-"the security group bridge OVS port (20) via ``veth`` pair."
-msgstr ""
-"OVS integration bridge security group port (19) meneruskan paket ke the "
-"security group bridge OVS port (20) melalui pasangan ``veth``."
-
-msgid ""
-"The OVS integration bridge security group port (20) removes the internal "
-"VLAN tag and forwards the packet to the security group bridge OVS port (21)."
-msgstr ""
-"OVS integration bridge security group port (20) menghapus internal VLAN tag "
-"dan meneruskan paket ke security group bridge OVS port (21)."
-
-msgid ""
-"The OVS integration bridge security group port (28) forwards the packet to "
-"the security group bridge OVS port (29) via ``veth`` pair."
-msgstr ""
-"OVS integration bridge security group port (28) meneruskan paket ke security "
-"group bridge OVS port (29) melalui pasangan ``veth``."
-
-msgid ""
-"The OVS integration bridge swaps the actual VLAN tag 101 with the internal "
-"VLAN tag."
-msgstr ""
-"OVS integration bridge menukar (swap) actual VLAN tag 101 dengan internal "
-"VLAN tag."
-
-msgid ""
-"The OVS integration bridge swaps the actual VLAN tag 102 with the internal "
-"VLAN tag."
-msgstr ""
-"OVS integration bridge menukar (swap) actual VLAN tag 102 dengan internal "
-"VLAN tag."
-
-msgid ""
-"The OVS provider bridge ``phy-br-provider`` patch port (14) forwards the "
-"packet to the OVS integration bridge ``int-br-provider`` patch port (15)."
-msgstr ""
-"OVS provider bridge ``phy-br-provider`` patch port (14) meneruskan paket ke "
-"OVS integration bridge ``int-br-provider`` patch port (15)."
-
-msgid ""
-"The OVS provider bridge ``phy-br-provider`` patch port (18) forwards the "
-"packet to the OVS integration bridge ``int-br-provider`` patch port (19)."
-msgstr ""
-"OVS provider bridge ``phy-br-provider`` patch port (18) meneruskan paket ke "
-"OVS integration bridge ``int-br-provider`` patch port (19)."
-
-msgid ""
-"The OVS provider bridge ``phy-br-provider`` port (4) forwards the packet to "
-"the OVS integration bridge ``int-br-provider`` port (5)."
-msgstr ""
-"OVS provider bridge ``phy-br-provider`` port (4) meneruskan paket ke OVS "
-"integration bridge ``int-br-provider`` port (5)."
-
-msgid ""
-"The OVS provider bridge provider network port (21) forwards the packet to "
-"the physical network interface (22)."
-msgstr ""
-"OVS provider bridge provider network port (21) meneruskan paket ke physical "
-"network interface (22)."
-
-msgid ""
-"The OVS provider bridge provider network port (8) forwards the packet to the "
-"physical network interface (9)."
-msgstr ""
-"OVS provider bridge provider network port (8) meneruskan paket ke physical "
-"network interface (9)."
-
-msgid ""
-"The OVS provider bridge swaps actual VLAN tag 101 with the internal VLAN tag."
-msgstr ""
-"OVS provider bridge menukar actual VLAN tag 101 dengan internal VLAN tag."
-
-msgid ""
-"The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag 101."
-msgstr ""
-"OVS provider bridge menukar (swap) VLAN tag dengan actual VLAN tag 101."
-
-msgid "The OVS tunnel bridge (12) wraps the packet using VNI 101."
-msgstr "OVS tunnel bridge (12) membungkus paket menggunakan VNI 101."
-
-msgid "The OVS tunnel bridge (21) wraps the packet using VNI 102."
-msgstr "OVS tunnel bridge (21) membungkus paket menggunakan VNI 102."
-
-msgid "The OVS tunnel bridge (8) wraps the packet using VNI 101."
-msgstr "OVS tunnel bridge (8) membungkus (wrap) paket menggunakan VNI 101."
-
-msgid ""
-"The OVS tunnel bridge ``patch-int`` patch port (13) forwards the packet to "
-"the OVS integration bridge ``patch-tun`` patch port (14)."
-msgstr ""
-"OVS tunnel bridge ``patch-int`` patch port (13) meneruskan paket ke OVS "
-"integration bridge ``patch-tun`` patch port (14)."
-
-msgid ""
-"The OVS tunnel bridge ``patch-int`` patch port (17) forwards the packet to "
-"the OVS integration bridge ``patch-tun`` patch port (18)."
-msgstr ""
-"OVS tunnel bridge ``patch-int`` patch port (17) meneruskan paket ke OVS "
-"integration bridge ``patch-tun`` patch port (18)."
-
-msgid ""
-"The OVS tunnel bridge ``patch-int`` patch port (26) forwards the packet to "
-"the OVS integration bridge ``patch-tun`` patch port (27)."
-msgstr ""
-"The OVS tunnel bridge ``patch-int`` patch port (26) forwards the packet to "
-"the OVS integration bridge ``patch-tun`` patch port (27)."
-
-msgid ""
-"The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN "
-"tag."
-msgstr "OVS tunnel bridge menukar internal tunnel ID dengan internal VLAN tag."
-
-msgid ""
-"The OVS tunnel bridge patch port (13) forwards the packet to the OVS "
-"integration bridge patch port (14)."
-msgstr ""
-"OVS tunnel bridge patch port (13) meneruskan paket ke OVS integration bridge "
-"patch port (14)."
-
-msgid ""
-"The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to "
-"it."
-msgstr ""
-"OVS tunnel bridge membuka paket dan menambahkan internal tunnel IDuntuk "
-"paket itu."
-
-msgid ""
-"The Open vSwitch (OVS) mechanism driver uses a combination of OVS and Linux "
-"bridges as interconnection devices. However, optionally enabling the OVS "
-"native implementation of security groups removes the dependency on Linux "
-"bridges."
-msgstr ""
-"Driver mekanisme Open vSwitch (OVS) menggunakan kombinasi OVS dan Linux "
-"bridge sebagai perangkat interkoneksi. Namun, secara opsional pengaktifan "
-"OVS native implementation of security group menghilangkan ketergantungan "
-"pada Linux bridge."
-
-msgid ""
-"The Open vSwitch agent configures the Open vSwitch to realize L2 networks "
-"for OpenStack resources."
-msgstr ""
-"Agen Open vSwitch mengkonfigurasi Open vSwitch untuk mewujudkan jaringan L2 "
-"untuk sumber daya OpenStack."
-
-msgid ""
-"The OpenStack :term:`Networking service ` "
-"provides an API that allows users to set up and define network connectivity "
-"and addressing in the cloud. The project code-name for Networking services "
-"is neutron. OpenStack Networking handles the creation and management of a "
-"virtual networking infrastructure, including networks, switches, subnets, "
-"and routers for devices managed by the OpenStack Compute service (nova). "
-"Advanced services such as firewalls or :term:`virtual private networks "
-"(VPNs) ` can also be used."
-msgstr ""
-"OpenStack :term:`Networking service ` "
-"menyediakan API yang mengizinkan pengguna untuk mengatur dan menentukan "
-"konektivitas jaringan dan menanganinya di cloud. Code-name proyek untuk "
-"layanan Networking adalah neutron. OpenStack Networking menangani pembuatan "
-"dan pengelolaan infrastruktur jaringan virtual, termasuk jaringan, switch, "
-"subnet, dan router untuk perangkat yang dikelola oleh layanan OpenStack "
-"Compute (nova). Layanan canggih seperti firewall atau :term:`virtual private "
-"networks (VPNs) ` juga dapat digunakan."
-
-msgid ""
-"The OpenStack Networking API includes support for Layer 2 networking and :"
-"term:`IP address management (IPAM) `, as well "
-"as an extension for a Layer 3 router construct that enables routing between "
-"Layer 2 networks and gateways to external networks. OpenStack Networking "
-"includes a growing list of plug-ins that enable interoperability with "
-"various commercial and open source network technologies, including routers, "
-"switches, virtual switches and software-defined networking (SDN) controllers."
-msgstr ""
-"OpenStack Networking API meliputi dukungan untuk Layer 2 networking dan :"
-"term:`IP address management (IPAM) `, serta "
-"ekstensi konstruksi untuk Layer 3 router yang mengaktifkan routing diantara "
-"Layer 2 networks dan gateway ke jaringan eksternal. OpenStack Networking "
-"meliputi daftar tumbuh plug-in yang mengaktifkan interoperabilitas dengan "
-"berbagai teknologi jaringan komersial dan open source, termasuk router, "
-"switch, switch virtual dan software-defined networking (SDN) controller."
-
-msgid ""
-"The OpenStack Networking service is extensible. Extensions serve two "
-"purposes: they allow the introduction of new features in the API without "
-"requiring a version change and they allow the introduction of vendor "
-"specific niche functionality. Applications can programmatically list "
-"available extensions by performing a GET on the :code:`/extensions` URI. "
-"Note that this is a versioned request; that is, an extension available in "
-"one API version might not be available in another."
-msgstr ""
-"Layanan OpenStack Networking adalah extensible. Ekstensi melayani dua "
-"tujuan: mereka mengizinkan pengenalan fitur baru di API tanpa memerlukan "
-"perubahan versi dan mereka mengizinkan pengenalan niche spesifik vendor "
-"secara fungsional. Aplikasi secara pemrograman bisa mendaftar ekstensi yang "
-"tersedia dengan melakukan GET pada :code:`/extensions` URI. Catatan bahwa "
-"ini adalah permintaan berversi; yaitu, extension tersedia dalam suatu versi "
-"API mungkin tidak tersedia di versi lain."
-
-msgid ""
-"The PTR records will be created in zones owned by a project with admin "
-"privileges. See :ref:`config-dns-int-ext-serv` for more details."
-msgstr ""
-"Catatan PTR akan dibuat di zona yang dimiliki oleh sebuah proyek dengan hak "
-"admin. Lihat :ref: `config-dns-int-ext-serv` untuk lebih jelasnya."
-
-msgid ""
-"The QoS implementation requires a burst value to ensure proper behavior of "
-"bandwidth limit rules in the Open vSwitch and Linux bridge agents. If you do "
-"not provide a value, it defaults to 80% of the bandwidth limit which works "
-"for typical TCP traffic."
-msgstr ""
-"Pelaksanaan QoS memerlukan burst value untuk memastikan perilaku yang tepat "
-"dari aturan batas bandwidth dalam agen Open vSwitch dan Linux bridge. Jika "
-"Anda tidak memberikan nilai, standarnya ke 80% dari batas bandwidth yang "
-"bekerja untuk lalu lintas TCP tipikal."
-
-msgid ""
-"The Role-Based Access Control (RBAC) policy framework enables both operators "
-"and users to grant access to resources for specific projects."
-msgstr ""
-"Kerangka kebijakan Role-Based Access Control (RBAC) mengaktifkan operator "
-"dan pengguna untuk memberikan akses ke sumber daya untuk proyek-proyek "
-"tertentu."
-
-msgid "The SNAT gateway resides on 203.0.113.11."
-msgstr "SNAT gateway berada pada 203.0.113.11."
-
-msgid ""
-"The SR-IOV agent was optional before Mitaka, and was not enabled by default "
-"before Liberty."
-msgstr ""
-"Agen SR-IOV sebagai opsional sebelum Mitaka, dan tidak diaktifkan secara "
-"default sebelum Liberty."
-
-msgid ""
-"The VLAN sub-interface port (13) on the provider bridge forwards the packet "
-"to the provider physical network interface (14)."
-msgstr ""
-"VLAN sub-interface port (13) di provider bridge meneruskan paket ke "
-"provider physical network interface (14)."
-
-msgid ""
-"The VLAN sub-interface port (4) on the provider bridge forwards the packet "
-"to the physical network interface (5)."
-msgstr ""
-"Port sub-interface VLAN (4) di provider bridge meneruskan paket ke antarmuka "
-"jaringan fisik (5)."
-
-msgid ""
-"The Virtual Private Network-as-a-Service (VPNaaS) is a neutron extension "
-"that introduces the VPN feature set."
-msgstr ""
-"Virtual Private Network-as-a-Service (VPNaaS) adalah ekstensi neutron yang "
-"memperkenalkan set fitur VPN."
-
-msgid ""
-"The ``active_connections`` count is the total number of connections that "
-"were active at the time the agent polled the load balancer. The other three "
-"statistics are cumulative since the load balancer was last started. For "
-"example, if the load balancer restarts due to a system error or a "
-"configuration change, these statistics will be reset."
-msgstr ""
-"Count ``active_connections`` adalah jumlah koneksi yang aktif pada saat agen "
-"disurvei penyeimbang beban. Tiga statistik lainnya adalah kumulatif sejak "
-"penyeimbang beban yang terakhir dimulai. Misalnya, jika penyeimbang beban "
-"restart karena kesalahan sistem atau perubahan konfigurasi, statistik ini "
-"akan disetel ulang."
-
-msgid ""
-"The ``availability_zone`` attribute can be defined in ``dhcp-agent`` and "
-"``l3-agent``. To define an availability zone for each agent, set the value "
-"into ``[AGENT]`` section of ``/etc/neutron/dhcp_agent.ini`` or ``/etc/"
-"neutron/l3_agent.ini``:"
-msgstr ""
-"Atribut ``availability_zone`` dapat didefinisikan dalam ``dhcp-agent`` and "
-"``l3-agent``. Untuk menentukan zona ketersediaan untuk setiap agen, tetapkan "
-"nilai ke dalam bagian ``[AGENT]`` dari ``/etc/neutron/dhcp_agent.ini`` atau "
-"``/etc/neutron/l3_agent.ini``:"
-
-msgid ""
-"The ``availability_zones`` attribute does not have a value until the "
-"resource is scheduled. Once the Networking service schedules the resource to "
-"zones according to ``availability_zone_hints``, ``availability_zones`` shows "
-"in which zone the resource is hosted practically. The ``availability_zones`` "
-"may not match ``availability_zone_hints``. For example, even if you specify "
-"a zone with ``availability_zone_hints``, all agents of the zone may be dead "
-"before the resource is scheduled. In general, they should match, unless "
-"there are failures or there is no capacity left in the zone requested."
-msgstr ""
-"Atribut ``availability_zones`` tidak memiliki nilai sampai sumber yang "
-"dijadwalkan. Setelah layanan Networking menjadwal sumber daya untuk zona "
-"menurut ``availability_zone_hints``, ``availability_zones`` menunjukkan zona "
-"dimana sumber disimpan (hosted) secara praktis. ``availability_zones`` "
-"mungkin tidak cocok dengan ``availability_zone_hints``. Misalnya, bahkan "
-"jika Anda menentukan zona dengan ``availability_zone_hints``, semua agen "
-"dari zona mungkin akan mati sebelum sumber dijadwalkan. Secara umum, mereka "
-"harus cocok, kecuali ada kegagalan atau tidak ada kapasitas yang tersisa di "
-"zona yang diminta."
-
-msgid ""
-"The ``chain_parameters`` attribute contains one or more parameters for the "
-"port chain. Currently, it only supports a correlation parameter that "
-"defaults to ``mpls`` for consistency with :term:`Open vSwitch` (OVS) "
-"capabilities. Future values for the correlation parameter may include the :"
-"term:`network service header (NSH)`."
-msgstr ""
-"Atribut ``chain_parameters`` berisi satu atau lebih parameter untuk rantai "
-"port. Saat ini, hanya mendukung parameter korelasi yang defaultnya ``mpls`` "
-"untuk konsistensi dengan kemampuan (OVS) :term:`Open vSwitch` . Nilai-nilai "
-"masa depan untuk parameter korelasi dapat mencakup :term:`network service "
-"header (NSH)`."
-
-msgid ""
-"The ``default_prefix_length`` defines the subnet size you will get if you do "
-"not specify ``--prefix-length`` when creating a subnet."
-msgstr ""
-"``default_prefix_length`` mendefinisikan ukuran subnet dimana Anda akan "
-"mendapatkan jika Anda tidak menentukan `` --prefix-length`` saat membuat "
-"subnet."
-
-msgid ""
-"The ``dns_assignment`` attribute also shows that the port's ``hostname`` in "
-"the Networking service internal DNS is ``my-vm``."
-msgstr ""
-"Atribut ``dns_assignment`` juga menunjukkan bahwa ``hostname`` milik port "
-"di DNS internal layanan Networking adalah``my-vm``."
-
-msgid ""
-"The ``dns_name`` and ``dns_domain`` attributes of a floating IP must be "
-"specified together on creation. They cannot be assigned to the floating IP "
-"separately."
-msgstr ""
-"Atribut ``dns_name`` dan ``dns_domain``` dari IP mengambang harus ditentukan "
-"bersama-sama pada pembuatan. Mereka tidak dapat ditugaskan ke IP mengambang "
-"secara terpisah."
-
-msgid ""
-"The ``dns_name`` and ``dns_domain`` of a floating IP have precedence, for "
-"purposes of being published in the external DNS service, over the "
-"``dns_name`` of its associated port and the ``dns_domain`` of the port's "
-"network, whether they are specified or not. Only the ``dns_name`` and the "
-"``dns_domain`` of the floating IP are published in the external DNS service."
-msgstr ""
-"`` dns_name`` dan `` dns_domain`` dari IP mengambang berada di awal (have "
-"precedence), untuk tujuan penerbitan dalam layanan DNS eksternal, di atas "
-"``dns_name`` port yang terkait dan ``dns_domain`` dari jaringan port, apakah "
-"mereka dibuat spesifik atau tidak. Hanya ``dns_name`` dan ``dns_domain`` "
-"dari IP mengambang akan diterbitkan dalam layanan DNS eksternal."
-
-msgid ""
-"The ``exclude_devices`` parameter is empty, therefore, all the VFs "
-"associated with eth3 may be configured by the agent. To exclude specific "
-"VFs, add them to the ``exclude_devices`` parameter as follows:"
-msgstr ""
-"Parameter ``exclude_devices`` kosong, oleh karena itu, semua VF terkait "
-"dengan eth3 dapat dikonfigurasi oleh agen. Untuk mengeluarkan VF tertentu, "
-"tambahkan VF ke parameter ``exclude_devices`` sebagai berikut:"
-
-msgid "The ``external_network_bridge`` option intentionally contains no value."
-msgstr "Opsi ``external_network_bridge`` sengaja tidak mengandung nilai."
-
-msgid ""
-"The ``force_metadata`` option forces the DHCP agent to provide a host route "
-"to the metadata service on ``169.254.169.254`` regardless of whether the "
-"subnet contains an interface on a router, thus maintaining similar and "
-"predictable metadata behavior among subnets."
-msgstr ""
-"Opsi ``force_metadata`` memaksa agen DHCP untuk memberikan rute host ke "
-"layanan metadata pada ``169.254.169.254`` terlepas dari apakah subnet berisi "
-"sebuah antarmuka pada router, dengan demikian mempertahankan perilaku "
-"metadata yang sama dan dapat diprediksi antar subnet."
-
-msgid ""
-"The ``ipv6_address_mode`` attribute is used to control how addressing is "
-"handled by OpenStack. There are a number of different ways that guest "
-"instances can obtain an IPv6 address, and this attribute exposes these "
-"choices to users of the Networking API."
-msgstr ""
-"Atribut ``ipv6_address_mode`` digunakan untuk mengontrol bagaimana "
-"pengalamatan ditangani oleh OpenStack. Ada sejumlah cara yang berbeda dimana "
-"guest instance dapat memperoleh alamat IPv6, dan atribut ini membuka "
-"(expose) pilihan ini untuk pengguna dari API Networking."
-
-msgid ""
-"The ``ipv6_ra_mode`` attribute is used to control router advertisements for "
-"a subnet."
-msgstr ""
-"Atribut ``ipv6_ra_mode`` digunakan untuk mengontrol penyiaran update dan "
-"perubahan router untuk subnet."
-
-msgid ""
-"The ``physical_device_mappings`` parameter is not limited to be a 1-1 "
-"mapping between physical networks and NICs. This enables you to map the same "
-"physical network to more than one NIC. For example, if ``physnet2`` is "
-"connected to ``eth3`` and ``eth4``, then ``physnet2:eth3,physnet2:eth4`` is "
-"a valid option."
-msgstr ""
-"Parameter ``physical_device_mappings`` tidak terbatas menjadi pemetaan 1-1 "
-"antara jaringan fisik dan NIC. Hal ini memungkinkan Anda untuk memetakan "
-"jaringan fisik yang sama untuk lebih dari satu NIC. Misalnya, jika "
-"``physnet2`` terhubung ke ``eth3`` dan ``eth4``, maka ``physnet2: eth3, "
-"physnet2: eth4`` adalah pilihan yang valid."
-
-msgid ""
-"The ``provider`` value in the ``network_vlan_ranges`` option lacks VLAN ID "
-"ranges to support use of arbitrary VLAN IDs."
-msgstr ""
-"Nilai ``provider`` di opsi ``network_vlan_ranges`` tidak memiliki berkisar "
-"VLAN ID untuk mendukung penggunaan ID VLAN bebas."
-
-msgid ""
-"The ``segmentation-type`` and ``segmentation-id`` parameters are optional in "
-"the Networking API. However, all drivers as of the Newton release require "
-"both to be provided when adding a subport to a trunk. Future drivers may be "
-"implemented without this requirement."
-msgstr ""
-"Parameter ``segmentation-type`` dan ``segmentation-id`` adalah opsional "
-"dalam Networking API. Namun, semua driver pada rilis Newton mengharuskan "
-"kedua parameter yang akan diberikan ketika menambahkan subport ke trunk. "
-"Driver yang akan datang dapat dilaksanakan tanpa persyaratan ini."
-
-msgid ""
-"The ``segmentation-type`` and ``segmentation-id`` specified by the user on "
-"the subports is intentionally decoupled from the ``segmentation-type`` and "
-"ID of the networks. For example, it is possible to configure the Networking "
-"service with ``tenant_network_types = vxlan`` and still create subports with "
-"``segmentation_type = vlan``. The Networking service performs remapping as "
-"necessary."
-msgstr ""
-"``segmentasi-type`` dan ``segmentasi-id`` ditentukan oleh pengguna pada "
-"subports yang sengaja dipisahkan dari ``segmentasi-type`` dan ID jaringan. "
-"Sebagai contoh, adalah mungkin untuk mengkonfigurasi layanan Networking "
-"dengan ``tenant_network_types = vxlan`` dan masih membuat subports dengan "
-"``segmentation_type = vlan``. Layanan Networking melakukan mapping ulang "
-"seperlunya."
-
-msgid ""
-"The ``service_function_parameters`` attribute includes one or more "
-"parameters for the service function. Currently, it only supports a "
-"correlation parameter that determines association of a packet with a chain. "
-"This parameter defaults to ``none`` for legacy service functions that lack "
-"support for correlation such as the NSH. If set to ``none``, the data plane "
-"implementation must provide service function proxy functionality."
-msgstr ""
-"Atribut ``service_function_parameters`` mencakup satu atau lebih parameter "
-"untuk fungsi pelayanan. Saat ini, atribut itu hanya mendukung parameter "
-"korelasi yang menentukan asosiasi paket dengan rantai. Parameter ini default "
-"ke ``none`` untuk fungsi layanan legacy yang tidak memiliki dukungan untuk "
-"korelasi seperti NSH tersebut. Jika diatur ke ``none``, pelaksanaan data "
-"plane harus secara funcional menyediakan fungsi pelayanan proxy."
-
-msgid ""
-"The ``share`` option allows any project to use this network. To limit access "
-"to provider networks, see :ref:`config-rbac`."
-msgstr ""
-"Opsi ``share`` memungkinkan proyek untuk menggunakan jaringan ini. Untuk "
-"membatasi akses ke jaringan provider, lihat :ref: `config-rbac`."
-
-msgid ""
-"The ``tags``, ``tags-any``, ``not-tags``, and ``not-tags-any`` arguments can "
-"be combined to build more complex queries. Example::"
-msgstr ""
-"Argumen ``tags``, ``tags-any``, ``not-tags``, dan ``not-tags-any`` dapat "
-"dikombinasikan untuk membangun query yang lebih kompleks. Contoh::"
-
-msgid ""
-"The ``target-project`` parameter specifies the project that requires access "
-"to the QoS policy. The ``action`` parameter specifies what the project is "
-"allowed to do. The ``type`` parameter says that the target object is a QoS "
-"policy. The final parameter is the ID of the QoS policy we are granting "
-"access to."
-msgstr ""
-"Parameter ``target-project`` menentukan proyek yang membutuhkan akses ke "
-"kebijakan QoS. Parameter ``action`` menentukan proyek apa yang diperbolehkan "
-"untuk dilakukan. Parameter ``type`` mengatakan bahwa objek target adalah "
-"kebijakan QoS. Parameter terakhir adalah ID kebijakan QoS dimana kita "
-"memberikan akses ke."
-
-msgid ""
-"The ``target-project`` parameter specifies the project that requires access "
-"to the network. The ``action`` parameter specifies what the project is "
-"allowed to do. The ``type`` parameter indicates that the target object is a "
-"network. The final parameter is the ID of the network we are granting "
-"external access to."
-msgstr ""
-"Parameter ``target-project`` menentukan proyek yang membutuhkan akses ke "
-"jaringan. Parameter ``action`` menentukan proyek apa yang diperbolehkan "
-"untuk dilakukan. Parameter ``type`` menunjukkan bahwa objek target adalah "
-"jaringan. Parameter terakhir adalah ID jaringan dimana kami memberikan akses "
-"eksternalnya."
-
-msgid ""
-"The ``target-project`` parameter specifies the project that requires access "
-"to the network. The ``action`` parameter specifies what the project is "
-"allowed to do. The ``type`` parameter says that the target object is a "
-"network. The final parameter is the ID of the network we are granting access "
-"to."
-msgstr ""
-"Parameter ``target-project``` menentukan proyek yang membutuhkan akses ke "
-"jaringan. Parameter ``action`` menentukan proyek apa yang diperbolehkan "
-"untuk dilakukan. Parameter ``type`` mengatakan bahwa objek target adalah "
-"jaringan. Parameter terakhir adalah ID jaringan dimana kita memberikan akses "
-"ke."
-
-msgid ""
-"The ``tenant_network_types`` option contains no value because the "
-"architecture does not support self-service networks."
-msgstr ""
-"Opsi ``tenant_network_types`` tidak mengandung nilai karena arsitektur tidak "
-"mendukung jaringan self-service."
-
-msgid ""
-"The `branches`, `current`, and `history` commands all accept a ``--verbose`` "
-"option, which, when passed, will instruct :command:`neutron-db-manage` to "
-"display more verbose output for the specified command:"
-msgstr ""
-"Perintah `branches`,`current`, dan `history` semuanya menerima opsi ``--"
-"verbose``, ketika melewatinya, dan perintah ini akan menginstruksikan :"
-"command:`neutron-db-manage` untuk menampilkan output yang lebih banyak kata "
-"(verbose) untuk perintah tertentu:"
-
-msgid ""
-"The ability to control port security and QoS rate limit settings was added "
-"in Liberty."
-msgstr ""
-"Kemampuan untuk mengontrol pengaturan batas keamanan port dan tingkat QoS "
-"ditambahkan di Liberty."
-
-msgid ""
-"The above example returns any networks that have the \"red\" and \"blue\" "
-"tags, plus at least one of \"green\" and \"orange\"."
-msgstr ""
-"Contoh di atas mengembalikan setiap jaringan yang memiliki tag \"red\" dan "
-"\"blue\", ditambah setidaknya satu dari \"green\" dan \"orange\"."
-
-msgid ""
-"The above is all you need in this scenario, but more information on "
-"installing, configuring, and running Dibbler is available in the Dibbler "
-"user guide, at `Dibbler – a portable DHCPv6 `_."
-msgstr ""
-"Hal di atas adalah semua hal yang Anda butuhkan untuk skenario ini, tetapi "
-"informasi lebih lanjut tentang instalasi, konfigurasi, dan mejalankan "
-"Dibbler tersedia di buku petunjuk Dibbler, di `Dibbler – a portable DHCPv6 "
-"`_."
-
-msgid ""
-"The administrator can configure some optional configuration options. For "
-"more details, see the related section in the `Configuration Reference "
-"`__."
-msgstr ""
-"Administrator dapat mengkonfigurasi beberapa opsi konfigurasi opsional. "
-"Untuk lebih jelasnya, lihat bagian terkait di `Configuration Reference "
-"`__."
-
-msgid ""
-"The administrator can configure the VXLAN multicast group that should be "
-"used."
-msgstr ""
-"Administrator dapat mengkonfigurasi grup multicast VXLAN yang harus "
-"digunakan."
-
-msgid ""
-"The administrator needs to configure a list of physical network names that "
-"can be used for provider networks. For more details, see the related section "
-"in the `Configuration Reference `__."
-msgstr ""
-"Administrator perlu mengkonfigurasi daftar nama jaringan fisik yang dapat "
-"digunakan untuk jaringan provider. Untuk lebih jelasnya, lihat bagian "
-"terkait di `Configuration Reference `__."
-
-msgid ""
-"The administrator needs to configure a list of physical network names that "
-"can be used for provider networks. For more details, see the related section "
-"in the `Configuration Reference `__."
-msgstr ""
-"Administrator perlu mengkonfigurasi daftar nama jaringan fisik yang dapat "
-"digunakan untuk jaringan provider. Untuk lebih jelasnya, lihat bagian "
-"terkait di `Configuration Reference `__."
-
-msgid ""
-"The administrator needs to configure the range of VLAN IDs that can be used "
-"for project network allocation. For more details, see the related section in "
-"the `Configuration Reference `__."
-msgstr ""
-"Administrator perlu untuk mengkonfigurasi berbagai ID VLAN yang dapat "
-"digunakan untuk alokasi jaringan proyek. Untuk lebih jelasnya, lihat bagian "
-"terkait di `Configuration Reference `__."
-
-msgid ""
-"The administrator needs to configure the range of VXLAN IDs that can be used "
-"for project network allocation. For more details, see the related section in "
-"the `Configuration Reference `__."
-msgstr ""
-"Administrator perlu untuk mengkonfigurasi berbagai ID VXLAN yang dapat "
-"digunakan untuk alokasi jaringan proyek. Untuk lebih jelasnya, lihat bagian "
-"terkait di `Configuration Reference `__."
-
-msgid ""
-"The administrator needs to configure the range of tunnel IDs that can be "
-"used for project network allocation. For more details, see the related "
-"section in the `Configuration Reference `__."
-msgstr ""
-"Administrator perlu untuk mengkonfigurasi berbagai ID terowongan (tunnel ID) "
-"yang dapat digunakan untuk alokasi jaringan proyek. Untuk lebih jelasnya, "
-"lihat bagian terkait di `Configuration Reference `__."
-
-msgid ""
-"The administrator needs to define a list PCI hardware that shall be used by "
-"OpenStack. For more details, see the related section in the `Configuration "
-"Reference `__."
-msgstr ""
-"Administrator perlu mendefinisikan daftar hardware PCI yang harus digunakan "
-"oleh OpenStack. Untuk lebih jelasnya, lihat bagian terkait di `Configuration "
-"Reference `__."
-
-msgid "The agent currently only supports the Ryu BGP driver."
-msgstr "Agen saat ini hanya mendukung driver Ryu BGP."
-
-msgid "The attributes can also be left unset."
-msgstr "Atribut juga dapat dibiarkan tanpa disetel."
-
-msgid ""
-"The auto-allocated topology for a user never changes. In practice, when a "
-"user boots a server omitting the ``--nic`` option, and there is more than "
-"one network available, the Compute service will invoke the API behind ``auto "
-"allocated topology create``, fetch the network UUID, and pass it on during "
-"the boot process."
-msgstr ""
-"Topologi auto-allocated untuk pengguna tidak pernah berubah. Dalam "
-"praktiknya, ketika pengguna mem-boot sebuah server yang menghilangkan opsi "
-"``--nic``, dan ada lebih dari satu jaringan yang tersedia, layanan Compute "
-"akan memanggil API di belakang ``auto allocated topology create``, mengambil "
-"jaringan UUID , Dan menyebarkannya selama proses boot."
-
-msgid ""
-"The auto-allocation feature creates one network topology in every project "
-"where it is used. The auto-allocated network topology for a project contains "
-"the following resources:"
-msgstr ""
-"Fitur auto-alokasi menciptakan satu topologi jaringan di setiap proyek "
-"dimana ia digunakan. Topologi jaringan auto-allocated untuk proyek berisi "
-"sumber daya berikut:"
-
-msgid ""
-"The auto-allocation feature introduced in Mitaka simplifies the procedure of "
-"setting up an external connectivity for end-users, and is also known as "
-"**Get Me A Network**."
-msgstr ""
-"Fitur auto-alokasi yang diperkenalkan di Mitaka menyederhanakan prosedur "
-"pemasangan sebuah konektivitas eksternal untuk end-user, dan juga dikenal "
-"sebagai **Get Me A Network**."
-
-msgid ""
-"The auto-allocation feature requires at least one default subnetpool. One "
-"for IPv4, or one for IPv6, or one of each."
-msgstr ""
-"Fitur auto-alokasi membutuhkan setidaknya satu subnetpool default. Satu "
-"untuk IPv4, atau satu untuk IPv6, atau masing-masing satu."
-
-msgid ""
-"The basic deployment model consists of one controller node, two or more "
-"network nodes, and multiple computes nodes."
-msgstr ""
-"Model pengerahan dasar terdiri dari satu controller node, dua atau lebih "
-"node jaringan, dan beberapa node komputasi."
-
-msgid "The basics"
-msgstr "Dasar"
-
-msgid ""
-"The central concepts with OpenStack firewalls are the notions of a firewall "
-"policy and a firewall rule. A policy is an ordered collection of rules. A "
-"rule specifies a collection of attributes (such as port ranges, protocol, "
-"and IP addresses) that constitute match criteria and an action to take "
-"(allow or deny) on matched traffic. A policy can be made public, so it can "
-"be shared across projects."
-msgstr ""
-"Konsep pusat dengan firewall OpenStack adalah pengertian dari kebijakan "
-"firewall dan aturan firewall. Kebijakan adalah koleksi aturan yang "
-"diperintahkan. Aturan menentukan koleksi atribut (seperti rentang port, "
-"protokol, dan alamat IP) yang merupakan kriteria yang cocok dan suatu "
-"tindakan untuk melakukan (mengizinkan atau menolak) pada lalu lintas yang "
-"cocok. Sebuah kebijakan dapat dibuat publik, sehingga bisa dibagi di seluruh "
-"proyek."
-
-msgid ""
-"The client sends a discover (\"I’m a client at MAC address ``08:00:27:"
-"b9:88:74``, I need an IP address\")"
-msgstr ""
-"Klien mengirimkan pilihan (\"I’m a client at MAC address ``08:00:27:"
-"b9:88:74``, I need an IP address\")"
-
-msgid ""
-"The cloud consumer can decide via the neutron APIs VNIC_TYPE attribute, if "
-"an instance gets a normal OVS port or an SRIOV port."
-msgstr ""
-"Konsumen cloud dapat memutuskan melalui atribut API VNIC_TYPE neutron, jika "
-"sebuah instance mendapat port OVS normal atau port SRIOV."
-
-msgid "The command also indicates if a project lacks network resources."
-msgstr ""
-"Perintah ini juga menunjukkan jika proyek tidak memiliki sumber daya "
-"jaringan."
-
-msgid "The command provides no output."
-msgstr "Perintah ini tidak memberikan keluaran (output)."
-
-msgid ""
-"The command provides output that includes a completion percentage and the "
-"quantity of successful or unsuccessful network resource deletions. An "
-"unsuccessful deletion usually indicates sharing of a resource with one or "
-"more additional projects."
-msgstr ""
-"Perintah ini menyediakan output yang mencakup persentase penyelesaian dan "
-"kuantitas penghapusan sumber daya jaringan sukses atau gagal. Sebuah "
-"penghapusan berhasil biasanya menunjukkan berbagi sumber daya dengan satu "
-"atau lebih proyek tambahan."
-
-msgid ""
-"The configuration supports multiple VXLAN self-service networks. For "
-"simplicity, the following procedure creates one self-service network and a "
-"router with a gateway on the flat provider network. The router uses NAT for "
-"IPv4 network traffic and directly routes IPv6 network traffic."
-msgstr ""
-"Konfigurasi mendukung beberapa jaringan self-service VXLAN. Untuk "
-"mempermudah, prosedur berikut menciptakan satu jaringan self-service dan "
-"router dengan gateway di jaringan provider datar (flat). Router menggunakan "
-"NAT untuk lalu lintas jaringan IPv4 dan langsung me-rute lalu lintas "
-"jaringan IPv6."
-
-msgid ""
-"The configuration supports one flat or multiple VLAN provider networks. For "
-"simplicity, the following procedure creates one flat provider network."
-msgstr ""
-"Konfigurasi ini mendukung satu jaringan datar atau beberapa jaringan "
-"penyedia VLAN. Untuk mempermudah, prosedur berikut menciptakan satu jaringan "
-"operator datar."
-
-msgid ""
-"The coordination between the Networking service and the Compute scheduler "
-"requires the following minimum API micro-versions."
-msgstr ""
-"Koordinasi antara layanan Networking dan Compute schedule memerlukan mikro-"
-"versi API minimum berikut."
-
-msgid ""
-"The core plug-in must support the ``availability_zone`` extension. The core "
-"plug-in also must support the ``network_availability_zone`` extension to "
-"schedule a network according to availability zones. The ``Ml2Plugin`` "
-"supports it. The router service plug-in must support the "
-"``router_availability_zone`` extension to schedule a router according to the "
-"availability zones. The ``L3RouterPlugin`` supports it."
-msgstr ""
-"Plug-in inti harus mendukung ekstensi ``availability_zone``. Plug-in inti "
-"juga harus mendukung ekstensi ``network_availability_zone`` untuk "
-"menjadwalkan jaringan sesuai dengan zona ketersediaan. ``Ml2Plugin`` "
-"mendukungnya. Layanan router plug-in harus mendukung ekstensi "
-"``router_availability_zone`` untuk menjadwal router sesuai dengan zona "
-"ketersediaan. ``L3RouterPlugin`` mendukungnya."
-
-msgid ""
-"The current process as designed is a minimally viable migration with the "
-"goal of deprecating and then removing legacy networking. Both the Compute "
-"and Networking teams agree that a one-button migration process from legacy "
-"networking to OpenStack Networking (neutron) is not an essential requirement "
-"for the deprecation and removal of the legacy networking at a future date. "
-"This section includes a process and tools which are designed to solve a "
-"simple use case migration."
-msgstr ""
-"Proses saat seperti yang dirancang adalah migrasi minimal yang layak dengan "
-"tujuan mencela (deprecating) dan kemudian menghapus (removing) jaringan "
-"legacy. Tim Compute maupun Networking setuju bahwa proses migrasi one-button "
-"dari warisan jaringan untuk OpenStack Networking (neutron) bukan merupakan "
-"persyaratan penting untuk depresiasi dan penghapusan warisan jaringan di "
-"masa mendatang. Bagian ini mencakup proses dan alat-alat yang dirancang "
-"untuk memecahkan migrasi use case migration yang sederhana."
-
-msgid "The currently supported resources are:"
-msgstr "Sumber daya saat ini didukung adalah:"
-
-msgid ""
-"The database management command-line tool is called :command:`neutron-db-"
-"manage`. Pass the ``--help`` option to the tool for usage information."
-msgstr ""
-"Alat command-line manajemen database disebut :command:`neutron-db-manage`. "
-"Melewatkan opsi ``--help`` ke alat itu untuk informasi penggunaan."
-
-msgid ""
-"The default ``policy.json`` file will not allow regular users to share "
-"objects with every other project using a wildcard; however, it will allow "
-"them to share objects with specific project IDs."
-msgstr ""
-"File ``policy.json`` default tidak akan mengizinkan pengguna biasa untuk "
-"berbagi objek dengan proyek lainnya menggunakan wildcard; Namun, hal itu "
-"akan mengizinkan mereka untuk berbagi objek dengan ID proyek tertentu."
-
-msgid "The deployment examples refer one or more of the following nodes:"
-msgstr "Contoh pengerahan merujuk satu atau lebih node berikut:"
-
-msgid ""
-"The deployment examples refer to one or more of the following networks and "
-"network interfaces:"
-msgstr ""
-"Contoh penyebaran mengacu pada satu atau lebih jaringan berikut dan "
-"antarmuka jaringan:"
-
-msgid ""
-"The distributed router namespace routes the packet to self-service network 2."
-msgstr "Distributed router namespace me-rute paket ke self-service network 2."
-
-msgid ""
-"The driver interface is designed to allow separate drivers for each subnet "
-"pool. However, the current implementation allows only a single IPAM driver "
-"system-wide."
-msgstr ""
-"Interface driver dirancang untuk memungkinkan driver yang terpisah untuk "
-"setiap kolam subnet. Namun, pelaksanaan saat ini memungkinkan hanya system-"
-"wide driver IPAM tunggal. "
-
-msgid ""
-"The enablement of this functionality is prerequisite for the enablement of "
-"the Networking service integration with an external DNS service, which is "
-"described in detail in :ref:`config-dns-int-ext-serv`."
-msgstr ""
-"Pemberdayaan fungsi ini merupakan prasyarat untuk pemberdayaan dari "
-"integrasi layanan Networking dengan layanan DNS eksternal, yang dijelaskan "
-"secara rinci dalam :ref: `config-dns-int-ext-serv`."
-
-msgid ""
-"The example configuration assumes sufficient knowledge about the Networking "
-"service, routing, and BGP. For basic deployment of the Networking service, "
-"consult one of the :ref:`deploy`. For more information on BGP, see `RFC 4271 "
-"`_."
-msgstr ""
-"Contoh konfigurasi mengasumsikan pengetahuan yang cukup tentang layanan "
-"Networking, routing, dan BGP. Untuk pengerahan dasar layanan Networking, "
-"konsultasikan salah satu :ref: `deploy`. Untuk informasi lebih lanjut "
-"tentang BGP, lihat `RFC 4271 `_."
-
-msgid "The example configuration involves the following components:"
-msgstr "Contoh konfigurasi melibatkan komponen-komponen berikut:"
-
-msgid "The example network ``net1`` must exist before creating ports on it."
-msgstr "The example network ``net1`` must exist before creating ports on it."
-
-msgid "The examples assume the OpenStack DNS service as the external DNS."
-msgstr "Contoh ini menganggap layanan DNS OpenStack sebagai DNS eksternal."
-
-msgid "The external and self-service network reside in the same address scope."
-msgstr ""
-"Jaringan eksternal dan self-service berada dalam lingkup alamat yang sama."
-
-msgid "The external network (12) receives the packet."
-msgstr "Jaringan eksternal (12) menerima paket."
-
-msgid "The external network (16) receives the packet."
-msgstr "External network (16) menerima paket."
-
-msgid ""
-"The firewall remains in PENDING\\_CREATE state until you create a Networking "
-"router and attach an interface to it."
-msgstr ""
-"firewall tetap dalam keadaan PENDING \\_CREATE sampai Anda membuat router "
-"Networking dan menghubungkan sebuah antarmuka untuk itu."
-
-msgid ""
-"The first branch is called expand and is used to store expansion-only "
-"migration rules. These rules are strictly additive and can be applied while "
-"the Neutron server is running."
-msgstr ""
-"Cabang pertama disebut memperluas dan digunakan untuk menyimpan aturan "
-"migrasi expansion-only. Aturan-aturan ini secara ketat aditif dan dapat "
-"diterapkan ketika server Neutron berjalan."
-
-msgid ""
-"The first step to configure the integration with an external DNS service is "
-"to enable the functionality described in :ref:`config-dns-int-dns-"
-"resolution`. Once this is done, the user has to take the following steps and "
-"restart ``neutron-server``."
-msgstr ""
-"Langkah pertama untuk mengkonfigurasi integrasi dengan layanan DNS eksternal "
-"adalah mengaktifkan fungsi yang dijelaskan dalam :ref: `config-dns-int-dns-"
-"resolution`. Setelah ini dilakukan, pengguna harus mengambil langkah berikut "
-"dan restart ``neutron-server``."
-
-msgid ""
-"The first time host *A* attempts to communicate with host *B*, the "
-"destination MAC address is not known. Host *A* makes an ARP request to the "
-"local network. The request is a broadcast with a message like this:"
-msgstr ""
-"Waktu pertama host *A* mencoba untuk berkomunikasi dengan host *B*, alamat "
-"MAC tujuan tidak diketahui. Host *A* membuat permintaan ARP ke jaringan "
-"lokal. Permintaan ini adalah broadcast (penyiaran) dengan pesan seperti ini:"
-
-msgid ""
-"The first value in the ``tenant_network_types`` option becomes the default "
-"project network type when a regular user creates a network."
-msgstr ""
-"Nilai pertama dalam opsi ``tenant_network_types`` menjadi tipe jaringan "
-"proyek default ketika user biasa membuat sebuah jaringan."
-
-msgid ""
-"The flag ``--default`` (and ``--no-default`` flag) is only effective with "
-"external networks and has no effects on regular (or internal) networks."
-msgstr ""
-"Flag ``--default`` (and ``--no-default`` flag) hanya efektif dengan jaringan "
-"eksternal dan tidak memiliki efek pada jaringan reguler (atau internal)."
-
-msgid ""
-"The floating IP agent gateways (one per compute node) reside on "
-"203.0.113.12, 203.0.113.13, and 203.0.113.14."
-msgstr ""
-"Agen gateway IP mengambang (satu per node komputasi) berada pada "
-"203.0.113.12, 203.0.113.13, dan 203.0.113.14."
-
-msgid ""
-"The floating IP namespace routes the packet (8) to the distributed router "
-"namespace (9) using a pair of IP addresses on the DVR internal network. This "
-"namespace contains the instance floating IPv4 address."
-msgstr ""
-"Namespace IP mengambang me-rute paket (8) ke distributed router namespace "
-"(9) menggunakan sepasang alamat IP pada jaringan internal DVR. Namespace "
-"ini berisi alamat IPv4 mengambang instance."
-
-msgid "The following attributes are added into network and router:"
-msgstr "Atribut berikut ditambahkan ke dalam jaringan dan router:"
-
-msgid ""
-"The following deployment examples provide building blocks of increasing "
-"architectural complexity using the Networking service reference architecture "
-"which implements the Modular Layer 2 (ML2) plug-in and either the Open "
-"vSwitch (OVS) or Linux bridge mechanism drivers. Both mechanism drivers "
-"support the same basic features such as provider networks, self-service "
-"networks, and routers. However, more complex features often require a "
-"particular mechanism driver. Thus, you should consider the requirements (or "
-"goals) of your cloud before choosing a mechanism driver."
-msgstr ""
-"Contoh pengerahan berikut menyediakan blok bangunan seiring dengan "
-"peningkatan kompleksitas arsitektur menggunakan arsitektur referensi layanan "
-"Networking yang mengimplementasikan Modular Layer 2 (ML2) plug-in dan Open "
-"vSwitch (OVS) ataupun driver mekanisme Linux bridge. Kedua driver mekanisme "
-"mendukung fitur dasar yang sama seperti jaringan provider, jaringan self-"
-"service, dan router. Namun, fitur yang lebih kompleks sering membutuhkan "
-"driver mekanisme tertentu. Dengan demikian, Anda harus mempertimbangkan "
-"persyaratan (atau tujuan) dari cloud Anda sebelum pemilihan driver mekanisme."
-
-msgid ""
-"The following diagram depicts FWaaS v1 protection. It illustrates the flow "
-"of ingress and egress traffic for the VM2 instance:"
-msgstr ""
-"Diagram berikut menggambarkan perlindungan FWaaS v1. Ini menggambarkan "
-"aliran masuknya (ingress) dan lalu lintas jalan keluar (egress) untuk "
-"instance VM2:"
-
-msgid ""
-"The following example outlines how you can configure service subnets in a "
-"DVR-enabled deployment, with the goal of minimizing public IP address "
-"consumption. This example uses three subnets on the same external network:"
-msgstr ""
-"Contoh berikut menguraikan bagaimana Anda dapat mengkonfigurasi subnet "
-"layanan dalam pengerahan DVR-enabled, dengan tujuan meminimalkan konsumsi "
-"alamat IP publik. Contoh ini menggunakan tiga subnet pada jaringan eksternal "
-"yang sama:"
-
-msgid ""
-"The following figure describes virtual connectivity among components for two "
-"tagged (VLAN) networks. Essentially, all networks use a single OVS "
-"integration bridge with different internal VLAN tags. The internal VLAN tags "
-"almost always differ from the network VLAN assignment in the Networking "
-"service. Similar to the untagged network case, the DHCP agent may reside on "
-"a different compute node."
-msgstr ""
-"Gambar berikut menjelaskan konektivitas virtual antara komponen untuk dua "
-"jaringan tag (VLAN). Pada dasarnya, semua jaringan menggunakan jembatan OVS "
-"integrasi tunggal dengan tag VLAN internal yang berbeda. Tag VLAN internal "
-"yang hampir selalu berbeda dari tugas VLAN jaringan dalam layanan "
-"Networking. Mirip dengan kasus jaringan untagged, agen DHCP dapat berada "
-"pada node komputasi yang berbeda."
-
-msgid ""
-"The following figure describes virtual connectivity among components for two "
-"tagged (VLAN) networks. Essentially, each network uses a separate bridge "
-"that contains a port on the VLAN sub-interface on the provider physical "
-"network interface. Similar to the single untagged network case, the DHCP "
-"agent may reside on a different compute node."
-msgstr ""
-"Gambar berikut menjelaskan konektivitas virtual antar komponen untuk dua "
-"jaringan tag (VLAN). Pada dasarnya, setiap jaringan menggunakan jembatan "
-"terpisah yang berisi port pada sub-interface VLAN pada antarmuka jaringan "
-"fisik provider. Mirip dengan kasus jaringan tunggal untagged, agen DHCP "
-"dapat berada pada node komputasi yang berbeda."
-
-msgid ""
-"The following figure shows components and connectivity for one self-service "
-"network and one untagged (flat) network. In this particular case, the "
-"instance resides on the same compute node as the DHCP agent for the network. "
-"If the DHCP agent resides on another compute node, the latter only contains "
-"a DHCP namespace with a port on the OVS integration bridge."
-msgstr ""
-"Gambar berikut menunjukkan komponen dan konektivitas untuk satu jaringan "
-"self-service dan satu jaringan tanpa tanda (flat). Dalam kasus ini, "
-"instance berada pada node komputasi yang sama sebagai agen DHCP untuk "
-"jaringan. Jika agen DHCP berada pada node komputasi lain, yang terakhir "
-"hanya berisi namespace DHCP dengan port pada jembatan integrasi OVS."
-
-msgid ""
-"The following figure shows components and connectivity for one self-service "
-"network and one untagged (flat) network. The master router resides on "
-"network node 1. In this particular case, the instance resides on the same "
-"compute node as the DHCP agent for the network. If the DHCP agent resides on "
-"another compute node, the latter only contains a DHCP namespace and Linux "
-"bridge with a port on the overlay physical network interface."
-msgstr ""
-"Gambar berikut menunjukkan komponen dan konektivitas untuk satu jaringan "
-"self-service dan satu jaringan untagged (flat). Router master berada pada "
-"node jaringan 1. Dalam kasus ini, instance berada pada node komputasi yang "
-"sama sebagai agen DHCP untuk jaringan. Jika agen DHCP berada pada node "
-"komputasi lain, yang terakhir hanya berisi namespace DHCP dan Linux bridge "
-"dengan port pada interface jaringan fisik overlay."
-
-msgid ""
-"The following figure shows components and connectivity for one self-service "
-"network and one untagged (flat) provider network. In this particular case, "
-"the instance resides on the same compute node as the DHCP agent for the "
-"network. If the DHCP agent resides on another compute node, the latter only "
-"contains a DHCP namespace and Linux bridge with a port on the overlay "
-"physical network interface."
-msgstr ""
-"Gambar berikut menunjukkan komponen dan konektivitas untuk satu jaringan "
-"self-service dan satu jaringan operator untagged (flat). Dalam kasus ini, "
-"instance berada pada node komputasi yang sama sebagai agen DHCP untuk "
-"jaringan. Jika agen DHCP berada pada node komputasi lain, yang terakhir "
-"hanya berisi namespace DHCP dan jembatan Linux dengan port pada interface "
-"jaringan fisik overlay."
-
-msgid ""
-"The following figure shows components and connectivity for one self-service "
-"network and one untagged (flat) provider network. In this particular case, "
-"the instance resides on the same compute node as the DHCP agent for the "
-"network. If the DHCP agent resides on another compute node, the latter only "
-"contains a DHCP namespace and with a port on the OVS integration bridge."
-msgstr ""
-"Gambar berikut menunjukkan komponen dan konektivitas untuk satu jaringan "
-"self-service dan satu jaringan provider tanpa tanda (flat). Dalam kasus ini, "
-"instance berada pada node komputasi yang sama sebagai agen DHCP untuk "
-"jaringan. Jika agen DHCP berada pada node komputasi lain, yang terakhir "
-"hanya berisi namespace DHCP dan dengan port di jembatan integrasi OVS."
-
-msgid ""
-"The following figure shows components and connectivity for one untagged "
-"(flat) network. In this particular case, the instance resides on the same "
-"compute node as the DHCP agent for the network. If the DHCP agent resides on "
-"another compute node, the latter only contains a DHCP namespace and Linux "
-"bridge with a port on the provider physical network interface."
-msgstr ""
-"Gambar berikut menunjukkan komponen dan konektivitas untuk satu jaringan "
-"untagged (flat). Dalam kasus ini, instance berada pada node komputasi yang "
-"sama sebagai agen DHCP untuk jaringan. Jika agen DHCP berada pada node "
-"komputasi lain, yang terakhir hanya berisi namespace DHCP dan Linux bridge "
-"dengan port pada antarmuka jaringan fisik provider."
-
-msgid ""
-"The following figure shows components and connectivity for one untagged "
-"(flat) network. In this particular case, the instance resides on the same "
-"compute node as the DHCP agent for the network. If the DHCP agent resides on "
-"another compute node, the latter only contains a DHCP namespace with a port "
-"on the OVS integration bridge."
-msgstr ""
-"Gambar berikut menunjukkan komponen dan konektivitas untuk satu jaringan "
-"untagged (flat). Dalam kasus ini, instance berada pada node komputasi yang "
-"sama sebagai agen DHCP untuk jaringan. Jika agen DHCP berada pada node "
-"komputasi lain, yang terakhir hanya berisi namespace DHCP dengan port di "
-"jembatan integrasi OVS."
-
-msgid ""
-"The following illustrates the creation of a port with ``my-port`` in its "
-"``dns_name`` attribute."
-msgstr ""
-"Berikut ini menggambarkan pembuatan port dengan ``my-port`` dalam "
-"attributnya ``dns_name``."
-
-msgid ""
-"The following is an example of an instance creation, showing how its "
-"``hostname`` populates the ``dns_name`` attribute of the allocated port:"
-msgstr ""
-"Berikut ini adalah contoh dari pembuatan instance, yang menunjukkan "
-"bagaimana ``hostname`` akan mengisi atribut ``dns_name`` dari port yang "
-"dialokasikan:"
-
-msgid "The following is an example:"
-msgstr "Berikut ini adalah contoh:"
-
-msgid ""
-"The following link provides a great step by step tutorial on setting up IPv6 "
-"with OpenStack: `Tenant IPV6 deployment in OpenStack Kilo release `_."
-msgstr ""
-"Link berikut memberikan langkah besar tutorial langkah demi langkah tentang "
-"pengaturan IPv6 dengan OpenStack: `Tenant IPV6 deployment in OpenStack Kilo "
-"release `_."
-
-msgid "The following manufacturers are known to work:"
-msgstr "Produsen berikut diketahui bekerja:"
-
-msgid ""
-"The following sections describe the flow of network traffic in several "
-"common scenarios. *North-south* network traffic travels between an instance "
-"and external network such as the Internet. *East-west* network traffic "
-"travels between instances on the same or different networks. In all "
-"scenarios, the physical network infrastructure handles switching and routing "
-"among provider networks and external networks such as the Internet. Each "
-"case references one or more of the following components:"
-msgstr ""
-"Bagian berikut menjelaskan arus lalu lintas jaringan di beberapa skenario "
-"umum. Lalu lintas jaringan *north-south* berjalan antara instance dan "
-"jaringan eksternal seperti Internet. Lalu lintas jaringan *east-barat * "
-"berjalan antara instance pada jaringan yang sama atau berbeda. Dalam semua "
-"skenario, infrastruktur jaringan fisik menangani switching dan routing "
-"antara jaringan provider dan jaringan eksternal seperti Internet. Setiap "
-"kasus merujuk satu atau lebih dari komponen berikut:"
-
-msgid ""
-"The following shows the dnsmasq process that libvirt manages as it appears "
-"in the output of :command:`ps`::"
-msgstr ""
-"Berikut ini menunjukkan proses dnsmasq dimana libvirt mengelola seperti yang "
-"muncul dalam output :command:`ps`::"
-
-msgid ""
-"The following steps create a routed provider network with two segments. Each "
-"segment contains one IPv4 subnet and one IPv6 subnet."
-msgstr ""
-"Langkah-langkah berikut membuat jaringan penyedia dialihkan dengan dua "
-"segmen. Setiap segmen berisi satu subnet IPv4 dan satu subnet IPv6."
-
-msgid "The following steps involve compute node 1."
-msgstr "Langkah berikut melibatkan komputasi node 1."
-
-msgid "The following steps involve compute node 1:"
-msgstr "Langkah berikut melibatkan komputasi node 1:"
-
-msgid "The following steps involve compute node 2:"
-msgstr "Langkah berikut melibatkan komputasi node 2:"
-
-msgid "The following steps involve the compute node:"
-msgstr "Langkah berikut melibatkan node komputasi:"
-
-msgid "The following steps involve the network node:"
-msgstr "Langkah berikut melibatkan node jaringan:"
-
-msgid "The following steps involve the physical network infrastructure:"
-msgstr "Langkah-langkah berikut melibatkan infrastruktur jaringan fisik:"
-
-msgid "The following table compares v1 and v2 features."
-msgstr "Tabel berikut membandingkan fitur v1 dan v2."
-
-msgid ""
-"The following tables shows which reference implementations support which non-"
-"L2 neutron agents:"
-msgstr ""
-"Tabel berikut menunjukkan dimana reference implementation mendukung yang "
-"agen neutron non-L2:"
-
-msgid "The following terms are used throughout this document:"
-msgstr "Istilah berikut digunakan di seluruh dokumen ini:"
-
-msgid "The following type drivers are available"
-msgstr "Driver tipe berikut tersedia"
-
-msgid ""
-"The following use cases refer to adding tags to networks, but the same can "
-"be applicable to any other supported Networking service resource:"
-msgstr ""
-"Kasus penggunaan (use case) berikut mengacu pada penambahan tag ke jaringan, "
-"tetapi hal yang sama dapat berlaku untuk setiap sumber daya layanan "
-"Networking yang didukung lainnya:"
-
-msgid ""
-"The health of your ``keepalived`` instances can be automatically monitored "
-"via a bash script that verifies connectivity to all available and configured "
-"gateway addresses. In the event that connectivity is lost, the master router "
-"is rescheduled to another node."
-msgstr ""
-"Kesehatan instance ``keepalived`` Anda dapat secara otomatis dipantau "
-"melalui skrip bash yang memverifikasi konektivitas ke semua alamat gerbang "
-"yang dikonfigurasi dan tersedia. Dalam hal konektivitas yang hilang, master "
-"router dijadwal ulang ke node lain."
-
-msgid ""
-"The host containing the BGP agent must have layer-3 connectivity to the "
-"provider router."
-msgstr ""
-"Host yang berisi agen BGP harus memiliki konektivitas lapisan-3 dengan "
-"router provider."
-
-msgid ""
-"The instance 1 interface (1) forwards the packet to the provider bridge "
-"instance port (2) via ``veth`` pair."
-msgstr ""
-"Instance 1 interface (1) meneruskan paket ke provider bridge instance port "
-"(2) melalui pasangan ``veth``."
-
-msgid ""
-"The instance 1 interface (1) forwards the packet to the security group "
-"bridge instance port (2) via ``veth`` pair."
-msgstr ""
-"Instance 1 interface (1) meneruskan paket ke security group bridge instance "
-"port (2) melalui pasangan ``veth``."
-
-msgid ""
-"The instance 1 interface (1) forwards the packet to the self-service bridge "
-"instance port (2) via ``veth`` pair."
-msgstr ""
-"Instance 1 interface (1) meneruskan paket ke self-service bridge "
-"instance port (2) melalui pasangan ``veth``."
-
-msgid ""
-"The instance interface (1) forwards the packet to the provider bridge "
-"instance port (2) via ``veth`` pair."
-msgstr ""
-"Antarmuka instance (1) meneruskan paket ke provider bridge instance port (2) "
-"melalui pasangan ``veth``."
-
-msgid ""
-"The instance interface (1) forwards the packet to the security group bridge "
-"instance port (2) via ``veth`` pair."
-msgstr ""
-"Instance interface (1) meneruskan paket ke security group bridge instance "
-"port (2) melalui pasangan ``veth``."
-
-msgid ""
-"The instance interface (1) forwards the packet to the self-service bridge "
-"instance port (2) via ``veth`` pair."
-msgstr ""
-"Instance interface (1) meneruskan paket ke self-service bridge instance "
-"port (2) melalui pasangan ``veth``."
-
-msgid ""
-"The instance of a router on each compute node consumes an IPv4 address on "
-"the provider network on which it contains a gateway."
-msgstr ""
-"Instance router pada setiap node komputasi mengkonsumsi alamat IPv4 pada "
-"jaringan provider yang mengandung gateway."
-
-msgid "The instance resides on compute node 1 and uses provider network 1."
-msgstr ""
-"Instance berada pada komputasi node 1 dan menggunakan jaringan provider 1."
-
-msgid "The instance resides on compute node 1 and uses self-service network 1."
-msgstr ""
-"Instance berada pada komputasi node 1 dan menggunakan jaringan self-"
-"service 1."
-
-msgid "The instance sends a packet to a host on the Internet."
-msgstr "Instance mengirimkan sebuah paket ke host di Internet."
-
-msgid ""
-"The intent of separate branches is to allow invoking those safe migrations "
-"from the expand branch while the Neutron server is running and therefore "
-"reducing downtime needed to upgrade the service."
-msgstr ""
-"Maksud dari cabang terpisah adalah untuk memungkinkan permohonan migrasi "
-"mereka yang aman dari perluasan cabang ketika server Neutron berjalan dan "
-"karena itu mengurangi downtime yang dibutuhkan untuk meng-upgrade layanan."
-
-msgid ""
-"The internal DNS functionality offered by the Networking service and its "
-"interaction with the Compute service."
-msgstr ""
-"Fungsi DNS internal yang ditawarkan oleh layanan Networking dan interaksinya "
-"dengan layanan Compute."
-
-msgid "The load balancer now handles traffic on ports 80 and 443."
-msgstr "Penyeimbang beban sekarang menangani lalu lintas pada port 80 dan 443."
-
-msgid ""
-"The load balancer occupies a neutron network port and has an IP address "
-"assigned from a subnet."
-msgstr ""
-"Penyeimbang beban menempati port jaringan neutron dan memiliki alamat IP "
-"yang ditetapkan dari subnet."
-
-msgid "The master router may reside on network node 2."
-msgstr "Router utama mungkin berada pada network node 2."
-
-msgid ""
-"The mechanism driver is responsible for taking the information established "
-"by the type driver and ensuring that it is properly applied given the "
-"specific networking mechanisms that have been enabled."
-msgstr ""
-"Driver mekanisme bertanggung jawab untuk mengambil informasi yang ditetapkan "
-"oleh driver tipe dan memastikan bahwa itu benar diterapkan mengingat "
-"mekanisme jaringan tertentu yang telah diaktifkan."
-
-msgid ""
-"The migration may be paused, even for an extended period of time (for "
-"example, while testing or investigating issues) with some hypervisors on "
-"legacy networking and some on Networking, and Compute API remains fully "
-"functional. Individual hypervisors may be rolled back to legacy networking "
-"during this stage of the migration, although this requires an additional "
-"restart."
-msgstr ""
-"Migrasi dapat berhenti, bahkan untuk jangka waktu (misalnya, saat uji coba "
-"atau menyelidiki masalah) dengan beberapa hypervisors pada jaringan legacy "
-"dan beberapa di layanan Networking, dan Compute API tetap berfungsi secara "
-"penuh. Hypervisors individu dapat digulung kembali (rolled back) ke jaringan "
-"legacy selama tahap ini migrasi, meskipun ini membutuhkan restart tambahan."
-
-msgid ""
-"The migration process from the legacy nova-network networking service to "
-"OpenStack Networking (neutron) has some limitations and impacts on the "
-"operational state of the cloud. It is critical to understand them in order "
-"to decide whether or not this process is acceptable for your cloud and all "
-"users."
-msgstr ""
-"Proses migrasi dari layanan jaringan nova-network legacy ke OpenStack "
-"Networking (neutron) memiliki beberapa keterbatasan dan dampak pada keadaan "
-"operasional cloud. Hal ini penting untuk memahami mereka untuk memutuskan "
-"apakah proses ini dapat diterima atau tidak bagi cloud dan semua pengguna."
-
-msgid ""
-"The migration process is quite simple, it involves turning down the router "
-"by setting the router's ``admin_state_up`` attribute to ``False``, upgrading "
-"the router to L3 HA and then setting the router's ``admin_state_up`` "
-"attribute back to ``True``."
-msgstr ""
-"Proses migrasi cukup sederhana, proses melibatkan menolak router dengan "
-"menetapkan attribut router ``admin_state_up`` ke ``False``, upgrade router "
-"ke L3 HA dan kemudian menetapkan attribut router ``admin_state_up`` kembali "
-"ke ``True``."
-
-msgid ""
-"The most common application programming interface (API) for writing TCP-"
-"based applications is called *Berkeley sockets*, also known as *BSD sockets* "
-"or, simply, *sockets*. The sockets API exposes a *stream oriented* interface "
-"for writing TCP applications. From the perspective of a programmer, sending "
-"data over a TCP connection is similar to writing a stream of bytes to a "
-"file. It is the responsibility of the operating system's TCP/IP "
-"implementation to break up the stream of data into IP packets. The operating "
-"system is also responsible for automatically retransmitting dropped packets, "
-"and for handling flow control to ensure that transmitted data does not "
-"overrun the sender's data buffers, receiver's data buffers, and network "
-"capacity. Finally, the operating system is responsible for re-assembling the "
-"packets in the correct order into a stream of data on the receiver's side. "
-"Because TCP detects and retransmits lost packets, it is said to be a "
-"*reliable* protocol."
-msgstr ""
-"Application programming interface (API) yang paling umum untuk menulis "
-"aplikasi berbasis TCP disebut *Berkeley soket*, juga dikenal sebagai *BSD "
-"soket* atau, sederhananya, *soket*. Soket API mengekspos antarmuka *stream "
-"oriented* untuk menulis aplikasi TCP. Dari perspektif programmer, pengiriman "
-"data melalui koneksi TCP mirip dengan menulis aliran byte ke file. Ini "
-"adalah tanggung jawab penerapan TCP/IP di sistem operasi untuk memecah "
-"aliran data ke dalam paket IP. Sistem operasi ini juga bertanggung jawab "
-"untuk secara otomatis mentransmisi kembali paket terjatuh, dan untuk "
-"menangani kontrol aliran untuk memastikan bahwa data yang dikirimkan tidak "
-"dibanjiri buffer data pengirim, buffer data penerima, dan kapasitas "
-"jaringan. Akhirnya, sistem operasi bertanggung jawab untuk kembali merakit "
-"paket dalam urutan yang benar ke dalam aliran data di sisi penerima. Karena "
-"TCP mendeteksi dan mentransmisikan kembali paket yang hilang, maka dikatakan "
-"protokol *reliable*."
-
-msgid ""
-"The name assigned to the port by the Networking service internal DNS is now "
-"visible in the response in the ``dns_assignment`` attribute."
-msgstr ""
-"Nama yang ditugaskan ke port oleh layanan Jaringan DNS internal sekarang "
-"terlihat dalam respon dalam atribut ``dns_assignment``."
-
-msgid ""
-"The name given to the instance by the user, ``my_vm``, is sanitized by the "
-"Compute service and becomes ``my-vm`` as the port's ``dns_name``."
-msgstr ""
-"Nama yang diberikan untuk instance oleh pengguna, ``my_vm``, disterilkan "
-"oleh layanan Compute dan menjadi ``my-vm`` sebagai ``dns_name`` port."
-
-msgid ""
-"The namespace for router 1 from :ref:`deploy-lb-selfservice` should only "
-"appear on network node 1 because of creation prior to enabling VRRP."
-msgstr ""
-"Namespace untuk router 1 dari :ref:`deploy-lb-selfservice` seharusnya hanya "
-"muncul di jaringan node 1 karena penciptaan sebelum mengaktifkan VRRP."
-
-msgid ""
-"The namespace for router 1 from :ref:`deploy-ovs-selfservice` should also "
-"appear on network node 1 because of creation prior to enabling distributed "
-"routing."
-msgstr ""
-"Namespace untuk router 1 dari :ref:`deploy-ovs-selfservice` juga harus "
-"muncul di jaringan node 1 karena penciptaan sebelum mengaktifkan routing "
-"terdistribusikan."
-
-msgid ""
-"The native OVS firewall implementation requires kernel and user space "
-"support for *conntrack*, thus requiring minimum versions of the Linux kernel "
-"and Open vSwitch. All cases require Open vSwitch version 2.5 or newer."
-msgstr ""
-"Implementasi firewall OVS asli memerlukan kernel dan dukungan ruang pengguna "
-"untuk *conntrack*, sehingga membutuhkan versi minimum dari kernel Linux dan "
-"Open vSwitch. Semua case memerlukan Open vSwitch versi 2.5 atau yang lebih "
-"baru."
-
-msgid ""
-"The network address ranges in the examples of this guide should not be used "
-"for any purpose other than documentation."
-msgstr ""
-"Rentang alamat jaringan dalam contoh panduan ini tidak boleh digunakan untuk "
-"tujuan apapun selain dokumentasi."
-
-msgid ""
-"The network address ranges used in this guide are chosen in accordance with "
-"`RFC 5737 `_ and `RFC 3849 `_, and as such are restricted to the following:"
-msgstr ""
-"Rentang alamat jaringan yang digunakan dalam panduan ini dipilih sesuai "
-"dengan `RFC 5737 `_ dan `RFC 3849 "
-"`_, dan dengan demikian dibatasi "
-"sebagai berikut:"
-
-msgid "The network cannot have attribute ``router:external`` set to ``True``."
-msgstr ""
-"Jaringan tidak dapat memiliki atribut ``router:external`` atur ke ``True``."
-
-msgid ""
-"The network presented by the subport is the network of the associated port. "
-"When creating a subport, a ``segmentation-id`` may be required by the "
-"driver. ``segmentation-id`` defines the segmentation ID on which the subport "
-"network is presented to the instance. ``segmentation-type`` may be required "
-"by certain drivers like OVS, although at this time only ``vlan`` is "
-"supported as a ``segmentation-type``."
-msgstr ""
-"Jaringan disajikan oleh subport adalah jaringan dari port yang terkait. "
-"Ketika membuat subport, satu ``segmentation-id`` mungkin diperlukan oleh "
-"driver. ``segmentation-id`` mendefinisikan ID segmentasi dimana jaringan "
-"subport disajikan untuk instance. ``segmentation-type`` mungkin diperlukan "
-"oleh driver tertentu seperti OVS, meskipun saat ini hanya ``vlan`` didukung "
-"sebagai ``segmentation-type``."
-
-msgid ""
-"The network trunk service allows multiple networks to be connected to an "
-"instance using a single virtual NIC (vNIC). Multiple networks can be "
-"presented to an instance by connecting it to a single port."
-msgstr ""
-"Layanan trunk jaringan mengizinkan beberapa jaringan untuk terhubung ke "
-"sebuah instance menggunakan NIC virtual tunggal (vNIC). Beberapa jaringan "
-"dapat disajikan ke sebuah instance dengan menghubungkannya ke port tunggal."
-
-msgid "The network type can be FLAT, VLAN, GRE, VXLAN or GENEVE."
-msgstr "Tipe jaringan dapat FLAT, VLAN, GRE, VXLAN atau GENEVE."
-
-msgid ""
-"The newer FWaaS implementation, v2, provides a much more granular service. "
-"The notion of a firewall has been replaced with firewall group to indicate "
-"that a firewall consists of two policies: an ingress policy and an egress "
-"policy. A firewall group is applied not at the router level (all ports on a "
-"router) but at the port level. Currently, router ports can be specified. For "
-"Ocata, VM ports can also be specified."
-msgstr ""
-"Penerapan FWaaS yang lebih baru, v2, menyediakan layanan yang jauh lebih "
-"rinci. Gagasan firewall telah diganti dengan kelompok firewall untuk "
-"menunjukkan bahwa firewall terdiri dari dua kebijakan: kebijakan masuknya "
-"(ingress) dan kebijakan egress. Sekelompok firewall diterapkan tidak pada "
-"tingkat router (semua port pada router) tetapi di tingkat port. Saat ini, "
-"port router dapat ditentukan. Untuk Ocata, port VM juga dapat ditentukan."
-
-msgid ""
-"The operating system of the TCP client application automatically assigns a "
-"port number to the client. The client owns this port number until the TCP "
-"connection is terminated, after which the operating system reclaims the port "
-"number. These types of ports are referred to as *ephemeral ports*."
-msgstr ""
-"Sistem operasi dari aplikasi klien TCP secara otomatis memberikan nomor port "
-"untuk klien. Klien memiliki nomor port ini sampai koneksi TCP dihentikan, "
-"setelah sistem operasi mengambil kembali nomor port. Jenis port disebut "
-"sebagai *ephemeral ports*."
-
-msgid ""
-"The optional DHCP service manages IP addresses for instances on provider and "
-"self-service networks. The Networking service implements the DHCP service "
-"using an agent that manages ``qdhcp`` namespaces and the ``dnsmasq`` service."
-msgstr ""
-"Layanan DHCP opsional mengelola alamat IP untuk instance pada jaringan "
-"provider dan self-service. Layanan Networking mengimplementasikan layanan "
-"DHCP dengan menggunakan agen yang mengelola namespaces ``qdhcp`` dan layanan "
-"``dnsmasq``."
-
-msgid ""
-"The optional metadata service provides an API for instances to obtain "
-"metadata such as SSH keys."
-msgstr ""
-"Layanan metadata opsional menyediakan API instance untuk mendapatkan "
-"metadata seperti kunci SSH."
-
-msgid "The options used in the configuration file above are:"
-msgstr "Opsi yang digunakan dalam file konfigurasi di atas adalah:"
-
-msgid ""
-"The original FWaaS implementation, v1, provides protection for routers. When "
-"a firewall is applied to a router, all internal ports are protected."
-msgstr ""
-"Pelaksanaan FWaaS original, v1, memberikan perlindungan untuk router. Ketika "
-"firewall diterapkan untuk router, semua port internal dilindungi."
-
-msgid ""
-"The output of the :command:`route -n` and :command:`netstat -rn` commands "
-"are formatted in a slightly different way. This example shows how the same "
-"routes would be formatted using these commands:"
-msgstr ""
-"Output dari perintah :command:`route -n` and :command:`netstat -rn` diformat "
-"dengan cara yang sedikit berbeda. Contoh ini menunjukkan bagaimana rute yang "
-"sama akan diformat menggunakan perintah ini:"
-
-msgid ""
-"The output shows ``bridge-mapping`` and the number of virtual network "
-"devices on this L2 agent."
-msgstr ""
-"Output menunjukkan ``bridge-mapping`` dan jumlah perangkat jaringan virtual "
-"pada agen L2 ini."
-
-msgid ""
-"The output shows information for four agents. The ``alive`` field shows "
-"``True`` if the agent reported its state within the period defined by the "
-"``agent_down_time`` option in the ``neutron.conf`` file. Otherwise the "
-"``alive`` is ``False``."
-msgstr ""
-"Hasilnya menunjukkan informasi untuk empat agen. Field ``alive`` menunjukkan "
-"``True`` jika agen melaporkan keadaannya dalam periode yang ditentukan oleh "
-"opsi ``agent_down_time`` di file ``neutron.conf``. Jika tidak ``alive`` "
-"adalah ``False``."
-
-msgid ""
-"The output shows that the entry allows the action ``access_as_shared`` on "
-"object ``84a7e627-573b-49da-af66-c9a65244f3ce`` of type ``network`` to "
-"target_tenant ``*``, which is a wildcard that represents all projects."
-msgstr ""
-"Output menunjukkan bahwa entri mengizinkan tindakan ``access_as_shared`` on "
-"object ``84a7e627-573b-49da-af66-c9a65244f3ce`` dari tipe ``network`` ke "
-"target_tenant ``*``, merupakan wildcard yang mewakili semua proyek."
-
-msgid ""
-"The physical network infrastructure (1) forwards the packet to the provider "
-"physical network interface (2)."
-msgstr ""
-"Infrastruktur jaringan fisik (1) meneruskan paket ke provider physical "
-"network interface (2)."
-
-msgid ""
-"The physical network interface (12) forwards the packet to the OVS provider "
-"bridge provider network port (13)."
-msgstr ""
-"Physical network interface (12) meneruskan paket ke OVS provider bridge "
-"provider network port (13)."
-
-msgid ""
-"The physical network interface (12) removes VLAN tag 102 from the packet and "
-"forwards it to the VLAN sub-interface port (13) on the provider bridge."
-msgstr ""
-"Physical network interface (12) menghapus VLAN tag 102 dari paket dan "
-"meneruskannya ke VLAN sub-interface port (13) di provider bridge."
-
-msgid ""
-"The physical network interface (14) for the VXLAN interface sends the packet "
-"to the compute node via the overlay network (15)."
-msgstr ""
-"Antarmuka jaringan fisik (14) untuk antarmuka VXLAN mengirimkan paket ke "
-"node komputasi melalui jaringan overlay (15)."
-
-msgid ""
-"The physical network interface (16) forwards the packet to the OVS provider "
-"bridge provider network port (17)."
-msgstr ""
-"Physical network interface (16) meneruskan paket ke OVS provider bridge "
-"provider network port (17)."
-
-msgid ""
-"The physical network interface (5) adds VLAN tag 101 to the packet and "
-"forwards it to the physical network infrastructure switch (6)."
-msgstr ""
-"Antarmuka jaringan fisik (5) menambahkan VLAN tag 101 untuk paket dan "
-"meneruskannya ke switch infrastruktur jaringan fisik (6)."
-
-msgid ""
-"The physical network interface (8) removes VLAN tag 101 from the packet and "
-"forwards it to the VLAN sub-interface port (9) on the provider bridge."
-msgstr ""
-"Antarmuka jaringan fisik (8) menghapus VLAN tag 101 dari paket dan "
-"meneruskannya ke VLAN sub-interface port (9) pada provider bridge."
-
-msgid ""
-"The physical network interface forwards the packet to the Internet via "
-"physical network infrastructure (23)."
-msgstr ""
-"Physical network interface meneruskan paket ke Internet melalui physical "
-"network infrastructure (23)."
-
-msgid ""
-"The physical network interface forwards the packet to the physical network "
-"infrastructure switch (10)."
-msgstr ""
-"Physical network interface meneruskan paket ke physical network "
-"infrastructure switch (10)."
-
-msgid ""
-"The port chain plug-in supports backing service providers including the OVS "
-"driver and a variety of SDN controller drivers. The common driver API "
-"enables different drivers to provide different implementations for the "
-"service chain path rendering."
-msgstr ""
-"Plug-in rantai port mendukung penyokong penyedia layanan termasuk driver "
-"OVS dan berbagai driver kontroler SDN. Driver umum API mengaktifkan driver "
-"yang lain untuk memberikan implementasi yang berbeda untuk rantai layanan "
-"path rendering."
-
-msgid ""
-"The port's ``dns_assignment`` attribute shows that its FQDN is ``my-vm."
-"example.org.`` in the Networking service internal DNS, which is the result "
-"of concatenating the port's ``dns_name`` with the value configured in the "
-"``dns_domain`` parameter in ``neutron.conf``, as explained previously."
-msgstr ""
-"Atribut ``dns_assignment`` port menunjukkan bahwa FQDN nya adalah ``my-vm."
-"example.org.`` dalam layanan Jaringan DNS internal, yang merupakan hasil "
-"dari penggabungan ``dns_name`` port dengan nilai yang dikonfigurasi dalam "
-"yang parameter ``dns_domain`` di ``neutron.conf``, seperti yang dijelaskan "
-"sebelumnya."
-
-msgid ""
-"The port's data was visible in the DNS service as soon as it was created."
-msgstr "Data milik port terlihat di layanan DNS segera setelah dibuat."
-
-msgid ""
-"The prefix delegation mechanism then sends a request via the external "
-"network to your prefix delegation server, which replies with the delegated "
-"prefix. The subnet is then updated with the new prefix, including issuing "
-"new IP addresses to all ports:"
-msgstr ""
-"Mekanisme prefix delegation kemudian mengirimkan permintaan melalui jaringan "
-"eksternal ke server prefix delegation Anda, yang menjawab dengan delegated "
-"prefix. Subnet tersebut kemudian diperbarui dengan prefix baru, termasuk "
-"menerbitkan alamat IP baru untuk semua port:"
-
-msgid ""
-"The provider bridge forwards the packet to the self-service router gateway "
-"port on the provider network (5)."
-msgstr ""
-"Provider bridge meneruskan paket ke self-service router gateway port pada "
-"jaringan provider (5)."
-
-msgid ""
-"The provider bridge instance port (11) forwards the packet to the instance 2 "
-"interface (12) via ``veth`` pair."
-msgstr ""
-"Provider bridge instance port (11) meneruskan paket ke instance 2 interface "
-"(12) melalui pasangan ``veth``."
-
-msgid ""
-"The provider bridge instance port (15) forwards the packet to the instance 2 "
-"interface (16) via ``veth`` pair."
-msgstr ""
-"Provider bridge instance port (15) meneruskan paket ke instance 2 interface "
-"(16) melalui pasangan ``veth``."
-
-msgid ""
-"The provider networks architecture example provides layer-2 connectivity "
-"between instances and the physical network infrastructure using VLAN "
-"(802.1q) tagging. It supports one untagged (flat) network and and up to 4095 "
-"tagged (VLAN) networks. The actual quantity of VLAN networks depends on the "
-"physical network infrastructure. For more information on provider networks, "
-"see :ref:`intro-os-networking-provider`."
-msgstr ""
-"Contoh arsitektur jaringan provider memberikan konektivitas lapisan-2 antar "
-"instance dan infrastruktur jaringan fisik dengan menggunakan VLAN (802.1q) "
-"tagging. Arsitektur ini mendukung satu jaringan untagged (flat) dan "
-"jaringan tagged (VLAN) sampai 4095. Jumlah sebenarnya dari jaringan VLAN "
-"tergantung pada infrastruktur jaringan fisik. Untuk informasi lebih lanjut "
-"tentang jaringan provider, lihat :ref:`intro-os-networking-provider`."
-
-msgid ""
-"The provider physical network interface (14) adds VLAN tag 101 to the packet "
-"and forwards it to the Internet via physical network infrastructure (15)."
-msgstr ""
-"Provider physical network interface (14) menambahkan VLAN tag 101 untuk "
-"paket dan meneruskannya ke Internet melalui infrastruktur jaringan fisik "
-"(15)."
-
-msgid ""
-"The provider physical network interface forwards the packet to the OVS "
-"provider bridge provider network port (3)."
-msgstr ""
-"Provider physical network interface meneruskan paket ke OVS provider bridge "
-"provider network port (3)."
-
-msgid ""
-"The provider physical network interface removes VLAN tag 101 and forwards "
-"the packet to the VLAN sub-interface on the provider bridge."
-msgstr ""
-"Antarmuka jaringan fisik provider menghapus VLAN tag 101 dan meneruskan "
-"paket ke sub-interface VLAN pada provider bridge."
-
-msgid ""
-"The purpose of VXLAN is to provide scalable network isolation. VXLAN is a "
-"Layer 2 overlay scheme on a Layer 3 network. It allows an overlay layer-2 "
-"network to spread across multiple underlay layer-3 network domains. Each "
-"overlay is termed a VXLAN segment. Only VMs within the same VXLAN segment "
-"can communicate."
-msgstr ""
-"Tujuan dari VXLAN adalah untuk memberikan isolasi jaringan scalable. VXLAN "
-"adalah skema overlay Layer 2 pada jaringan Layer 3. Hal ini mengizinkan "
-"jaringan lapisan-2 overlay untuk tersebar di beberapa domain jaringan "
-"lapisan-3 yang mendasarinya. Setiap overlay disebut segmen VXLAN. Hanya VM "
-"dalam segmen VXLAN yang sama dapat berkomunikasi."
-
-msgid ""
-"The purpose of this page is to describe how to enable SR-IOV functionality "
-"available in OpenStack (using OpenStack Networking). This functionality was "
-"first introduced in the OpenStack Juno release. This page intends to serve "
-"as a guide for how to configure OpenStack Networking and OpenStack Compute "
-"to create SR-IOV ports."
-msgstr ""
-"Tujuan dari halaman ini adalah untuk menjelaskan cara mengaktifkan fungsi SR-"
-"IOV yang tersedia di OpenStack (menggunakan OpenStack Networking). Fungsi "
-"ini pertama kali diperkenalkan di rilis OpenStack Juno. Halaman ini "
-"bermaksud untuk membantu sebagai panduan untuk mengetahui cara bagaimana "
-"mengkonfigurasi OpenStack Networking dan OpenStack Compute untuk membuat "
-"port SR-IOV."
-
-msgid ""
-"The router contains an interface on the self-service subnet and a gateway on "
-"the external network."
-msgstr ""
-"Router berisi sebuah antarmuka pada subnet self-service dan gateway pada "
-"jaringan eksternal."
-
-msgid ""
-"The router forwards the packet to the OVS integration bridge port for self-"
-"service network 2 (18)."
-msgstr ""
-"Router meneruskan paket ke OVS integration bridge port untuk self-service "
-"network 2 (18)."
-
-msgid ""
-"The router forwards the packet to the OVS integration bridge port for the "
-"provider network (18)."
-msgstr ""
-"Router meneruskan paket ke OVS integration bridge port untuk jaringan "
-"provider (18)."
-
-msgid ""
-"The router forwards the packet to the OVS integration bridge port for the "
-"self-service network (11)."
-msgstr ""
-"Router meneruskan paket ke OVS integration bridge port untuk jaringan self-"
-"service (11)."
-
-msgid ""
-"The router forwards the packet to the OVS integration bridge port for the "
-"self-service network (9)."
-msgstr ""
-"Router meneruskan paket ke OVS integration bridge port untuk jaringan self-"
-"service (9)."
-
-msgid "The router forwards the packet to the provider bridge router port (12)."
-msgstr "Router meneruskan paket ke provider bridge router port (12)."
-
-msgid ""
-"The router forwards the packet to the self-service bridge router port (7)."
-msgstr "Router meneruskan paket ke self-service bridge router port (7)."
-
-msgid ""
-"The router forwards the packet to the self-service network 2 bridge router "
-"port (12)."
-msgstr ""
-"Router meneruskan paket ke self-service network 2 bridge router port (12)."
-
-msgid "The router forwards the packet to the switch (10)."
-msgstr "Router meneruskan paket ke switch (10)."
-
-msgid "The router forwards the packet to the switch (14)."
-msgstr "Router meneruskan paket ke switch (14)."
-
-msgid ""
-"The router performs DNAT on the packet which changes the destination IP "
-"address to the instance IP address on the self-service network via the self-"
-"service network interface (10)."
-msgstr ""
-"Router melakukan DNAT pada paket yang mengubah alamat IP tujuan ke alamat IP "
-"instance di jaringan self-service melalui antarmuka jaringan self-service "
-"(10)."
-
-msgid ""
-"The router routes the packet from provider network 1 (12) to provider "
-"network 2 (13)."
-msgstr ""
-"Router me-rute paket dari provider network 1 (12) ke provider network 2 (13)."
-
-msgid ""
-"The router routes the packet from provider network 1 (8) to provider network "
-"2 (9)."
-msgstr ""
-"Router me-rute paket dari provider network 1 (8) ke provider network 2 (9)."
-
-msgid ""
-"The router routes the packet from the provider network (12) to the external "
-"network (13) and forwards the packet to the switch (14)."
-msgstr ""
-"Router me-rute paket dari provider network (12) ke external network (13) dan "
-"meneruskan paket ke switch (14)."
-
-msgid ""
-"The router routes the packet from the provider network (8) to the external "
-"network (9) and forwards the packet to the switch (10)."
-msgstr ""
-"Router me-rute paket dari jaringan operator (8) ke jaringan eksternal (9) "
-"dan meneruskan paket ke switch (10)."
-
-msgid ""
-"The router sends the packet to the next-hop IP address, typically the "
-"gateway IP address on self-service network 2, via the self-service network 2 "
-"interface (11)."
-msgstr ""
-"Router mengirimkan paket ke alamat IP hop berikutnya (next-hop), biasanya "
-"alamat IP gateway pada self-service network 2, melalui self-service network "
-"2 interface (11)."
-
-msgid ""
-"The router sends the packet to the next-hop IP address, typically the "
-"gateway IP address on self-service network 2, via the self-service network 2 "
-"interface (17)."
-msgstr ""
-"Router mengirimkan paket ke alamat IP next-hop, biasanya alamat IP gateway "
-"pada self-service network 2, melalui self-service network 2 interface (17)."
-
-msgid ""
-"The router with the floating IP address binding contains a gateway on an "
-"external network with the BGP speaker association."
-msgstr ""
-"Router dengan alamat IP mengambang yang terikat berisi gateway pada jaringan "
-"eksternal dengan asosiasi BGP speaker."
-
-msgid ""
-"The same can explicitly be accomplished by a user with administrative "
-"credentials setting the flags in the :command:`neutron router-create` "
-"command:"
-msgstr ""
-"Hal yang sama dapat secara eksplisit dicapai oleh pengguna dengan kredensial "
-"administratif pengaturan flag di perintah :command:`neutron router-create`:"
-
-msgid ""
-"The same neutron commands are used for LBaaS v2 with an agent or with "
-"Octavia."
-msgstr ""
-"Perintah neutron yang sama digunakan untuk LBaaS v2 dengan agen atau dengan "
-"Octavia."
-
-msgid ""
-"The second branch is called contract and is used to store those migration "
-"rules that are not safe to apply while Neutron server is running."
-msgstr ""
-"Cabang kedua disebut kontrak dan digunakan untuk menyimpan aturan-aturan "
-"migrasi yang tidak aman untuk menerapkannya ketika Neutron server berjalan."
-
-msgid ""
-"The security group bridge OVS port (4) forwards the packet to the OVS "
-"integration bridge security group port (5) via ``veth`` pair."
-msgstr ""
-"Security group bridge OVS port (4) meneruskan paket ke OVS integration "
-"bridge security group port (5) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (15) forwards the packet to the "
-"instance interface (16) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (15) meneruskan paket ke instance "
-"interface (16) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (18) forwards the packet to the "
-"instance 2 interface (19) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (18) meneruskan paket ke instance 2 "
-"interface (19) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (19) forwards the packet to the "
-"instance 2 interface (20) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (19) meneruskan paket ke instance 2 "
-"interface (20) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (22) forwards the packet to the "
-"instance 2 interface (23) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (22) meneruskan paket ke instance 2 "
-"interface (23) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (22) forwards the packet to the "
-"instance interface (23) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (22) meneruskan paket ke instance "
-"interface (23) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (23) forwards the packet to the "
-"instance 2 interface (24) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (23) meneruskan paket ke instance 2 "
-"interface (24) melalui pasangan ``veth``."
-
-msgid ""
-"The security group bridge instance port (31) forwards the packet to the "
-"instance interface (32) via ``veth`` pair."
-msgstr ""
-"Security group bridge instance port (31) meneruskan paket ke instance "
-"interface (32) melalui pasangan ``veth``."
-
-msgid ""
-"The self-service bridge forwards the packet to the VXLAN interface (4) which "
-"wraps the packet using VNI 101."
-msgstr ""
-"Self-service bridge meneruskan paket ke antarmuka VXLAN (4) yang membungkus "
-"paket menggunakan VNI 101."
-
-msgid ""
-"The self-service bridge forwards the packet to the VXLAN interface (8) which "
-"wraps the packet using VNI 101."
-msgstr ""
-"Jembatan self-service meneruskan paket ke interface VXLAN (8) yang "
-"membungkus paket menggunakan VNI 101."
-
-msgid ""
-"The self-service bridge instance port (10) forwards the packet to the "
-"instance 1 interface (11) via ``veth`` pair."
-msgstr ""
-"Self-service bridge instance port (10) meneruskan paket ke instance 1 "
-"interface (11) melalui pasangan ``veth``."
-
-msgid ""
-"The self-service bridge instance port (14) forwards the packet to the "
-"instance interface (15) via ``veth`` pair."
-msgstr ""
-"Self-service bridge instance port (14) meneruskan paket ke instance "
-"interface (15) melalui pasangan ``veth``."
-
-msgid ""
-"The self-service bridge instance port (19) forwards the packet to the "
-"instance 2 interface (20) via ``veth`` pair."
-msgstr ""
-"Self-service bridge instance port (19) meneruskan paket ke instance 2 "
-"interface (20) melalui pasangan ``veth``."
-
-msgid ""
-"The self-service bridge router port (9) forwards the packet to the self-"
-"service network 1 interface (10) in the router namespace."
-msgstr ""
-"Self-service bridge router port (9) meneruskan paket ke self-service network "
-"1 interface (10) dalam router namespace."
-
-msgid ""
-"The self-service bridge router port (9) forwards the packet to the self-"
-"service network interface (10) in the router namespace."
-msgstr ""
-"self-service bridge router port (9) meneruskan paket ke interface jaringan "
-"self-service (10) dalam namespace router."
-
-msgid ""
-"The self-service network 2 bridge forwards the packet to the VXLAN interface "
-"(13) which wraps the packet using VNI 102."
-msgstr ""
-"Self-service network 2 bridge meneruskan paket ke VXLAN interface (13) yang "
-"membungkus paket menggunakan VNI 102."
-
-msgid ""
-"The self-service network 2 interface in the distributed router namespace (8) "
-"forwards the packet to the OVS integration bridge port for self-service "
-"network 2 (9)."
-msgstr ""
-"Self-service network 2 interface dalam distributed router namespace (8) "
-"meneruskan paket ke OVS integration bridge port pada self-service network 2 "
-"(9)."
-
-msgid ""
-"The sriov nic switch agent configures PCI virtual functions to realize L2 "
-"networks for OpenStack instances. Network attachments for other resources "
-"like routers, DHCP, and so on are not supported."
-msgstr ""
-"Sriov nic switch agent mengkonfigurasi fungsi virtual PCI untuk mewujudkan "
-"jaringan L2 untuk instance OpenStack. Network attachment untuk sumber daya "
-"lain seperti router, DHCP, dan sebagainya tidak didukung."
-
-msgid ""
-"The steps detail how to create VFs using Mellanox ConnectX-4 and newer/Intel "
-"SR-IOV Ethernet cards on an Intel system. Steps may differ for different "
-"hardware configurations."
-msgstr ""
-"Langkah-langkah rinci bagaimana untuk membuat VF menggunakan kartu Mellanox "
-"ConnectX-4 dan kartu Intel SR-IOV Ethernet atau yang lebih baru pada sistem "
-"Intel. Langkah ini mungkin berbeda untuk konfigurasi hardware yang berbeda."
-
-msgid ""
-"The subnet is initially created with a temporary CIDR before one can be "
-"assigned by prefix delegation. Any number of subnets with this temporary "
-"CIDR can exist without raising an overlap error. The subnetpool_id is "
-"automatically set to ``prefix_delegation``."
-msgstr ""
-"Subnet pada awalnya dibuat dengan CIDR sementara sebelum subnet dapat "
-"diberikan oleh prefix delegation. Jumlah subnet dengan CIDR sementara ini "
-"bisa eksis tanpa menimbulkan error tumpang tindih. subnetpool_id secara "
-"otomatis diatur untuk ``prefix_delegation``."
-
-msgid ""
-"The suggested way of making PCI SYS settings persistent is through the "
-"``sysfsutils`` tool. However, this is not available by default on many major "
-"distributions."
-msgstr ""
-"Cara yang disarankan untuk membuat pengaturan PCI SYS persisten adalah "
-"melalui alat ``sysfsutils``. Namun, ini tidak tersedia secara default pada "
-"banyak distribusi utama."
-
-msgid ""
-"The support for SR-IOV with InfiniBand allows a Virtual PCI device (VF) to "
-"be directly mapped to the guest, allowing higher performance and advanced "
-"features such as RDMA (remote direct memory access). To use this feature, "
-"you must:"
-msgstr ""
-"Dukungan untuk SR-IOV dengan InfiniBand memungkinkan perangkat Virtual PCI "
-"(VF) untuk langsung dipetakan ke guest, yang memungkinkan kinerja yang lebih "
-"tinggi dan fitur-fitur canggih seperti RDMA (remote direct memory access). "
-"Untuk menggunakan fitur ini, Anda harus:"
-
-msgid ""
-"The switch adds VLAN tag 102 to the packet and forwards it to compute node 1 "
-"(11)."
-msgstr ""
-"Switch menambahkan VLAN tag 102 untuk paket dan meneruskannya ke compute "
-"node 1 (11)."
-
-msgid ""
-"The switch adds VLAN tag 102 to the packet and forwards it to compute node 1 "
-"(15)."
-msgstr ""
-"Switch menambahkan VLAN tag 102 ke paket dan meneruskannya ke komputasi "
-"node 1 (15)."
-
-msgid ""
-"The switch forwards the packet from compute node 1 to compute node 2 (11)."
-msgstr ""
-"Switch meneruskan paket dari komputasi node 1 ke komputasi node 2 (11)."
-
-msgid ""
-"The switch forwards the packet from compute node 1 to compute node 2 (7)."
-msgstr "Switch meneruskan paket dari komputasi node 1 ke komputasi node 2 (7)."
-
-msgid "The switch forwards the packet to the external network (11)."
-msgstr "Switch meneruskan paket ke jaringan eksternal (11)."
-
-msgid "The switch forwards the packet to the external network (15)."
-msgstr "Switch meneruskan paket ke external network (15)."
-
-msgid ""
-"The switch removes VLAN tag 101 from the packet and forwards it to the "
-"router (11)."
-msgstr ""
-"Switch menghapus VLAN tag 101 dari paket dan meneruskannya ke router (11)."
-
-msgid ""
-"The switch removes VLAN tag 101 from the packet and forwards it to the "
-"router (7)."
-msgstr ""
-"Switch menghapus VLAN tag 101 dari paket dan meneruskannya ke router (7)."
-
-msgid ""
-"The tool needs to access the database connection string, which is provided "
-"in the ``neutron.conf`` configuration file in an installation. The tool "
-"automatically reads from ``/etc/neutron/neutron.conf`` if it is present. If "
-"the configuration is in a different location, use the following command:"
-msgstr ""
-"Alat ini perlu mengakses string koneksi database, yang disediakan dalam file "
-"konfigurasi ``neutron.conf`` dalam instalasi. Alat ini secara otomatis "
-"membaca dari ``/etc/neutron/neutron.conf`` jika ada. Jika konfigurasi ini "
-"ada di lokasi yang berbeda, gunakan perintah berikut:"
-
-msgid "The tool takes some options followed by some commands:"
-msgstr ""
-"Alat ini mengambil beberapa pilihan yang diikuti oleh beberapa perintah:"
-
-msgid ""
-"The tool usage examples below do not show the options. It is assumed that "
-"you use the options that you need for your environment."
-msgstr ""
-"Contoh penggunaan alat di bawah ini tidak menunjukkan pilihan. Hal ini "
-"diasumsikan bahwa Anda menggunakan pilihan yang Anda butuhkan untuk "
-"lingkungan Anda."
-
-msgid ""
-"The trunk is ``ACTIVE`` when both the logical and physical resources have "
-"been created. This means that all operations within the Networking and "
-"Compute services have completed and the trunk is ready for use."
-msgstr ""
-"Trunk menjadi ``ACTIVE`` ketika kedua sumber logis dan fisik telah "
-"diciptakan. Ini berarti bahwa semua operasi dalam layanan Networking dan "
-"Compute telah selesai dan trunk siap digunakan."
-
-msgid ""
-"The underlying physical interface (11) for overlay networks forwards the "
-"packet to the OVS tunnel bridge (12)."
-msgstr ""
-"Underlying physical interface (11) untuk overlay network meneruskan paket "
-"ke OVS tunnel bridge (12)."
-
-msgid ""
-"The underlying physical interface (11) for the VXLAN interface forwards the "
-"packet to the VXLAN interface (12) which unwraps the packet."
-msgstr ""
-"Antarmuka fisik yang mendasari (11) untuk antarmuka VXLAN meneruskan paket "
-"ke interface VXLAN (12) yang membuka paket."
-
-msgid ""
-"The underlying physical interface (13) for overlay networks forwards the "
-"packet to compute node 2 via the overlay network (14)."
-msgstr ""
-"Underlying physical interface (13) untuk overlay network meneruskan paket ke "
-"compute node 2 melalui overlay network (14)."
-
-msgid ""
-"The underlying physical interface (13) for overlay networks forwards the "
-"packet to the network node via the overlay network (14)."
-msgstr ""
-"Underlying physical interface (13) untuk overlay network meneruskan paket ke "
-"network node melalui overlay network (14)."
-
-msgid ""
-"The underlying physical interface (15) for overlay networks forwards the "
-"packet to the OVS tunnel bridge (16)."
-msgstr ""
-"underlying physical interface (15) untuk jaringan overlay meneruskan paket "
-"ke OVS tunnel bridge (16)."
-
-msgid ""
-"The underlying physical interface (16) for the VXLAN interface sends the "
-"packet to the VXLAN interface (17) which unwraps the packet."
-msgstr ""
-"Antarmuka fisik yang mendasari (16) untuk antarmuka VXLAN mengirimkan paket "
-"ke interface VXLAN (17) yang membuka paket."
-
-msgid ""
-"The underlying physical interface (22) for overlay networks forwards the "
-"packet to the compute node via the overlay network (23)."
-msgstr ""
-"Antarmuka fisik yang mendasari (22) untuk jaringan overlay meneruskan paket "
-"ke node komputasi melalui jaringan overlay (23)."
-
-msgid ""
-"The underlying physical interface (24) for overlay networks forwards the "
-"packet to the OVS tunnel bridge (25)."
-msgstr ""
-"Antarmuka fisik yang mendasari (24) untuk jaringan overlay meneruskan paket "
-"ke OVS tunnel bridge (25)."
-
-msgid ""
-"The underlying physical interface (5) for the VXLAN interface forwards the "
-"packet to compute node 2 via the overlay network (6)."
-msgstr ""
-"Antarmuka fisik yang mendasari (5) untuk antarmuka VXLAN meneruskan paket ke "
-"compute node 2 melalui jaringan overlay (6)."
-
-msgid ""
-"The underlying physical interface (5) for the VXLAN interface forwards the "
-"packet to the network node via the overlay network (6)."
-msgstr ""
-"Antarmuka fisik yang mendasari (5) untuk antarmuka VXLAN meneruskan paket ke "
-"node jaringan melalui jaringan overlay (6)."
-
-msgid ""
-"The underlying physical interface (7) for the VXLAN interface forwards the "
-"packet to the VXLAN interface (8) which unwraps the packet."
-msgstr ""
-"Antarmuka fisik yang mendasari (7) untuk antarmuka VXLAN meneruskan paket ke "
-"interface VXLAN (8) yang membuka paket."
-
-msgid ""
-"The underlying physical interface (9) for overlay networks forwards the "
-"packet to compute node 2 via the overlay network (10)."
-msgstr ""
-"Underlying physical interface (9) untuk overlay networks meneruskan paket ke "
-"komputasi node 2 melalui jaringan overlay (10)."
-
-msgid ""
-"The underlying physical interface (9) for overlay networks forwards the "
-"packet to the network node via the overlay network (10)."
-msgstr ""
-"Underlying physical interface (9) untuk overlay network meneruskan paket ke "
-"network node melalui overlay network (10)."
-
-msgid ""
-"The underlying physical interface (9) for the VXLAN interface forwards the "
-"packet to the network node via the overlay network (10)."
-msgstr ""
-"Antarmuka fisik yang mendasari (9) untuk antarmuka VXLAN meneruskan paket ke "
-"node jaringan melalui jaringan overlay (10)."
-
-msgid ""
-"The upgrade of the Networking service database is implemented with Alembic "
-"migration chains. The migrations in the ``alembic/versions`` contain the "
-"changes needed to migrate from older Networking service releases to newer "
-"ones."
-msgstr ""
-"Upgrade dari database layanan Networking diimplementasikan dengan rantai "
-"migrasi Alembic. Migrasi di ``alembic /versions`` berisi perubahan yang "
-"dibutuhkan untuk bermigrasi dari rilis layanan Networking tua ke yang lebih "
-"baru."
-
-msgid ""
-"The upstream router can send an RA and the neutron router will automatically "
-"learn the next-hop LLA, provided again that no subnet is assigned and the "
-"``ipv6_gateway`` flag is not set."
-msgstr ""
-"Router hulu dapat mengirim RA dan router neutron otomatis akan mempelajari "
-"LLA next-hop, tersedia lagi bahwa tidak ada subnet ditugaskan dan flag "
-"``ipv6_gateway`` tidak diatur."
-
-msgid ""
-"The validation option behaves identically for all users. However, it is "
-"considered primarily an admin or service utility since it is the operator "
-"who must set up the requirements."
-msgstr ""
-"Opsi validasi berperilaku identik untuk semua pengguna. Namun, validasi itu "
-"dianggap utama bagi admin atau utilitas layanan karena merupakan operator "
-"yang harus menyiapkan dan memenuhi persyaratan itu."
-
-msgid ""
-"The value of ``METADATA_SECRET`` must match the value of the same option in "
-"the ``[neutron]`` section of the ``nova.conf`` file."
-msgstr ""
-"Nilai ``METADATA_SECRET`` harus sesuai dengan nilai opsi yang sama dalam "
-"bagian ``[neutron]`` dari file ``nova.conf``."
-
-msgid "The variables used in the script file above are:"
-msgstr "Variabel yang digunakan dalam file script di atas adalah:"
-
-msgid ""
-"There are certain scenarios where l2pop and distributed HA routers do not "
-"interact in an expected manner. These situations are the same that affect HA "
-"only routers and l2pop."
-msgstr ""
-"Ada skenario tertentu di mana l2pop dan router HA didistribusikan tidak "
-"berinteraksi dengan cara yang diharapkan. Situasi ini adalah sama yang "
-"mempengaruhi router hanya HA dan l2pop."
-
-msgid ""
-"There are multiple variations of NAT, and here we describe three kinds "
-"commonly found in OpenStack deployments."
-msgstr ""
-"Ada beberapa variasi dari NAT, dan disini kami menjelaskan tiga jenis umum "
-"yang ditemukan dalam pengerahan OpenStack."
-
-msgid "There are two IPv6 attributes:"
-msgstr "Ada dua atribut IPv6:"
-
-msgid ""
-"There are two reference implementations of LBaaS v2. The one is an agent "
-"based implementation with HAProxy. The agents handle the HAProxy "
-"configuration and manage the HAProxy daemon. Another LBaaS v2 "
-"implementation, `Octavia `_, "
-"has a separate API and separate worker processes that build load balancers "
-"within virtual machines on hypervisors that are managed by the Compute "
-"service. You do not need an agent for Octavia."
-msgstr ""
-"Ada dua implementasi referensi dari LBaaS v2. Yang satu adalah implementasi "
-"berdasarkan agen dengan HAProxy. Para agen menangani konfigurasi HAProxy dan "
-"mengelola daemon HAProxy. Implementasi LBaaS v2 lain, `Octavia `_, memiliki API terpisah dan proses "
-"pekerja terpisah yang membangun penyeimbang beban dalam mesin virtual pada "
-"hypervisors yang dikelola oleh layanan Compute. Anda tidak perlu agen untuk "
-"Octavia."
-
-msgid "There are two syntaxes for expressing a netmask:"
-msgstr "Ada dua sintaks untuk mengekspresikan netmask:"
-
-msgid ""
-"There are two ways to attach VFs to an instance. You can create an SR-IOV "
-"port or use the ``pci_alias`` in the Compute service. For more information "
-"about using ``pci_alias``, refer to `nova-api configuration `__."
-msgstr ""
-"Ada dua cara untuk menghubungkan VFS ke sebuah instance. Anda dapat membuat "
-"port SR-IOV atau menggunakan ``pci_alias`` dalam pelayanan Compute. Untuk "
-"informasi lebih lanjut tentang menggunakan ``pci_alias``, mengacu pada "
-"`nova-api configuration `__."
-
-msgid ""
-"There is a known bug with ``keepalived`` v1.2.15 and earlier which can cause "
-"packet loss when ``max_l3_agents_per_router`` is set to 3 or more. "
-"Therefore, we recommend that you upgrade to ``keepalived`` v1.2.16 or "
-"greater when using this feature."
-msgstr ""
-"Ada bug yang dikenal dengan ``keepalived`` v1.2.15 dan sebelumnya yang "
-"dapat menyebabkan kehilangan paket (packet loss) ketika `` "
-"max_l3_agents_per_router`` diatur ke 3 atau lebih. Oleh karena itu, kami "
-"menyarankan Anda meng-upgrade ke ``keepalived`` v1.2.16 atau lebih besar "
-"saat penggunaan fitur ini."
-
-msgid ""
-"There is no need to specify any value if you wish to use the reference "
-"driver, though specifying ``internal`` will explicitly choose the reference "
-"driver. The documentation for any alternate drivers will include the value "
-"to use when specifying that driver."
-msgstr ""
-"Tidak perlu untuk menentukan nilai apapun jika Anda ingin menggunakan driver "
-"referensi, meskipun menentukan ``internal`` secara eksplisit akan memilih "
-"driver referensi. Dokumentasi untuk setiap driver alternatif akan mencakup "
-"nilai untuk digunakan saat menentukan driver tersebut."
-
-msgid "There will be three hosts in the setup."
-msgstr "Akan ada tiga host saat penyiapan."
-
-msgid ""
-"These IP addresses are not publicly routable, meaning that a host on the "
-"public Internet can not send an IP packet to any of these addresses. Private "
-"IP addresses are widely used in both residential and corporate environments."
-msgstr ""
-"Alamat IP ini tidak routable publik, yang berarti bahwa host di Internet "
-"publik tidak dapat mengirim paket IP ke salah satu alamat ini. Alamat IP "
-"private yang banyak digunakan dalam lingkungan perumahan maupun perusahaan."
-
-msgid "These attributes can be set to the following values:"
-msgstr "Atribut ini dapat diatur untuk nilai berikut:"
-
-msgid "These commands provide no output."
-msgstr "Perintah ini tidak memberikan output."
-
-msgid ""
-"These figures omit the controller node because it does not handle instance "
-"network traffic."
-msgstr ""
-"Angka ini menghilangkan controller node karena tidak menangani lalu lintas "
-"jaringan instance."
-
-msgid ""
-"These steps need to be performed on each logical segment that a VM needs to "
-"be connected to, and may require networking knowledge the user might not "
-"have."
-msgstr ""
-"Langkah ini perlu dilakukan pada setiap segmen logis dimana VM perlu "
-"terhubung ke segmen itu, dan mungkin memerlukan pengetahuan jaringan dimana "
-"pengguna mungkin tidak memilikinya."
-
-msgid ""
-"Third-party drivers must provide their own migration mechanisms to convert "
-"existing OpenStack installations to their IPAM."
-msgstr ""
-"Driver pihak ketiga harus menyediakan mekanisme migrasi mereka sendiri untuk "
-"mengkonversi instalasi OpenStack yang ada ke IPAM mereka."
-
-msgid ""
-"This architecture example augments :ref:`deploy-lb-provider` to support a "
-"nearly limitless quantity of entirely virtual networks. Although the "
-"Networking service supports VLAN self-service networks, this example focuses "
-"on VXLAN self-service networks. For more information on self-service "
-"networks, see :ref:`intro-os-networking-selfservice`."
-msgstr ""
-"Contoh arsitektur ini menambah :ref:`deploy-lb-provider` untuk mendukung "
-"sejumlah hampir tak terbatas dari jaringan virtual sepenuhnya. Meskipun "
-"layanan Networking mendukung jaringan self-service VLAN, contoh ini berfokus "
-"pada jaringan self-service VXLAN. Untuk informasi lebih lanjut tentang "
-"jaringan self-service, lihat :ref:`intro-os-networking-selfservice`."
-
-msgid ""
-"This architecture example augments :ref:`deploy-ovs-provider` to support a "
-"nearly limitless quantity of entirely virtual networks. Although the "
-"Networking service supports VLAN self-service networks, this example focuses "
-"on VXLAN self-service networks. For more information on self-service "
-"networks, see :ref:`intro-os-networking-selfservice`."
-msgstr ""
-"Contoh arsitektur ini menambah :ref:`deploy-ovs-provider` untuk mendukung "
-"jumlah hampir tak terbatas dari jaringan virtual sepenuhnya. Meskipun "
-"layanan Networking mendukung jaringan self-service VLAN, contoh ini berfokus "
-"pada jaringan self-service VXLAN. Untuk informasi lebih lanjut tentang "
-"jaringan self-service, lihat :ref:`intro-os-networking-selfservice`."
-
-msgid ""
-"This architecture example augments the self-service deployment example with "
-"a high-availability mechanism using the Virtual Router Redundancy Protocol "
-"(VRRP) via ``keepalived`` and provides failover of routing for self-service "
-"networks. It requires a minimum of two network nodes because VRRP creates "
-"one master (active) instance and at least one backup instance of each router."
-msgstr ""
-"Contoh arsitektur ini menambah contoh pengerahan self-service dengan "
-"mekanisme high-availability menggunakan Virtual Router Redundancy Protocol "
-"(VRRP) melalui ``keepalived`` dan menyediakan failover routing untuk "
-"jaringan self-service. Hal ini membutuhkan minimal dua node jaringan karena "
-"VRRP menciptakan satu instance master (aktif) dan setidaknya satu instance "
-"cadangan dari setiap router."
-
-msgid ""
-"This architecture example augments the self-service deployment example with "
-"the Distributed Virtual Router (DVR) high-availability mechanism that "
-"provides connectivity between self-service and provider networks on compute "
-"nodes rather than network nodes for specific scenarios. For instances with a "
-"floating IPv4 address, routing between self-service and provider networks "
-"resides completely on the compute nodes to eliminate single point of failure "
-"and performance issues with network nodes. Routing also resides completely "
-"on the compute nodes for instances with a fixed or floating IPv4 address "
-"using self-service networks on the same distributed virtual router. However, "
-"instances with a fixed IP address still rely on the network node for routing "
-"and SNAT services between self-service and provider networks."
-msgstr ""
-"Contoh arsitektur ini menambah contoh pengerahan self-service dengan "
-"mekanisme ketersediaan tinggi Distributed Virtual Router (DVR) yang "
-"menyediakan konektivitas antara jaringan self-service dan jaringan provider "
-"pada node komputasi bukannya pada node jaringan untuk skenario tertentu. "
-"Untuk instance dengan alamat IPv4 mengambang, routing antara jaringan self-"
-"service dan jaringan provider berada sepenuhnya pada node komputasi untuk "
-"menghilangkan titik tunggal kegagalan (single point of failure) dan masalah "
-"kinerja dengan node jaringan. Routing juga berada sepenuhnya pada node "
-"komputasi untuk instance dengan alamat IPv4 tetap atau mengambang dengan "
-"menggunakan jaringan self-service dari router virtual terdistribusikan yang "
-"sama. Namun, instance dengan alamat IP tetap masih mengandalkan node "
-"jaringan untuk routing dan layanan SNAT antara jaringan self-service dan "
-"provider."
-
-msgid ""
-"This architecture example provides layer-2 connectivity between instances "
-"and the physical network infrastructure using VLAN (802.1q) tagging. It "
-"supports one untagged (flat) network and up to 4095 tagged (VLAN) networks. "
-"The actual quantity of VLAN networks depends on the physical network "
-"infrastructure. For more information on provider networks, see :ref:`intro-"
-"os-networking-provider`."
-msgstr ""
-"This architecture example provides layer-2 connectivity between instances "
-"and the physical network infrastructure using VLAN (802.1q) tagging. It "
-"supports one untagged (flat) network and up to 4095 tagged (VLAN) networks. "
-"The actual quantity of VLAN networks depends on the physical network "
-"infrastructure. For more information on provider networks, see :ref:`intro-"
-"os-networking-provider`."
-
-msgid ""
-"This command is the sibling command for the previous one. Remove ``net2`` "
-"from the DHCP agent for HostA:"
-msgstr ""
-"Perintah ini adalah perintah sibling (saudara kandung) untuk perintah "
-"sebelumnya. Hapus ``net2`` dari agen DHCP untuk HostA:"
-
-msgid "This command is to show which networks a given dhcp agent is managing."
-msgstr ""
-"Perintah ini adalah untuk menunjukkan jaringan dimana agen dhcp diberikan "
-"sedang mengelola."
-
-msgid "This command provides no output."
-msgstr "Perintah ini tidak memberikan output."
-
-msgid ""
-"This command requires additional options to successfully launch an instance. "
-"See the `CLI reference `_ for more information."
-msgstr ""
-"Perintah ini memerlukan opsi tambahan untuk berhasil meluncurkan sebuah "
-"instance. Lihat `CLI reference `_ untuk informasi lebih lanjut."
-
-msgid "This command requires other options outside the scope of this content."
-msgstr "Perintah ini membutuhkan opsi lain di luar lingkup konten ini."
-
-msgid ""
-"This example shows how to check the connectivity between networks with "
-"address scopes."
-msgstr ""
-"Contoh ini menunjukkan bagaimana untuk memeriksa konektivitas antara "
-"jaringan dengan lingkup alamat."
-
-msgid ""
-"This example uses again the private network, ``demo-net1`` "
-"(b5b729d8-31cc-4d2c-8284-72b3291fec02) which was created in `Example 1 - "
-"Proof-of-concept`_."
-msgstr ""
-"Contoh ini menggunakan lagi jaringan private, ``demo-net1`` "
-"(b5b729d8-31cc-4d2c-8284-72b3291fec02) yang diciptakan di `Example 1 - Proof-"
-"of-concept`_."
-
-msgid ""
-"This feature is designed to automate the basic networking provisioning for "
-"projects. The steps to provision a basic network are run during instance "
-"boot, making the networking setup hands-free."
-msgstr ""
-"Fitur ini dirancang untuk mengotomatisasi penyediaan jaringan dasar untuk "
-"proyek. Langkah-langkah untuk menyediakan jaringan dasar dijalankan saat "
-"boot, membuat pengaturan jaringan hands-free."
-
-msgid ""
-"This feature is only supported when using the libvirt compute driver, and "
-"the KVM/QEMU hypervisor."
-msgstr ""
-"Fitur ini hanya didukung bila menggunakan libvirt compute driver, dan KVM/"
-"QEMU hypervisor."
-
-msgid ""
-"This following example is not typical of an actual deployment. It is shown "
-"to allow users to experiment with configuring service subnets."
-msgstr ""
-"Ini contoh berikut ini tidak khas dari pengerahan yang nyata. Hal ini "
-"ditunjukkan untuk memungkinkan pengguna untuk bereksperimen dengan "
-"konfigurasi subnet layanan."
-
-msgid ""
-"This generates a prepopulated template with the changes needed to match the "
-"database state with the models."
-msgstr ""
-"Ini menghasilkan prepopulated template dengan perubahan yang diperlukan "
-"untuk mencocokkan keadaan database dengan model."
-
-msgid ""
-"This guide assumes that you are running a Dibbler server on the network node "
-"where the external network bridge exists. If you already have a prefix "
-"delegation capable DHCPv6 server in place, then you can skip the following "
-"section."
-msgstr ""
-"Panduan ini mengasumsikan bahwa Anda menjalankan server Dibbler pada node "
-"jaringan dimana jembatan jaringan eksternal ada. Jika Anda sudah memiliki "
-"server DHCPv6 yang mampu prefix delegation di tempat, maka anda dapat "
-"melewati (skip) bagian berikut."
-
-msgid ""
-"This guide characterizes the L2 reference implementations that currently "
-"exist."
-msgstr "Panduan ini mencirikan implementasi referensi L2 yang saat ini ada."
-
-msgid "This guide documents the OpenStack Ocata release."
-msgstr "Panduan ini mendokumentasikan rilis OpenStack Ocata."
-
-msgid ""
-"This guide targets OpenStack administrators seeking to deploy and manage "
-"OpenStack Networking (neutron)."
-msgstr ""
-"Panduan ini menargetkan administrator OpenStack untuk berusaha mengerahkan "
-"dan mengelola OpenStack Networking (neutron)."
-
-msgid ""
-"This high-availability mechanism configures VRRP using the same priority for "
-"all routers. Therefore, VRRP promotes the backup router with the highest IP "
-"address to the master router."
-msgstr ""
-"Mekanisme high-availability ini mengkonfigurasi VRRP menggunakan prioritas "
-"yang sama untuk semua router. Oleh karena itu, VRRP mempromosikan router "
-"cadangan dengan alamat IP tertinggi ke router utama."
-
-msgid ""
-"This high-availability mechanism is not compatible with the layer-2 "
-"population mechanism. You must disable layer-2 population in the "
-"``linuxbridge_agent.ini`` file and restart the Linux bridge agent on all "
-"existing network and compute nodes prior to deploying the example "
-"configuration."
-msgstr ""
-"Mekanisme high-availability ini tidak kompatibel dengan mekanisme populasi "
-"lapisan-2. Anda harus menonaktifkan populasi lapisan-2 di file "
-"``linuxbridge_agent.ini`` dan restart agen Linux bridge pada semua jaringan "
-"dan komputasi node yang ada sebelum pengerahan konfigurasi contoh."
-
-msgid ""
-"This high-availability mechanism simply augments :ref:`deploy-lb-"
-"selfservice` with failover of layer-3 services to another router if the "
-"master router fails. Thus, you can reference :ref:`Self-service network "
-"traffic flow ` for normal "
-"operation."
-msgstr ""
-"Mekanisme high-availabilityi ini hanya menambah :ref:`deploy-lb-selfservice` "
-"dengan failover layanan lapisan-3 kepada router lain jika router master "
-"gagal. Dengan demikian, Anda dapat referensi :ref:`Self-service network "
-"traffic flow ` untuk operasi "
-"normal."
-
-msgid ""
-"This high-availability mechanism simply augments :ref:`deploy-ovs-"
-"selfservice` with failover of layer-3 services to another router if the "
-"master router fails. Thus, you can reference :ref:`Self-service network "
-"traffic flow ` for normal "
-"operation."
-msgstr ""
-"Mekanisme high-availability ini hanya menambah :ref:`deploy-ovs-selfservice` "
-"dengan failover layanan lapisan-3 kepada router lain jika master router "
-"gagal. Dengan demikian, Anda dapat referensi :ref:`Self-service network "
-"traffic flow ` untuk operasi "
-"normal."
-
-msgid ""
-"This is the equivalent of creating a policy on the network that permits "
-"every project to perform the action ``access_as_shared`` on that network. "
-"Neutron treats them as the same thing, so the policy entry for that network "
-"should be visible using the :command:`openstack network rbac list` command:"
-msgstr ""
-"Ini adalah setara dengan pembuatan kebijakan di jaringan yang mengizinkan "
-"setiap proyek untuk melakukan aksi ``access_as_shared`` pada jaringan itu. "
-"Neutron memperlakukan mereka sebagai hal yang sama, sehingga masuknya "
-"kebijakan untuk jaringan itu harus terlihat dengan menggunakan perintah :"
-"command:`openstack network rbac list`:"
-
-msgid ""
-"This mechanism driver simply changes the virtual network interface driver "
-"for instances. Thus, you can reference the ``Create initial networks`` "
-"content for the prerequisite deployment example."
-msgstr ""
-"Driver mekanisme ini hanya merubah driver antarmuka jaringan virtual untuk "
-"instance. Dengan demikian, Anda dapat referensi konten ``Create initial "
-"networks` untuk contoh pengerahan prasyarat."
-
-msgid ""
-"This mechanism driver simply changes the virtual network interface driver "
-"for instances. Thus, you can reference the ``Verify network operation`` "
-"content for the prerequisite deployment example."
-msgstr ""
-"Driver mekanisme ini hanya merubah driver antarmuka jaringan virtual untuk "
-"instance. Dengan demikian, Anda dapat referensi konten ``Verify network "
-"operation`` untuk contoh pengerahan prasyarat."
-
-msgid ""
-"This mechanism driver simply removes the Linux bridge handling security "
-"groups on the compute nodes. Thus, you can reference the network traffic "
-"flow scenarios for the prerequisite deployment example."
-msgstr ""
-"Driver mekanisme ini hanya menghilangkan jembatan Linux penanganan kelompok "
-"keamanan pada node komputasi. Dengan demikian, Anda dapat referensi skenario "
-"arus lalu lintas jaringan untuk contoh pengerahan prasyarat."
-
-msgid ""
-"This method entails creating a trunk, then adding subports to the trunk "
-"after it has already been created."
-msgstr ""
-"Metode ini memerlukan pembuatan trunk, kemudian menambahkan subports ke "
-"trunk setelah trunk dibuat."
-
-msgid ""
-"This method entails creating the trunk with subports specified at trunk "
-"creation."
-msgstr ""
-"Metode ini memerlukan pembuatan trunk dengan subports yang spesifik pada "
-"pembuatan trunk."
-
-msgid ""
-"This page serves as a guide for how to use the DNS integration functionality "
-"of the Networking service. The functionality described covers DNS from two "
-"points of view:"
-msgstr ""
-"Halaman ini berfungsi sebagai panduan bagaimana menggunakan fungsi integrasi "
-"DNS dari layanan Networking. Fungsionalitas yang dijelaskan meliputi DNS "
-"dari dua sudut pandang:"
-
-msgid ""
-"This page serves as a guide for how to use the OVS with DPDK datapath "
-"functionality available in the Networking service as of the Mitaka release."
-msgstr ""
-"Halaman ini berfungsi sebagai panduan untuk bagaimana menggunakan OVS dengan "
-"DPDK datapath secara fungsinal yang tersedia di layanan Networking sebagai "
-"rilis Mitaka."
-
-msgid ""
-"This process can be repeated any number of times to make a network available "
-"as external to an arbitrary number of projects."
-msgstr ""
-"Proses ini dapat diulang beberapa kali untuk membuat jaringan yang tersedia "
-"sebagai eksternal sampai jumlah proyek berapapun."
-
-msgid ""
-"This process can be repeated any number of times to share a network with an "
-"arbitrary number of projects."
-msgstr ""
-"Proses ini dapat diulang beberapa kali untuk berbagi jaringan dengan jumlah "
-"proyek berapapun."
-
-msgid ""
-"This process can be repeated any number of times to share a qos-policy with "
-"an arbitrary number of projects."
-msgstr ""
-"Proses ini dapat diulang beberapa kali untuk berbagi qos-policy dengan "
-"jumlah proyek berapapun."
-
-msgid ""
-"This scenario places the instances on different compute nodes to show the "
-"most complex situation."
-msgstr ""
-"Skenario ini menempatkan instance pada node komputasi yang berbeda untuk "
-"menunjukkan situasi yang paling kompleks."
-
-msgid ""
-"This section describes how to use the agent management (alias agent) and "
-"scheduler (alias agent_scheduler) extensions for DHCP agents scalability and "
-"HA."
-msgstr ""
-"Bagian ini menjelaskan cara menggunakan manajemen agen (agent alias) dan "
-"ekstensi scheduler (alias agent_scheduler) untuk skalabilitas agen DHCP dan "
-"HA."
-
-msgid ""
-"This section describes the process of migrating clouds based on the legacy "
-"networking model to the OpenStack Networking model. This process requires "
-"additional changes to both compute and networking to support the migration. "
-"This document describes the overall process and the features required in "
-"both Networking and Compute."
-msgstr ""
-"Bagian ini menjelaskan proses migrasi cloud didasarkan pada model legacy "
-"networking ke model OpenStack Networking. Proses ini membutuhkan perubahan "
-"tambahan untuk komputasi maupun jaringan untuk mendukung migrasi. Dokumen "
-"ini menjelaskan proses secara keseluruhan dan fitur yang diperlukan di "
-"Networking maupun Compute."
-
-msgid ""
-"This section describes the process of migrating from a classic router to an "
-"L3 HA router, which is available starting from the Mitaka release."
-msgstr ""
-"Bagian ini menjelaskan proses migrasi dari router klasik ke sebuah router L3 "
-"HA, yang tersedia mulai dari rilis Mitaka."
-
-msgid ""
-"This section explains how to get high availability with the availability "
-"zone for L3 and DHCP. You should naturally set above configuration options "
-"for the availability zone."
-msgstr ""
-"Bagian ini menjelaskan bagaimana untuk mendapatkan ketersediaan tinggi "
-"dengan zona ketersediaan L3 dan DHCP. Anda secara alami harus mengatur opsi "
-"konfigurasi di atas untuk zona ketersediaan."
-
-msgid ""
-"This section illustrates how you can get the Network IP address availability "
-"through the command-line interface."
-msgstr ""
-"Bagian ini menggambarkan bagaimana Anda bisa mendapatkan ketersediaan alamat "
-"IP jaringan melalui command-line interface."
-
-msgid ""
-"This section only contains flow scenarios that benefit from distributed "
-"virtual routing or that differ from conventional operation. For other flow "
-"scenarios, see :ref:`deploy-ovs-selfservice-networktrafficflow`."
-msgstr ""
-"Bagian ini hanya berisi skenario aliran yang menguntungkan dari routing "
-"virtual yang didistribusikan atau yang berbeda dari operasi konvensional. "
-"Untuk skenario aliran lainnya, lihat :ref:`deploy-ovs-selfservice-"
-"networktrafficflow`."
-
-msgid ""
-"This section references parts of :ref:`deploy-lb-ha-vrrp` and :ref:`deploy-"
-"ovs-ha-vrrp`. For details regarding needed infrastructure and configuration "
-"to allow actual L3 HA deployment, read the relevant guide before continuing "
-"with the migration process."
-msgstr ""
-"Bagian ini merujuk bagian dari :ref:`deploy-lb-ha-vrrp` dan :ref:`deploy-ovs-"
-"ha-vrrp`. Untuk rincian mengenai infrastruktur yang diperlukan dan "
-"konfigurasi untuk melancarkan pengerahan L3 HA aktual, silahkan baca buku "
-"yang relevan sebelum melanjutkan dengan proses migrasi."
-
-msgid ""
-"This section shows how non-privileged users can use address scopes to route "
-"straight to an external network without NAT."
-msgstr ""
-"Bagian ini menunjukkan bagaimana pengguna non-hak istimewa dapat menggunakan "
-"lingkup alamat untuk rute langsung ke jaringan eksternal tanpa NAT (Network "
-"Address Translation). "
-
-msgid ""
-"This section shows how to set up shared address scopes to allow simple "
-"routing for project networks with the same subnet pools."
-msgstr ""
-"Bagian ini menunjukkan cara mengatur lingkup alamat bersama untuk "
-"memungkinkan routing sederhana untuk jaringan proyek dengan kolam subnet "
-"yang sama."
-
-msgid ""
-"This step ensures that Dashboard can find the plug-in when it enumerates all "
-"of its available panels."
-msgstr ""
-"Langkah ini memastikan bahwa Dashboard dapat menemukan plug-in ketika "
-"menjumlahkan semua panel yang tersedia."
-
-msgid ""
-"This tells OpenStack Networking to use the prefix delegation mechanism for "
-"subnet allocation when the user does not provide a CIDR or subnet pool id "
-"when creating a subnet."
-msgstr ""
-"Ini memberitahu OpenStack Networking supaya menggunakan mekanisme prefix "
-"delegation untuk alokasi subnet ketika pengguna tidak menyediakan CIDR atau "
-"id kolam subnet saat membuat subnet."
-
-msgid ""
-"This tells the Compute service that all VFs belonging to ``eth3`` are "
-"allowed to be passed through to instances and belong to the provider network "
-"``physnet2``."
-msgstr ""
-"Ini memberitahu layanan Compute dimana semua VF milik ``eth3`` diizinkan "
-"untuk dilewatkan ke instance dan menjadi milik jaringan penyedia "
-"``physnet2``."
-
-msgid "Three instances, one per compute node, each with a floating IP address."
-msgstr ""
-"Tiga instance, salah satu per node komputasi, masing-masing dengan alamat IP "
-"mengambang."
-
-msgid "Three network interfaces: management, overlay, and provider."
-msgstr "Tiga antarmuka jaringan: manajemen, overlay, dan provider."
-
-msgid "Three network interfaces: management, provider, and overlay."
-msgstr "Tiga antarmuka jaringan: manajemen, penyedia (provider), dan overlay."
-
-msgid ""
-"Three routers. Each router connects one self-service network to the provider "
-"network."
-msgstr ""
-"Tiga router. Setiap router menghubungkan satu jaringan self-service ke "
-"jaringan provider."
-
-msgid "Three self-service networks."
-msgstr "Tiga jaringan self-service."
-
-msgid ""
-"Throughout this guide, ``eth3`` is used as the PF and ``physnet2`` is used "
-"as the provider network configured as a VLAN range. These ports may vary in "
-"different environments."
-msgstr ""
-"Dalam panduan ini, ``eth3`` digunakan sebagai PF dan ``physnet2`` digunakan "
-"sebagai jaringan penyedia yang dikonfigurasi sebagai rentang VLAN. Port ini "
-"dapat bervariasi dalam lingkungan yang berbeda."
-
-msgid "To activate the network after it has been deactivated:"
-msgstr "Untuk mengaktifkan jaringan setelah jaringan telah dinonaktifkan:"
-
-msgid "To add a single rule, use the insert-rule operation."
-msgstr "Untuk menambahkan aturan tunggal, gunakan operasi insert-rule."
-
-msgid "To add another DHCP agent to host the network, run this command:"
-msgstr ""
-"Untuk menambahkan agen DHCP lain supaya menjadi host jaringan, jalankan "
-"perintah ini:"
-
-msgid "To add multiple rules, use the update operation."
-msgstr ""
-"Untuk menambahkan beberapa aturan (multiple rule), gunakan operasi update."
-
-msgid ""
-"To allow the neutron-pd-agent to communicate with prefix delegation servers, "
-"you must set which network interface to use for external communication. In "
-"DevStack the default for this is ``br-ex``:"
-msgstr ""
-"Untuk memungkinkan neutron-pd-agen untuk berkomunikasi dengan server prefix "
-"delegation, Anda harus mengatur jaringan yang antarmukanya digunakan untuk "
-"komunikasi eksternal. Dalam DevStack default untuk ini adalah ``br-ex``:"
-
-msgid "To apply the expansion migration rules, use the following command:"
-msgstr "Untuk menerapkan aturan migrasi ekspansi, gunakan perintah berikut:"
-
-msgid "To apply the non-expansive migration rules, use the following command:"
-msgstr ""
-"Untuk menerapkan aturan migrasi non-expansive, gunakan perintah berikut:"
-
-msgid ""
-"To calculate the network number of an IP address, you must know the "
-"*netmask* associated with the address. A netmask indicates how many of the "
-"bits in the 32-bit IP address make up the network number."
-msgstr ""
-"Untuk menghitung jumlah jaringan alamat IP, Anda harus tahu *netmask * "
-"terkait dengan alamat. Sebuah netmask menunjukkan berapa banyak bit dalam "
-"alamat IP 32-bit yang menyusun jumlah jaringan."
-
-msgid ""
-"To check if any contract migrations are pending and therefore if offline "
-"migration is required, use the following command:"
-msgstr ""
-"Untuk memeriksa apakah ada migrasi kontrak yang tertunda dan oleh karena itu "
-"jika migrasi secara offline diperlukan, gunakan perintah berikut:"
-
-msgid ""
-"To configure a driver other than the reference driver, specify it in the "
-"``neutron.conf`` file. Do this after the migration is complete. There is no "
-"need to specify any value if you wish to use the reference driver."
-msgstr ""
-"Untuk mengkonfigurasi driver selain driver referensi, menetapkannya dalam "
-"file ``neutron.conf``. Lakukan ini setelah migrasi selesai. Tidak perlu "
-"untuk menentukan nilai apapun jika Anda ingin menggunakan driver referensi."
-
-msgid ""
-"To configure rich network topologies, you can create and configure networks "
-"and subnets and instruct other OpenStack services like Compute to attach "
-"virtual devices to ports on these networks. OpenStack Compute is a prominent "
-"consumer of OpenStack Networking to provide connectivity for its instances. "
-"In particular, OpenStack Networking supports each project having multiple "
-"private networks and enables projects to choose their own IP addressing "
-"scheme, even if those IP addresses overlap with those that other projects "
-"use. There are two types of network, project and provider networks. It is "
-"possible to share any of these types of networks among projects as part of "
-"the network creation process."
-msgstr ""
-"Untuk mengkonfigurasi topologi jaringan yang beragam, Anda dapat membuat dan "
-"mengkonfigurasi jaringan dan subnet dan menginstruksikan layanan OpenStack "
-"lain seperti Compute untuk menghubungkan perangkat virtual ke port pada "
-"jaringan ini. OpenStack Compute adalah konsumen yang menonjol dari OpenStack "
-"Networking untuk menyediakan konektivitas untuk instances nya. Secara "
-"khusus, OpenStack Networking mendukung setiap proyek yang memiliki beberapa "
-"jaringan private dan mengaktifkan proyek untuk memilih skema pengalamatan IP "
-"mereka sendiri, bahkan jika alamat IP proyek mereka terjadi tumpang tindih "
-"dengan proyek lain menggunakan. Ada dua jenis jaringan, jaringan proyek dan "
-"jaringan provider. Hal ini dimungkinkan untuk berbagi salah satu jenis "
-"jaringan antara proyek-proyek sebagai bagian dari proses pembuatan jaringan."
-
-msgid "To confirm the agent's availability zone:"
-msgstr "Untuk mengkonfirmasi zona ketersediaan agen:"
-
-msgid "To confirm the availability zone defined by the system:"
-msgstr "Untuk mengkonfirmasi zona ketersediaan yang didefinisikan oleh sistem:"
-
-msgid ""
-"To create a VLAN network instead of a flat network, change ``--provider:"
-"network_type flat`` to ``--provider-network-type vlan`` and add ``--provider-"
-"segment`` with a value referencing the VLAN ID."
-msgstr ""
-"Untuk membuat jaringan VLAN bukannya jaringan datar, rubahlah ``--provider:"
-"network_type flat`` ke ``--provider-network-type vlan`` and add ``--provider-"
-"segment`` dengan nilai mereferensi VLAN ID."
-
-msgid "To deactivate the libvirt network named ``default``:"
-msgstr "Untuk menonaktifkan jaringan libvirt bernama ``default``:"
-
-msgid "To enable DSCP marking rule:"
-msgstr "Mengaktifkan aturan DSCP marking:"
-
-msgid ""
-"To enable advertising IPv6 prefixes, create an address scope with "
-"``ip_version=6`` and a BGP speaker with ``ip_version=6``."
-msgstr ""
-"Untuk mengaktifkan penyiaran update dan perubahan awalan IPv6, buatlah "
-"lingkup alamat dengan ``ip_version=6`` dan BGP speaker dengan "
-"``ip_version=6``."
-
-msgid "To enable bandwidth limit rule:"
-msgstr "Untuk mengaktifkan aturan batas bandwidth:"
-
-msgid ""
-"To enable mechanism drivers in the ML2 plug-in, edit the ``/etc/neutron/"
-"plugins/ml2/ml2_conf.ini`` file on the neutron server:"
-msgstr ""
-"Untuk mengaktifkan driver mekanisme di ML2 plug-in, edit file ``/etc/neutron/"
-"plugins/ml2/ml2_conf.ini`` di server neutron:"
-
-msgid ""
-"To enable peering via IPv6, create a BGP peer and use an IPv6 address for "
-"``peer_ip``."
-msgstr ""
-"Untuk mengaktifkan peering (kebersamaan) melalui IPv6, membuat rekan (peer) "
-"BGP dan menggunakan alamat IPv6 untuk ``peer_ip``."
-
-msgid ""
-"To enable the driver for the dhcpv6_pd_agent, set pd_dhcp_driver to this in "
-"``/etc/neutron/neutron.conf``:"
-msgstr ""
-"Untuk mengaktifkan driver untuk ini dhcpv6_pd_agent, set pd_dhcp_driver "
-"dalam ``/etc/neutron/neutron.conf``:"
-
-msgid "To enable the service, follow the steps below:"
-msgstr "Untuk mengaktifkan layanan ini, ikuti langkah-langkah di bawah ini:"
-
-msgid "To enable this feature, edit the ``l3_agent.ini`` file:"
-msgstr "Untuk mengaktifkan fitur ini, edit file ``l3_agent.ini``:"
-
-msgid ""
-"To enable type drivers in the ML2 plug-in. Edit the ``/etc/neutron/plugins/"
-"ml2/ml2_conf.ini`` file:"
-msgstr ""
-"Untuk mengaktifkan driver tipe dalam ML2 plug-in. Edit file ``/etc/neutron/"
-"plugins/ml2/ml2_conf.ini``:"
-
-msgid "To experiment, you need VMs and a neutron network:"
-msgstr "Untuk bereksperimen, Anda perlu VM dan jaringan neutron:"
-
-msgid ""
-"To find the panel, click on :guilabel:`Project` in Dashboard, then click "
-"the :guilabel:`Network` drop-down menu and select :guilabel:`Load Balancers`."
-msgstr ""
-"Untuk menemukan panel, klik pada :guilabel:`Project` di Dashboard, lalu klik "
-"drop-down menu :guilabel:`Network` dan pilih :guilabel:`Load Balancers`."
-
-msgid ""
-"To generate a script of the command instead of operating immediately on the "
-"database, use the following command:"
-msgstr ""
-"Untuk menghasilkan script perintah sebagai ganti operasi segera pada "
-"database, gunakan perintah berikut:"
-
-msgid ""
-"To make a network available as an external network for specific projects "
-"rather than all projects, use the ``access_as_external`` action."
-msgstr ""
-"Untuk membuat sebuah jaringan tersedia sebagai jaringan eksternal untuk "
-"proyek-proyek tertentu bukan untuk semua proyek, gunakan aksi "
-"``access_as_external``."
-
-msgid ""
-"To make this possible, provide a default external network and default "
-"subnetpools (one for IPv4, or one for IPv6, or one of each) so that the "
-"Networking service can choose what to do in lieu of input. Once these are in "
-"place, users can boot their VMs without specifying any networking details. "
-"The Compute service will then use this feature automatically to wire user "
-"VMs."
-msgstr ""
-"Untuk membuat ini memungkinkan, berikan jaringan eksternal default dan "
-"subnetpool standar (satu untuk IPv4, atau satu untuk IPv6, atau satu dari "
-"masing-masing) sehingga layanan Networking dapat memilih apa yang harus "
-"dilakukan sebagai pengganti masukan. Setelah ini di tempat, pengguna dapat "
-"boot VM mereka tanpa menentukan rincian jaringan. Layanan Compute kemudian "
-"akan menggunakan fitur ini secara otomatis ke wire user VM."
-
-msgid ""
-"To migrate between specific migration versions, use the following command:"
-msgstr ""
-"Untuk bermigrasi antara versi migrasi tertentu, gunakan perintah berikut:"
-
-msgid "To prevent the network from automatically starting on boot:"
-msgstr "Untuk mencegah jaringan dari permulaan secara otomatis saat boot:"
-
-msgid ""
-"To provide external network access to your instances, your Dibbler server "
-"also needs to create new routes for each delegated prefix. This is done "
-"using the script file named in the config file above. Edit the ``/var/lib/"
-"dibbler/pd-server.sh`` file:"
-msgstr ""
-"Untuk menyediakan akses jaringan eksternal untuk instance Anda, server "
-"Dibbler Anda juga perlu membuat rute baru untuk setiap awalan yang "
-"didelegasikan. Hal ini dilakukan dengan menggunakan file script di dalam "
-"file konfigurasi di atas. Edit file ``/var/lib/dibbler/pd-server.sh``:"
-
-msgid ""
-"To reduce clutter, this guide removes command output without relevance to "
-"the particular action."
-msgstr ""
-"Untuk mengurangi kekacauan (clutter), panduan ini menghilangkan output "
-"perintah tanpa relevansi dengan tindakan tertentu."
-
-msgid ""
-"To reduce the number of ARP requests, operating systems maintain an ARP "
-"cache that contains the mappings of IP addresses to MAC address. On a Linux "
-"machine, you can view the contents of the ARP cache by using the :command:"
-"`arp` command:"
-msgstr ""
-"Untuk mengurangi jumlah permintaan ARP, sistem operasi mempertahankan cache "
-"ARP yang berisi pemetaan dari alamat IP ke alamat MAC. Pada mesin Linux, "
-"Anda dapat melihat isi dari cache ARP dengan menggunakan perintah :command:"
-"`arp`:"
-
-msgid ""
-"To remove access for that project, delete the RBAC policy that allows it "
-"using the :command:`openstack network rbac delete` command:"
-msgstr ""
-"Untuk menghapus akses untuk proyek itu, hapuslah kebijakan RBAC yang "
-"mengizinkan penggunaan perintah :command:`openstack network rbac delete`:"
-
-msgid ""
-"To remove access for that project, delete the policy that allows it using "
-"the :command:`openstack network rbac delete` command:"
-msgstr ""
-"Untuk menghapus akses untuk proyek itu, hapus kebijakan yang mengizinkan "
-"penggunaan perintah :command:`openstack network rbac delete`:"
-
-msgid ""
-"To request the list of networks that do not have at least one of a list of "
-"tags, the ``not-tags-any`` argument should be set to the list of tags, "
-"separated by commas. In this case, only the networks that do not have at "
-"least one of the given tags will be included in the query result. Example "
-"that returns the networks that do not have the \"red\" tag, or do not have "
-"the \"blue\" tag::"
-msgstr ""
-"Untuk meminta daftar jaringan yang tidak memiliki setidaknya satu dari "
-"daftar tag, argumen ``not-tags-any`` harus ditetapkan ke daftar tag, "
-"dipisahkan dengan koma. Dalam hal ini, hanya jaringan yang tidak memiliki "
-"setidaknya salah satu tag yang diberikan akan dimasukkan dalam hasil query. "
-"argumen yang mengembalikan jaringan yang tidak memiliki tag \"merah\", atau "
-"tidak memiliki tag \"biru\" ::"
-
-msgid ""
-"To request the list of networks that do not have one or more tags, the ``not-"
-"tags`` argument should be set to the list of tags, separated by commas. In "
-"this case, only the networks that do not have any of the given tags will be "
-"included in the query results. Example that returns the networks that do not "
-"have either \"red\" or \"blue\" tag::"
-msgstr ""
-"Untuk meminta daftar jaringan yang tidak memiliki satu atau lebih tag, "
-"argumen ``not-tags`` harus ditetapkan ke daftar tag, dipisahkan dengan koma. "
-"Dalam hal ini, hanya jaringan yang tidak memiliki tag yang diberikan akan "
-"dimasukkan dalam hasil query. Contoh argumen mengembalikan jaringan yang "
-"tidak memiliki tag \"merah\" ataupun \"biru\" ::"
-
-msgid ""
-"To request the list of networks that have a single tag, ``tags`` argument "
-"should be set to the desired tag name. Example::"
-msgstr ""
-"Untuk meminta daftar jaringan yang memiliki tag tunggal, argumen ``tags`` "
-"harus ditetapkan dengan nama tag yang diinginkan. Contoh::"
-
-msgid ""
-"To request the list of networks that have one or more of a list of given "
-"tags, the ``tags-any`` argument should be set to the list of tags, separated "
-"by commas. In this case, as long as one of the given tags is present, the "
-"network will be included in the query result. Example that returns the "
-"networks that have the \"red\" or the \"blue\" tag::"
-msgstr ""
-"Untuk meminta daftar jaringan yang memiliki satu atau lebih dari daftar tag "
-"yang diberikan, argumen ``tag-any`` harus ditetapkan ke daftar tag, "
-"dipisahkan dengan koma. Dalam hal ini, selama salah satu tag yang diberikan "
-"hadir, jaringan akan dimasukkan dalam hasil query. Contoh argumen "
-"mengembalikan jaringan yang memiliki tag \"red\" atau \"blue\" ::"
-
-msgid ""
-"To request the list of networks that have two or more tags, the ``tags`` "
-"argument should be set to the list of tags, separated by commas. In this "
-"case, the tags given must all be present for a network to be included in the "
-"query result. Example that returns networks that have the \"red\" and \"blue"
-"\" tags::"
-msgstr ""
-"Untuk meminta daftar jaringan yang memiliki dua atau lebih tag, argumen "
-"``tags`` harus ditetapkan ke daftar tag, dipisahkan dengan koma. Dalam hal "
-"ini, tag yang diberikan harus semua hadir untuk jaringan untuk dimasukkan "
-"dalam hasil query. Contoh argumen mengembalikan jaringan yang memiliki tag "
-"\"red\" and \"blue\"::"
-
-msgid ""
-"To return to classic mode, turn down the router again, turning off L3 HA and "
-"starting the router again."
-msgstr ""
-"Untuk kembali ke modus klasik, kecilkan lagi router, matikan L3 HA dan mulai "
-"router lagi."
-
-msgid ""
-"To see if your cloud has this feature available, you can check that it is "
-"listed in the supported aliases. You can do this with the OpenStack client."
-msgstr ""
-"Untuk melihat apakah cloud Anda memiliki fitur ini tersedia, Anda dapat "
-"memeriksa dimana fitur tercantum dalam alias didukung. Anda dapat melakukan "
-"ini dengan klien OpenStack."
-
-msgid "To start your Dibbler server, run:"
-msgstr "Untuk memulai server Dibbler Anda, jalankan:"
-
-msgid ""
-"To support integration with the deployment examples, this content configures "
-"the Macvtap mechanism driver to use the overlay network for untagged (flat) "
-"or tagged (VLAN) networks in addition to overlay networks such as VXLAN. "
-"Your physical network infrastructure must support VLAN (802.1q) tagging on "
-"the overlay network."
-msgstr ""
-"Untuk mendukung integrasi dengan contoh pengerahan, konten ini "
-"mengkonfigurasi driver mekanisme Macvtap untuk menggunakan jaringan overlay "
-"untuk jaringan untagged (flat) atau tag (VLAN) dan juga jaringan overlay "
-"seperti VXLAN. Infrastruktur jaringan fisik Anda harus mendukung VLAN "
-"(802.1q) tagging pada jaringan overlay."
-
-msgid "To test the HA of DHCP agent:"
-msgstr "Untuk menguji HA agen DHCP:"
-
-msgid ""
-"To trigger the prefix delegation process, create a router interface between "
-"this subnet and a router with an active interface on the external network:"
-msgstr ""
-"Untuk memicu proses prefix delegation, buat antarmuka router antara subnet "
-"ini dan router dengan interface yang aktif pada jaringan eksternal:"
-
-msgid ""
-"To understand how VLANs work, let's consider VLAN applications in a "
-"traditional IT environment, where physical hosts are attached to a physical "
-"switch, and no virtualization is involved. Imagine a scenario where you want "
-"three isolated networks but you only have a single physical switch. The "
-"network administrator would choose three VLAN IDs, for example, 10, 11, and "
-"12, and would configure the switch to associate switchports with VLAN IDs. "
-"For example, switchport 2 might be associated with VLAN 10, switchport 3 "
-"might be associated with VLAN 11, and so forth. When a switchport is "
-"configured for a specific VLAN, it is called an *access port*. The switch is "
-"responsible for ensuring that the network traffic is isolated across the "
-"VLANs."
-msgstr ""
-"Untuk memahami bagaimana VLAN bekerja, mari kita pertimbangkan aplikasi VLAN "
-"dalam lingkungan TI tradisional, dimana host fisik yang melekat pada switch "
-"fisik, dan tidak ada virtualisasi yang terlibat. Bayangkan sebuah skenario "
-"dimana Anda ingin tiga jaringan terisolasi tetapi Anda hanya memiliki switch "
-"fisik tunggal. Administrator jaringan akan memilih tiga ID VLAN, misalnya, "
-"10, 11, dan 12, dan akan mengkonfigurasi switch untuk mengasosiasikan "
-"switchport dengan VLAN ID. Misalnya, switchport 2 mungkin terkait dengan "
-"VLAN 10, switchport 3 mungkin terkait dengan VLAN 11, dan sebagainya. Ketika "
-"switchport dikonfigurasi untuk VLAN tertentu, hal itu disebut *access port*. "
-"Switch bertanggung jawab untuk memastikan bahwa lalu lintas jaringan "
-"terisolasi di seluruh VLAN."
-
-msgid "To upgrade the database incrementally, use the following command:"
-msgstr "Untuk meng-upgrade database secara bertahap, gunakan perintah berikut:"
-
-msgid ""
-"To use this feature, the neutron service must have the following extensions "
-"enabled:"
-msgstr ""
-"Untuk menggunakan fitur ini, layanan neutron harus memiliki ekstensi yang "
-"diaktifkan berikut:"
-
-msgid ""
-"To use this feature, you need a prefix delegation capable DHCPv6 server that "
-"is reachable from your OpenStack Networking node(s). This could be software "
-"running on the OpenStack Networking node(s) or elsewhere, or a physical "
-"router. For the purposes of this guide we are using the open-source DHCPv6 "
-"server, Dibbler. Dibbler is available in many Linux package managers, or "
-"from source at `tomaszmrugalski/dibbler `_."
-msgstr ""
-"Untuk menggunakan fitur ini, Anda perlu delegasi awalan (prefix delegation) "
-"yang mampu server DHCPv6 yang dicapai dari node OpenStack Networking Anda. "
-"Ini bisa menjadi perangkat lunak yang berjalan pada node OpenStack "
-"Networking atau di tempat lain, atau router fisik. Untuk tujuan panduan ini "
-"kita menggunakan Dibbler, open source Server DHCPv6. Dibbler tersedia di "
-"banyak manajer paket Linux, atau dari sumber di `tomaszmrugalski/dibbler "
-"`_."
-
-msgid ""
-"To validate that the required resources are correctly set up for auto-"
-"allocation, without actually provisioning anything, use the ``--check-"
-"resources`` option:"
-msgstr ""
-"Untuk memvalidasi bahwa sumber daya yang dibutuhkan ditetapkan dengan benar "
-"untuk alokasi otomatis, tanpa benar-benar menyediakan apapun, gunakan opsi "
-"``--check-resources``:"
-
-msgid "To view the defined libvirt networks and their state:"
-msgstr "Untuk melihat jaringan libvirt yang didefinisikan dan keadaan mereka:"
-
-msgid "Trunk states"
-msgstr "Status Trunk"
-
-msgid "Trunking"
-msgstr "Trunking"
-
-msgid ""
-"Trunking is used to connect between different switches. Each trunk uses a "
-"tag to identify which VLAN is in use. This ensures that switches on the same "
-"VLAN can communicate."
-msgstr ""
-"Trunking digunakan untuk menghubungkan antara switch yang berbeda. Setiap "
-"trunk menggunakan tag untuk mengidentifikasi VLAN yang sedang digunakan. Hal "
-"ini memastikan bahwa switch pada VLAN yang sama dapat berkomunikasi."
-
-msgid "Trusted projects policy.json configuration"
-msgstr "Konfigurasi policy.json proyek terpercaya"
-
-msgid ""
-"Tunneling is a mechanism that makes transfer of payloads feasible over an "
-"incompatible delivery network. It allows the network user to gain access to "
-"denied or insecure networks. Data encryption may be employed to transport "
-"the payload, ensuring that the encapsulated user network data appears as "
-"public even though it is private and can easily pass the conflicting network."
-msgstr ""
-"Tunneling (terowongan) adalah sebuah mekanisme yang membuat pemidahan "
-"payload feasible melalui jaringan pengiriman tidak kompatibel. Tunneling ini "
-"mengizinkan pengguna jaringan untuk mendapatkan akses ke jaringan tidak aman "
-"atau ditolak. Enkripsi data dapat digunakan untuk mengangkut payload, "
-"memastikan bahwa jaringan data pengguna yang dikemas akan muncul sebagai "
-"publik meskipun sebenarnya private, dan dapat dengan mudah melewati jaringan "
-"yang saling bertentangan."
-
-msgid "Two compute nodes with the following components:"
-msgstr "Dua node komputasi dengan komponen-komponen berikut:"
-
-msgid "Two network interfaces: management and provider."
-msgstr "Dua antarmuka jaringan: manajemen dan provider."
-
-msgid ""
-"Two networking models exist in OpenStack. The first is called legacy "
-"networking (:term:`nova-network`) and it is a sub-process embedded in the "
-"Compute project (nova). This model has some limitations, such as creating "
-"complex network topologies, extending its back-end implementation to vendor-"
-"specific technologies, and providing project-specific networking elements. "
-"These limitations are the main reasons the OpenStack Networking (neutron) "
-"model was created."
-msgstr ""
-"Dua model jaringan ada di OpenStack. Yang pertama disebut legacy networking "
-"(:term:`nova-network`) dan itu adalah sub-process tertanam dalam proyek "
-"Compute (nova). Model ini memiliki beberapa keterbatasan, seperti dalam "
-"pembuatan topologi jaringan yang kompleks, peluasan implementasi back-end "
-"untuk teknologi vendor-specific, dan penyediaan elemen jaringan project-"
-"specific. Keterbatasan ini adalah alasan utama model OpenStack Networking "
-"(neutron) dibuat."
-
-msgid "Type drivers"
-msgstr "Tipe driver"
-
-msgid ""
-"Typically, one uses this mechanism to delete networking resources for a "
-"defunct project regardless of its existence in the Identity service."
-msgstr ""
-"Biasanya, orang menggunakan mekanisme ini untuk menghapus sumber daya "
-"jaringan untuk proyek mati (defunct) terlepas dari keberadaannya dalam "
-"layanan Identity."
-
-msgid ""
-"UDP has support for one-to-many communication: sending a single packet to "
-"multiple hosts. An application can broadcast a UDP packet to all of the "
-"network hosts on a local network by setting the receiver IP address as the "
-"special IP broadcast address ``255.255.255.255``. An application can also "
-"send a UDP packet to a set of receivers using *IP multicast*. The intended "
-"receiver applications join a multicast group by binding a UDP socket to a "
-"special IP address that is one of the valid multicast group addresses. The "
-"receiving hosts do not have to be on the same local network as the sender, "
-"but the intervening routers must be configured to support IP multicast "
-"routing. VXLAN is an example of a UDP-based protocol that uses IP multicast."
-msgstr ""
-"UDP memiliki dukungan untuk komunikasi one-to-many: mengirimkan paket ke "
-"beberapa host. Sebuah aplikasi dapat menyiarkan paket UDP ke semua host "
-"jaringan pada jaringan lokal dengan menetapkan alamat IP penerima sebagai "
-"alamat broadcast IP khusus ``255.255.255.255``. Sebuah aplikasi juga dapat "
-"mengirim paket UDP ke satu set receiver menggunakan *IP multicast *. "
-"Aplikasi penerima yang dimaksudkan akan menggabungkan kelompok multicast "
-"yang terikat soket UDP ke alamat IP khusus yang merupakan salah satu alamat "
-"grup multicast yang valid. Host menerima tidak harus berada di jaringan "
-"lokal yang sama sebagai pengirim, tetapi router intervensi harus "
-"dikonfigurasi untuk mendukung IP multicast routing. VXLAN adalah contoh dari "
-"protokol berbasis UDP yang menggunakan IP multicast."
-
-msgid ""
-"UDP, like TCP, uses the notion of ports to distinguish between different "
-"applications running on the same system. Note, however, that operating "
-"systems treat UDP ports separately from TCP ports. For example, it is "
-"possible for one application to be associated with TCP port 16543 and a "
-"separate application to be associated with UDP port 16543."
-msgstr ""
-"UDP, seperti TCP, menggunakan gagasan (notion) port untuk membedakan antara "
-"aplikasi yang berbeda berjalan pada sistem yang sama. Catatan, bagaimanapun, "
-"bahwa sistem operasi memperlakukan port UDP secara terpisah dari port TCP. "
-"Sebagai contoh, ada kemungkinan satu aplikasi untuk dihubungkan dengan TCP "
-"port 16543 dan aplikasi terpisah untuk dihubungkan dengan port UDP 16.543."
-
-msgid "Unique physical network name"
-msgstr "Unique physical network name (nama jaringan fisik unik)"
-
-msgid ""
-"Unlike conventional provider networks, a DHCP agent cannot support more than "
-"one segment within a network. The operator must deploy at least one DHCP "
-"agent per segment. Consider deploying DHCP agents on compute nodes "
-"containing the segments rather than one or more network nodes to reduce node "
-"count."
-msgstr ""
-"Tidak seperti jaringan penyedia konvensional, agen DHCP tidak dapat "
-"mendukung lebih dari satu segmen dalam jaringan. Operator harus menggunakan "
-"setidaknya satu agen DHCP per segmen. Pertimbangkan penggelaran agen DHCP "
-"pada node komputasi yang berisi segmen daripada satu atau lebih node "
-"jaringan untuk mengurangi jumlah node."
-
-msgid ""
-"Unlike most agents, BGP speakers require manual scheduling to an agent. BGP "
-"speakers only form peering sessions and begin prefix advertisement after "
-"scheduling to an agent. Schedule the BGP speaker to agent "
-"``37729181-2224-48d8-89ef-16eca8e2f77e``."
-msgstr ""
-"Tidak seperti kebanyakan agen, speaker BGP memerlukan penjadwalan manual "
-"untuk agen. Speaker BGP hanya membentuk sesi kebersamaan dan mulai "
-"penyiaran update dan perubahan awal prefix setelah penjadwalan untuk agen. "
-"Jadwalkan speaker BGP untuk agen ``37729181-2224-48d8-89ef-16eca8e2f77e``."
-
-msgid "Unlimited address overlap is allowed."
-msgstr "Tak terbatas tumpang tindih alamat diperbolehkan."
-
-msgid "Update a port chain or port pair group"
-msgstr "Memperbarui rantai port atau kelompok pasangan port"
-
-msgid "Update the DHCP configuration file ``/etc/neutron/dhcp_agent.ini``:"
-msgstr "Update file konfigurasi DHCP ``/etc/neutron/dhcp_agent.ini``:"
-
-msgid "Update the nova configuration file ``/etc/nova/nova.conf``:"
-msgstr "Update file konfigurasi nova ``/etc/nova/nova.conf``:"
-
-msgid ""
-"Update the plug-in configuration file ``/etc/neutron/plugins/linuxbridge/"
-"linuxbridge_conf.ini``:"
-msgstr ""
-"Update file konfigurasi plug-in ``/etc/neutron/plugins/linuxbridge/"
-"linuxbridge_conf.ini``:"
-
-msgid ""
-"Update the provider network to support external connectivity for self-"
-"service networks."
-msgstr ""
-"Lakukan update jaringan provider untuk mendukung konektivitas eksternal "
-"untuk jaringan self-service."
-
-msgid ""
-"Update the security group to allow traffic to reach the new load balancer. "
-"Create a new security group along with ingress rules to allow traffic into "
-"the new load balancer. The neutron port for the load balancer is shown as "
-"``vip_port_id`` above."
-msgstr ""
-"Update grup keamanan untuk mengizinkan lalu lintas untuk mencapai "
-"penyeimbang beban baru. Buat grup keamanan baru bersama dengan aturan "
-"masuknya untuk mengizinkan lalu lintas ke penyeimbang beban baru. Port "
-"neutron untuk penyeimbang beban ditampilkan sebagai ``vip_port_id`` di atas."
-
-msgid "Uplink the router on an external network"
-msgstr "Uplink router pada jaringan eksternal"
-
-msgid "Usage"
-msgstr "Usage (penggunaan)"
-
-msgid "Use InfiniBand enabled network adapters."
-msgstr "Gunakan InfiniBand yang diaktifkan adapter jaringan."
-
-msgid ""
-"Use ``availability_zone_hints`` to specify the zone in which the resource is "
-"hosted:"
-msgstr ""
-"Gunakan ``availability_zone_hints`` untuk menentukan zona dimana sumber "
-"akan disimpan (hosted):"
-
-msgid "Use case"
-msgstr "Use case"
-
-msgid "Use case 1: Ports are published directly in the external DNS service"
-msgstr ""
-"Use case 1: Port yang diterbitkan secara langsung dalam layanan DNS eksternal"
-
-msgid ""
-"Use case 2: Floating IPs are published with associated port DNS attributes"
-msgstr "Use case 2: IP mengambang diterbitkan dengan atribut DNS port terkait"
-
-msgid "Use case 3: Floating IPs are published in the external DNS service"
-msgstr "Use case 3: IP mengambang diterbitkan dalam layanan DNS eksternal"
-
-msgid "Use cases"
-msgstr "Use case (kasus penggunaan)"
-
-msgid ""
-"Use of ``macvtap`` is arbitrary. Only the self-service deployment examples "
-"require VLAN ID ranges. Replace ``VLAN_ID_START`` and ``VLAN_ID_END`` with "
-"appropriate numerical values."
-msgstr ""
-"Penggunaan ``macvtap`` ini sembarang. Hanya contoh pengerahan self-service "
-"memerlukan rentang ID VLAN. Gantikan ``VLAN_ID_START`` dan ``VLAN_ID_END`` "
-"dengan nilai-nilai numerik yang sesuai."
-
-msgid ""
-"Use the :command:`neutron port-chain-update` command to dynamically add or "
-"remove port pair groups or flow classifiers on a port chain."
-msgstr ""
-"Gunakan perintah :command:`neutron port-chain-update` untuk secara dinamis "
-"menambahkan atau menghapus kelompok pasangan port atau pengklasifikasi "
-"aliran pada rantai port."
-
-msgid ""
-"Use the :command:`neutron port-pair-group-update` command to perform dynamic "
-"scale-out or scale-in operations by adding or removing port pairs on a port "
-"pair group."
-msgstr ""
-"Gunakan perintah :command:`neutron port-pair-group-update` untuk melakukan "
-"scale-out dinamis atau operasi scale-in dengan menambahkan atau menghapus "
-"pasang port pada kelompok pasangan port."
-
-msgid "Use the :command:`neutron rbac-show` command to see the details:"
-msgstr "Gunakan perintah :command:`neutron rbac-show`, lihat rincian:"
-
-msgid ""
-"Use the :command:`openstack extension list` command to check if these "
-"extensions are enabled. Check ``agent`` and ``agent_scheduler`` are included "
-"in the output."
-msgstr ""
-"Gunakan perintah :command:`openstack extension list` untuk memeriksa apakah "
-"ekstensi ini diaktifkan. Periksa ``agent`` dan `` agent_scheduler`` "
-"disertakan dalam output."
-
-msgid ""
-"Use the command :command:`openstack extension list --network` to verify that "
-"the ``Trunk Extension`` and ``Trunk port details`` extensions are enabled."
-msgstr ""
-"Gunakan perintah :command:`openstack extension list --network` untuk "
-"memverifikasi dimana ekstensi ``Trunk Extension`` dan ``Trunk port "
-"details`` diaktifkan."
-
-msgid ""
-"Use the following example configuration as a template to add support for "
-"high-availability using DVR to an existing operational environment that "
-"supports self-service networks."
-msgstr ""
-"Menggunakan contoh konfigurasi berikut sebagai template untuk menambahkan "
-"dukungan untuk ketersediaan tinggi menggunakan DVR di lingkungan operasional "
-"yang ada yang mendukung jaringan self-service."
-
-msgid ""
-"Use the following example configuration as a template to add support for "
-"high-availability using VRRP to an existing operational environment that "
-"supports self-service networks."
-msgstr ""
-"Gunakan contoh konfigurasi berikut sebagai template untuk menambahkan "
-"dukungan high-availability penggunaan VRRP untuk lingkungan operasional yang "
-"ada yang mendukung jaringan self-service."
-
-msgid ""
-"Use the following example configuration as a template to add support for "
-"self-service networks to an existing operational environment that supports "
-"provider networks."
-msgstr ""
-"Gunakan contoh konfigurasi berikut sebagai template untuk menambahkan "
-"dukungan untuk jaringan self-service untuk lingkungan operasional yang ada "
-"yang mendukung jaringan provider."
-
-msgid ""
-"Use the following example configuration as a template to add support for the "
-"Macvtap mechanism driver to an existing operational environment."
-msgstr ""
-"Gunakan konfigurasi contoh berikut sebagai template untuk menambahkan "
-"dukungan driver mekanisme Macvtap din lingkungan operasional yang ada."
-
-msgid ""
-"Use the following example configuration as a template to deploy provider "
-"networks in your environment."
-msgstr ""
-"Gunakan konfigurasi contoh berikut sebagai template untuk menggunakan "
-"jaringan provider di lingkungan Anda."
-
-msgid "Use the previous commands to assign the network to agents."
-msgstr "Gunakan perintah sebelumnya untuk menetapkan jaringan untuk agen."
-
-msgid ""
-"Use this script to remove empty bridges on compute nodes by running the "
-"following command:"
-msgstr ""
-"Menggunakan skrip ini untuk menghapus jembatan kosong pada node komputasi "
-"dengan menjalankan perintah berikut:"
-
-msgid "User workflow"
-msgstr "Alur kerja pengguna"
-
-msgid ""
-"Users are encouraged to take these tools, test them, provide feedback, and "
-"then expand on the feature set to suit their own deployments; deployers that "
-"refrain from participating in this process intending to wait for a path that "
-"better suits their use case are likely to be disappointed."
-msgstr ""
-"Pengguna didorong untuk mengambil alat-alat ini, menguji alat mereka, "
-"memberikan umpan balik, dan kemudian memperluas pada pengaturan fitur yang "
-"sesuai dengan deployment mereka sendiri; deployers yang menahan diri dari "
-"berpartisipasi dalam proses ini berniat untuk menunggu jalan yang lebih baik "
-"sesuai dengan use case mereka mungkin akan kecewa."
-
-msgid ""
-"Users can also integrate the Networking and Compute services with an "
-"external DNS. To accomplish this, the users have to:"
-msgstr ""
-"Pengguna juga dapat mengintegrasikan layanan Networking dan Compute dengan "
-"DNS eksternal. Untuk mencapai hal ini, pengguna harus:"
-
-msgid ""
-"Users can control the behavior of the Networking service in regards to DNS "
-"using two attributes associated with ports, networks, and floating IPs. The "
-"following table shows the attributes available for each one of these "
-"resources:"
-msgstr ""
-"Pengguna dapat mengontrol perilaku layanan Networking dalam hal DNS "
-"menggunakan dua atribut yang terkait dengan ports, jaringan, dan IP "
-"mengambang. Tabel berikut menunjukkan atribut yang tersedia untuk masing-"
-"masing sumber daya ini:"
-
-msgid ""
-"Users create project networks for connectivity within projects. By default, "
-"they are fully isolated and are not shared with other projects. OpenStack "
-"Networking supports the following types of network isolation and overlay "
-"technologies."
-msgstr ""
-"Pengguna membuat jaringan proyek untuk konektivitas dalam proyek. Secara "
-"default, jeringan proyek sepenuhnya terisolasi dan tidak dibagi dengan "
-"proyek-proyek lainnya. OpenStack Networking mendukung jenis jaringan isolasi "
-"dan overlay teknologi."
-
-msgid "Using DPDK in OVS requires the following minimum software versions:"
-msgstr ""
-"Penggunaan DPDK di OVS membutuhkan perangkat lunak versi minimum berikut:"
-
-msgid "Using SLAAC for addressing"
-msgstr "Penggunaan SLAAC untuk pengalamatan"
-
-msgid "Using SR-IOV interfaces"
-msgstr "Penggunaan SR-IOV interface"
-
-msgid ""
-"Using subnet pools constrains what addresses can be used by requiring that "
-"every subnet be within the defined pool. It also prevents address reuse or "
-"overlap by two subnets from the same pool."
-msgstr ""
-"Penggunaan kolam subnet membatasi alamat dimana dapat digunakan dengan "
-"mewajibkan bahwa setiap subnet berada dalam kolam yang ditetapkan. Hal ini "
-"juga mencegah penggunaan ulang alamat atau tumpang tindih dengan dua subnet "
-"dari kolam yang sama."
-
-msgid ""
-"Using this script can still trigger the original race condition. Only run "
-"this script if you have evacuated all instances off a compute node and you "
-"want to clean up the bridges. In addition to evacuating all instances, you "
-"should fence off the compute node where you are going to run this script so "
-"new instances do not get scheduled on it."
-msgstr ""
-"Penggunaan skrip ini masih dapat memicu kondisi race condition (kondisi "
-"perebutan). Hanya menjalankan skrip ini jika Anda telah mengevakuasi semua "
-"instance dari node komputasi dan Anda ingin membersihkan jembatan. Selain "
-"mengevakuasi semua instance, Anda harus memagari node komputasi ketika Anda "
-"akan menjalankan skrip ini sehingga instance baru tidak bisa dijadwalkan di "
-"node komputasi."
-
-msgid "Using trunks and subports inside an instance"
-msgstr "Penggunaan trunk dan subport dalam sebuah instance"
-
-msgid "Using vhost-user interfaces"
-msgstr "Penggunaan antarmuka vhost-user"
-
-msgid "VF"
-msgstr "VF"
-
-msgid "VLAN"
-msgstr "VLAN"
-
-msgid "VLAN ID 101 (tagged)"
-msgstr "VLAN ID 101 (tagged)"
-
-msgid "VLAN ID 102 (tagged)"
-msgstr "VLAN ID 102 (tagged)"
-
-msgid ""
-"VLAN is a networking technology that enables a single switch to act as if it "
-"was multiple independent switches. Specifically, two hosts that are "
-"connected to the same switch but on different VLANs do not see each other's "
-"traffic. OpenStack is able to take advantage of VLANs to isolate the traffic "
-"of different projects, even if the projects happen to have instances running "
-"on the same compute host. Each VLAN has an associated numerical ID, between "
-"1 and 4095. We say \"VLAN 15\" to refer to the VLAN with a numerical ID of "
-"15."
-msgstr ""
-"VLAN adalah teknologi jaringan yang mengaktifkan sebuah switch tunggal untuk "
-"bertindak seolah-olah itu beberapa switch independen. Secara khusus, dua "
-"host yang terhubung ke switch yang sama tetapi pada VLAN yang berbeda tidak "
-"melihat lalu lintas masing-masing. OpenStack mampu memanfaatkan VLAN untuk "
-"mengisolasi lalu lintas dari proyek yang berbeda, bahkan jika proyek "
-"kebetulan punya instance yang sedang berjalan pada host komputasi yang sama. "
-"Setiap VLAN memiliki angka ID yang terkait, antara 1 sampai 4095. Kita "
-"mengatakan \"VLAN 15\" untuk merujuk pada VLAN dengan ID numerik dari 15."
-
-msgid "VLANs"
-msgstr "VLANs"
-
-msgid "VPNaaS"
-msgstr "VPNaaS"
-
-msgid ""
-"VPNaaS supports IPv6, but support in Kilo and prior releases will have some "
-"bugs that may limit how it can be used. More thorough and complete testing "
-"and bug fixing is being done as part of the Liberty release. IPv6-based VPN-"
-"as-a-Service is configured similar to the IPv4 configuration. Either or both "
-"the ``peer_address`` and the ``peer_cidr`` can specified as an IPv6 address. "
-"The choice of addressing modes and router modes described above should not "
-"impact support."
-msgstr ""
-"VPNaaS mendukung IPv6, namun dukungan di Kilo dan rilis sebelumnya akan "
-"memiliki beberapa bug yang mungkin membatasi bagaimana hal itu dapat "
-"digunakan. Lebih menyeluruh dan lengkap pengujian dan perbaikan bug telah "
-"dilakukan sebagai bagian dari rilis Liberty. IPv6 yang berbasis VPN-as-a-"
-"Service dikonfigurasi mirip dengan konfigurasi IPv4. Salah satu atau kedua "
-"``peer_address`` dan `` peer_cidr`` dapat ditetapkan sebagai alamat IPv6. "
-"Pilihan mode pengalamatan dan mode router dijelaskan di atas seharusnya "
-"tidak berdampak dukungan."
-
-msgid "VXLAN"
-msgstr "VXLAN"
-
-msgid "VXLAN ID (VNI) 101"
-msgstr "VXLAN ID (VNI) 101"
-
-msgid "VXLAN ID (VNI) 102"
-msgstr "VXLAN ID (VNI) 102"
-
-msgid ""
-"VXLAN and GRE are encapsulation protocols that create overlay networks to "
-"activate and control communication between compute instances. A Networking "
-"router is required to allow traffic to flow outside of the GRE or VXLAN "
-"project network. A router is also required to connect directly-connected "
-"project networks with external networks, including the Internet. The router "
-"provides the ability to connect to instances directly from an external "
-"network using floating IP addresses."
-msgstr ""
-"VXLAN dan GRE adalah protokol enkapsulasi yang membuat jaringan overlay "
-"untuk mengaktifkan dan mengendalikan komunikasi antara instance komputasi. "
-"Sebuah router jaringan diperlukan untuk mengizinkan lalu lintas mengalir di "
-"luar jaringan proyek GRE atau VXLAN. Sebuah router juga diperlukan untuk "
-"menghubungkan jaringan proyek langsung terhubung dengan jaringan eksternal, "
-"termasuk Internet. Router memberikan kemampuan untuk terhubung ke instance "
-"langsung dari jaringan eksternal menggunakan alamat IP mengambang."
-
-msgid ""
-"VXLAN multicast group configuration is not applicable for the Open vSwitch "
-"agent."
-msgstr ""
-"VXLAN konfigurasi kelompok multicast tidak berlaku untuk agen Open vSwitch."
-
-msgid "Validating the requirements for auto-allocation"
-msgstr "Memvalidasi persyaratan untuk auto-alokasi"
-
-msgid ""
-"Various virtual networking resources support tags for use by external "
-"systems or any other clients of the Networking service API."
-msgstr ""
-"Berbagai sumber daya jaringan virtual mendukungan tag untuk digunakan oleh "
-"sistem eksternal atau klien lainnya dari Networking service API."
-
-msgid "Verify addition of the BGP peer to the BGP speaker."
-msgstr "Lakukan verifikasi penambahan peer BGP ke BGP speaker."
-
-msgid "Verify association of the provider network with the BGP speaker."
-msgstr "Lakukan verifikasi asosiasi jaringan provider dengan BGP speaker."
-
-msgid ""
-"Verify creation of the internal high-availability network that handles VRRP "
-"*heartbeat* traffic."
-msgstr ""
-"Lakukan verifikasi penciptaan jaringan high-availability internal yang "
-"menangani VRRP lalu lintas *heartbeat*."
-
-msgid "Verify distributed routing on the router."
-msgstr "Lakukan verifikasi routing terdistribusikan pada router."
-
-msgid "Verify failover operation"
-msgstr "Verifikasi operasi pemulihan kegagalan"
-
-msgid "Verify network operation"
-msgstr "Lakukan verifikasi operasi jaringan"
-
-msgid "Verify presence and operation of each BGP dynamic routing agent."
-msgstr ""
-"Lakukan verifikasi keberadaan dan operasi masing-masing agen routing dinamis "
-"BGP."
-
-msgid "Verify presence and operation of the agents."
-msgstr "Memverifikasi keberadaan dan operasi dari agen."
-
-msgid "Verify presence and operation of the agents:"
-msgstr "Memverifikasi keberadaan dan operasi agen:"
-
-msgid "Verify scheduling of the BGP speaker to the agent."
-msgstr "Lakukan verifikasi penjadwalan BGP speaker untuk agen."
-
-msgid "Verify service operation"
-msgstr "Melakukan verifikasi operasi layanan"
-
-msgid "Verify that each IPv4 subnet associates with at least one DHCP agent."
-msgstr ""
-"Pastikan bahwa setiap subnet IPv4 berasosiasi dengan setidaknya satu agen "
-"DHCP."
-
-msgid ""
-"Verify that host aggregates were created for each segment in the Compute "
-"service (for the sake of brevity, only one of the segments is shown in this "
-"example)."
-msgstr ""
-"Pastikan bahwa kumpulan host diciptakan untuk setiap segmen dalam pelayanan "
-"Compute (untuk singkatnya, hanya salah satu segmen yang ditunjukkan dalam "
-"contoh ini)."
-
-msgid ""
-"Verify that inventories were created for each segment IPv4 subnet in the "
-"Compute service placement API (for the sake of brevity, only one of the "
-"segments is shown in this example)."
-msgstr ""
-"Lakukan verifikasi dimana persediaan yang diciptakan untuk setiap subnet "
-"IPv4 segment dalam placement API di layanan Compute (untuk singkatnya, "
-"hanya salah satu segmen yang ditunjukkan dalam contoh ini)."
-
-msgid "Verify that the VFs have been created and are in ``up`` state:"
-msgstr "Pastikan bahwa VF telah dibuat dan dalam keadaan ``up``:"
-
-msgid ""
-"Verify that the load balancer is responding to pings before moving further:"
-msgstr ""
-"Pastikan bahwa penyeimbang beban merespon ping sebelum bergerak lebih lanjut:"
-
-msgid ""
-"Verify that the network contains the ``segment1`` and ``segment2`` segments."
-msgstr "Pastikan jaringan berisi segmen `` segment1`` dan ``segment2``."
-
-msgid ""
-"Verify the prefixes and next-hop IP addresses that the BGP speaker "
-"advertises."
-msgstr ""
-"Lakukan verifikasi prefiks dan alamat IP next-hop dimana speaker BGP "
-"menyiarkan update dan perubahan."
-
-msgid "Version"
-msgstr "Version (versi)"
-
-msgid ""
-"Virtual Function. The virtual PCIe device created from a physical Ethernet "
-"controller."
-msgstr ""
-"Virtual Function. Perangkat PCIe virtual yang terbuat dari physical Ethernet "
-"controller."
-
-msgid "Virtual extensible local area network (VXLAN)"
-msgstr "Virtual extensible local area network (VXLAN)"
-
-msgid "Virtual routing and forwarding (VRF)"
-msgstr "Virtual routing and forwarding (VRF)"
-
-msgid ""
-"Virtual routing and forwarding is an IP technology that allows multiple "
-"instances of a routing table to coexist on the same router at the same time. "
-"It is another name for the network namespace functionality described above."
-msgstr ""
-"Virtual routing and forwarding adalah sebuah teknologi IP yang mengizinkan "
-"beberapa instance dari tabel routing untuk hidup berdampingan pada router "
-"yang sama pada waktu yang sama. VRF ini adalah nama lain untuk fungsi "
-"jaringan namespace yang dijelaskan di atas."
-
-msgid "We anticipate this to expand to VM ports in the Ocata cycle."
-msgstr ""
-"Kami mengantisipasi ini untuk memperluas ke port VM dalam siklus Ocata."
-
-msgid ""
-"We recommend using Open vSwitch version 2.4 or higher. Optional features may "
-"require a higher minimum version."
-msgstr ""
-"Sebaiknya menggunakan Open vSwitch versi 2.4 atau lebih tinggi. Fitur "
-"opsional mungkin memerlukan versi minimum yang lebih tinggi."
-
-msgid ""
-"We recommend using VLAN provider networks for segregation. This way you can "
-"combine instances without SR-IOV ports and instances with SR-IOV ports on a "
-"single network."
-msgstr ""
-"Kami merekomendasikan penggunaan jaringan provider VLAN untuk pemisahan. "
-"Dengan cara ini Anda dapat menggabungkan instance tanpa port SR-IOV dan "
-"instance dengan port SR-IOV pada jaringan tunggal."
-
-msgid ""
-"When ``admin_state`` is set to ``DOWN``, the user is blocked from performing "
-"operations on the trunk. ``admin_state`` is set by the user and should not "
-"be used to monitor the health of the trunk."
-msgstr ""
-"Ketika ``admin_state`` diatur ke ``DOWN``, pengguna akan diblokir untuk "
-"melakukan operasi di trunk. ``admin_state`` diatur oleh pengguna dan tidak "
-"boleh digunakan untuk memantau kesehatan trunk."
-
-msgid ""
-"When a NIC receives an Ethernet frame, by default the NIC checks to see if "
-"the destination MAC address matches the address of the NIC (or the broadcast "
-"address), and the Ethernet frame is discarded if the MAC address does not "
-"match. For a compute host, this behavior is undesirable because the frame "
-"may be intended for one of the instances. NICs can be configured for "
-"*promiscuous mode*, where they pass all Ethernet frames to the operating "
-"system, even if the MAC address does not match. Compute hosts should always "
-"have the appropriate NICs configured for promiscuous mode."
-msgstr ""
-"Ketika NIC menerima frame Ethernet, secara default NIC memeriksa untuk "
-"melihat apakah tujuan alamat MAC sesuai dengan alamat NIC (atau alamat "
-"broadcast), dan frame Ethernet dibuang jika alamat MAC tidak cocok. Untuk "
-"host komputasi, perilaku ini tidak diinginkan karena frame mungkin "
-"dimaksudkan untuk salah satu instance. NIC dapat dikonfigurasi untuk "
-"*promiscuous mode*, dimana mereka melewatkan semua frame Ethernet ke sistem "
-"operasi, bahkan jika alamat MAC tidak cocok. Host komputasi harus selalu "
-"memiliki NIC yang tepat dikonfigurasi untuk promiscuous mode."
-
-msgid ""
-"When configuring instances to use a subport, ensure that the interface on "
-"the instance is set to use the MAC address assigned to the port by the "
-"Networking service. Instances are not made aware of changes made to the "
-"trunk after they are active. For example, when a subport with a "
-"``segmentation-type`` of ``vlan`` is added to a trunk, any operations "
-"specific to the instance operating system that allow the instance to send "
-"and receive traffic on the new VLAN must be handled outside of the "
-"Networking service."
-msgstr ""
-"Ketika mengkonfigurasi instance untuk menggunakan subport, pastikan bahwa "
-"antarmuka pada instance diatur untuk menggunakan alamat MAC yang ditugaskan "
-"(assigned) ke port oleh layanan Networking. Instance tidak menyadarai "
-"perubahan yang dibuat trunk setelah mereka aktif. Misalnya, ketika subport "
-"dengan ``segmentasi-type`` dari ``vlan`` ditambahkan ke trunk, setiap "
-"operasi akan menspesifikasi untuk sistem operasi instance yang mengizinkan "
-"instance mengirim dan menerima lalu lintas pada VLAN baru harus ditangani di "
-"luar layanan Networking."
-
-msgid ""
-"When creating subports, the MAC address of the trunk parent port can be set "
-"on the subport. This will allow VLAN subinterfaces inside an instance "
-"launched on a trunk to be configured without explicitly setting a MAC "
-"address. Although unique MAC addresses can be used for subports, this can "
-"present issues with ARP spoof protections and the native OVS firewall "
-"driver. If the native OVS firewall driver is to be used, we recommend that "
-"the MAC address of the parent port be re-used on all subports."
-msgstr ""
-"Ketika pembuatan subports, alamat MAC dari port tua induk dapat diatur pada "
-"subport tersebut. Hal ini akan mengizinkan subinterfaces VLAN dalam sebuah "
-"instance yang diluncurkan pada trunk yang akan dikonfigurasi tanpa secara "
-"eksplisit menetapkan alamat MAC. Meskipun alamat MAC yang unik dapat "
-"digunakan untuk subports, ini dapat menimbulkan masalah dengan perlindungan "
-"spoof ARP dan driver firewall OVS asli (native). Jika driver firewall OVS "
-"asli akan digunakan, kami menyarankan bahwa alamat MAC dari port induk akan "
-"digunakan kembali pada semua subports."
-
-msgid ""
-"When libvirt boots a virtual machine, it places the machine's VIF in the "
-"bridge ``virbr0`` unless explicitly told not to."
-msgstr ""
-"Ketika libvirt boot mesin virtual, ia menempatkan VIF mesin di jembatan "
-"``virbr0`` kecuali secara eksplisit mengatakan tidak."
-
-msgid ""
-"When the ``--or-show`` option is used the command returns the topology "
-"information if it already exists."
-msgstr ""
-"Bila opsi ``--or-show`` digunakan, perintah akan mengembalikan informasi "
-"topologi jika sudah ada."
-
-msgid ""
-"When the ``router_distributed = True`` flag is configured, routers created "
-"by all users are distributed. Without it, only privileged users can create "
-"distributed routers by using ``--distributed True``."
-msgstr ""
-"Ketika flag ``router_distributed = True`` dikonfigurasi, router yang dibuat "
-"oleh semua pengguna didistribusikan. Tanpa itu, hanya pengguna istimewa "
-"dapat menciptakan didistribusikan router dengan menggunakan ``--distributed "
-"True``."
-
-msgid ""
-"When the router receives a packet with the matching IP address and port, it "
-"translates these back to the private IP address and port, and forwards the "
-"packet along."
-msgstr ""
-"Ketika router menerima paket dengan alamat IP dan port yang cocok , router "
-"itu menterjemahkan kembali paket ke alamat IP private dan port, dan "
-"meneruskan paket secara bersama."
-
-msgid ""
-"When the source or destination IP address are not of the same IP version "
-"(for example, IPv6), the command returns an error."
-msgstr ""
-"Ketika sumber atau alamat IP tujuan adalah bukan dari versi IP yang sama "
-"(misalnya, IPv6), perintah akan mengembalikan suatu kesalahan."
-
-msgid ""
-"When this functionality is enabled, it is leveraged by the Compute service "
-"when creating instances. When allocating ports for an instance during boot, "
-"the Compute service populates the ``dns_name`` attributes of these ports "
-"with the ``hostname`` attribute of the instance, which is a DNS sanitized "
-"version of its display name. As a consequence, at the end of the boot "
-"process, the allocated ports will be known in the dnsmasq associated to "
-"their networks by their instance ``hostname``."
-msgstr ""
-"Ketika fungsi ini diaktifkan, terjadi pengungkitkan oleh layanan Compute "
-"saat membuat instance. Ketika mengalokasikan port untuk instance saat boot, "
-"layanan Compute akan mengisi atribut port ``dns_name`` ini dengan atribut "
-"instance ``hostname``, yang merupakan versi DNS yang dibersihkan dari nama "
-"tampilan nya. Akibatnya, pada akhir proses booting, port dialokasikan akan "
-"diketahui dalam dnsmasq terkait ke jaringan mereka dengan instance `` "
-"hostname``mereka."
-
-msgid ""
-"When troubleshooting an instance that is not reachable over the network, it "
-"can be helpful to examine this log to verify that all four steps of the DHCP "
-"protocol were carried out for the instance in question."
-msgstr ""
-"Ketika ada masalah sebuah instance yang tidak dapat dijangkau melalui "
-"jaringan, ada bantuan untuk memeriksa log ini untuk memverifikasi bahwa "
-"semua empat langkah dari protokol DHCP dilakukan untuk instance yang "
-"dipermasalahkan."
-
-msgid ""
-"When using DevStack, it is important to start your server after the ``stack."
-"sh`` script has finished to ensure that the required network interfaces have "
-"been created."
-msgstr ""
-"Bila menggunakan DevStack, Hal ini penting untuk memulai server Anda setelah "
-"skrip ``stack.sh`` telah selesai untuk memastikan bahwa antarmuka jaringan "
-"yang diperlukan telah dibuat."
-
-msgid ""
-"When using Quality of Service (QoS), ``max_burst_kbps`` (burst over "
-"``max_kbps``) is not supported. In addition, ``max_kbps`` is rounded to Mbps."
-msgstr ""
-"Ketika penggunaan Quality of Service (QoS), ``max_burst_kbps`` (meledak "
-"lebih ``max_kbps``) tidak didukung. Sebagai tambahan, ``max_kbps`` "
-"dibulatkan ke Mbps."
-
-msgid ""
-"When using SLAAC, the currently supported combinations for ``ipv6_ra_mode`` "
-"and ``ipv6_address_mode`` are as follows."
-msgstr ""
-"Bila menggunakan SLAAC, kombinasi didukung saat ini untuk ``ipv6_ra_mode`` "
-"dan ``ipv6_address_mode`` adalah sebagai berikut."
-
-msgid ""
-"When using a ``segmentation-type`` of ``vlan``, the OVS and Linux bridge "
-"drivers present the network of the parent port as the untagged VLAN and all "
-"subports as tagged VLANs."
-msgstr ""
-"Bila menggunakan ``segmentasi-type`` dari ` vlan``, driver OVS dan Linux "
-"bridge menyajikan jaringan port induk sebagai VLAN untagged dan semua "
-"subports sebagai VLAN taged."
-
-msgid ""
-"When using the Open vSwitch or Linux bridge drivers, new MTU calculations "
-"will be propogated automatically after restarting the ``l3-agent`` service."
-msgstr ""
-"Bila menggunakan Open vSwitch atau driver jembatan Linux, perhitungan MTU "
-"baru akan diperbanyak secara otomatis setelah restart layanan ``l3-agent``."
-
-msgid ""
-"When using the neutron L3 agent in a configuration where it is auto-"
-"configuring an IPv6 address via SLAAC, and the agent is learning its default "
-"IPv6 route from the ICMPv6 Router Advertisement, it may be necessary to set "
-"the ``net.ipv6.conf..accept_ra`` sysctl to the value "
-"``2`` in order for routing to function correctly. For a more detailed "
-"description, please see the `bug `__."
-msgstr ""
-"Bila menggunakan neutron L3 agent dalam konfigurasi dimana itu adalah auto-"
-"configuring alamat IPv6 melalui SLAAC, dan agen sedang mempelajari rute IPv6 "
-"defaultnya dari ICMPv6 Router Advertisement, itu mungkin perlu untuk "
-"mengatur sysctl ``net.ipv6.conf . .accept_ra`` dengan "
-"nilai ``2`` agar routing berfungsi dengan benar. Untuk penjelasan lebih "
-"rinci, silakan lihat `bug ` "
-"__."
-
-msgid ""
-"When using the reference implementation of the OpenStack Networking prefix "
-"delegation driver, Dibbler must also be installed on your OpenStack "
-"Networking node(s) to serve as a DHCPv6 client. Version 1.0.1 or higher is "
-"required."
-msgstr ""
-"Bila menggunakan implementasi referensi dari driver prefix delegation "
-"OpenStack Networking, Dibbler juga harus diinstal pada node OpenStack "
-"Networking Anda untuk melayani sebagai klien DHCPv6. Versi 1.0.1 atau lebih "
-"tinggi diperlukan."
-
-msgid ""
-"When you create a network with one port, the network will be scheduled to an "
-"active DHCP agent. If many active DHCP agents are running, select one "
-"randomly. You can design more sophisticated scheduling algorithms in the "
-"same way as nova-schedule later on."
-msgstr ""
-"Bila Anda membuat jaringan dengan satu port, jaringan akan dijadwalkan untuk "
-"agen DHCP aktif. Jika banyak agen DHCP aktif berjalan, pilih salah satu "
-"secara acak. Anda dapat merancang algoritma penjadwalan yang lebih canggih "
-"dalam cara yang sama seperti nova-schedule nanti."
-
-msgid ""
-"When you run :command:`openstack network list` or :command:`openstack "
-"network show`, the ``shared`` flag is calculated by the server based on the "
-"calling project and the RBAC entries for each network. For QoS objects use :"
-"command:`openstack network qos policy list` or :command:`openstack network "
-"qos policy show` respectively. If there is a wildcard entry, the ``shared`` "
-"flag is always set to ``True``. If there are only entries that share with "
-"specific projects, only the projects the object is shared to will see the "
-"flag as ``True`` and the rest will see the flag as ``False``."
-msgstr ""
-"Ketika Anda menjalankan :command:`openstack network list` atau :command:"
-"`openstack network show`, flag ``shared`` dihitung oleh server berdasarkan "
-"proyek panggilan dan entri RBAC untuk setiap jaringan. Untuk objek QoS "
-"gunakan :command:`openstack network qos policy list` atau :command:"
-"`openstack network qos policy show` masing-masing. Jika ada entri wildcard, "
-"flag ``shared`` selalu diatur ke ``True``. Jika hanya ada entri yang berbagi "
-"dengan proyek tertentu, hanya proyek yang objeknya berbagi akan melihat flag "
-"sebagai `` True`` dan sisanya akan melihat bendera sebagai ``False``."
-
-msgid ""
-"Where ``ha_vrrp_health_check_interval`` indicates how often in seconds the "
-"health check should run. The default value is ``0``, which indicates that "
-"the check should not run at all."
-msgstr ""
-"Dimana ``ha_vrrp_health_check_interval`` menunjukkan seberapa sering dalam "
-"detik dimana pengecekan kesehatan harus dijalankan. Nilai default adalah "
-"``0``, yang menunjukkan bahwa pengecekan tidak berjalan sama sekali."
-
-msgid ""
-"While NICs use MAC addresses to address network hosts, TCP/IP applications "
-"use IP addresses. The Address Resolution Protocol (ARP) bridges the gap "
-"between Ethernet and IP by translating IP addresses into MAC addresses."
-msgstr ""
-"Sementara NIC menggunakan alamat MAC untuk mengatasi host jaringan, aplikasi "
-"TCP/IP menggunakan alamat IP. Address Resolution Protocol (ARP) menjembatani "
-"kesenjangan antara Ethernet dan IP dengan menerjemahkan alamat IP ke alamat "
-"MAC."
-
-msgid "Whitelist PCI devices in nova-compute (Compute)"
-msgstr "Daftarlah (whitelist) perangkat PCI di nova-compute (Compute)"
-
-msgid "Whitelist PCI devices nova-compute (Compute)"
-msgstr "Daftarlah (whitelist) perangkat PCI nova-compute (Compute)"
-
-msgid "Why you need them"
-msgstr "Mengapa Anda membutuhkan mereka"
-
-msgid ""
-"With IPv4, the default_quota can be set to the number of absolute addresses "
-"any given project is allowed to consume from the pool. For example, with a "
-"quota of 128, I might get 203.0.113.128/26, 203.0.113.224/28, and still have "
-"room to allocate 48 more addresses in the future."
-msgstr ""
-"Dengan IPv4, default_quota yang dapat diatur untuk jumlah alamat absolut "
-"setiap proyek yang diberikan diperbolehkan untuk mengkonsumsi dari kolam. "
-"Misalnya, dengan kuota 128, saya mungkin akan mendapatkan 203.0.113.128/26, "
-"203.0.113.224/28, dan masih memiliki ruang untuk mengalokasikan 48 alamat "
-"lebih di masa depan."
-
-msgid ""
-"With IPv6 it is a little different. It is not practical to count individual "
-"addresses. To avoid ridiculously large numbers, the quota is expressed in "
-"the number of /64 subnets which can be allocated. For example, with a "
-"default_quota of 3, I might get 2001:db8:c18e:c05a::/64, 2001:"
-"db8:221c:8ef3::/64, and still have room to allocate one more prefix in the "
-"future."
-msgstr ""
-"Dengan IPv6 itu adalah sedikit berbeda. Hal ini tidak praktis untuk "
-"menghitung alamat individu. Untuk menghindari nomor besar yang tidak masuk "
-"akal, kuota dinyatakan dalam jumlah /64 subnet yang dapat dialokasikan. "
-"Misalnya, dengan default_quota dari 3, saya bisa mendapatkan 2001:db8:c18e:"
-"c05a::/64, 2001:db8:221c:8ef3::/64, dan masih memiliki ruang untuk "
-"mengalokasikan satu awalan (prefix) lebih di masa depan."
-
-msgid ""
-"With subnet pools, all addresses in use within the address scope are unique "
-"from the point of view of the address scope owner. Therefore, add more than "
-"one subnet pool to an address scope if the pools have different owners, "
-"allowing for delegation of parts of the address scope. Delegation prevents "
-"address overlap across the whole scope. Otherwise, you receive an error if "
-"two pools have the same address ranges."
-msgstr ""
-"Dengan kolam subnet, semua alamat digunakan dalam lingkup alamat menjadi "
-"unik dari sudut pandang pemilik lingkup alamat. Oleh karena itu, tambahkan "
-"lebih dari satu subnet kolam untuk lingkup alamat jika kolam memiliki "
-"pemilik yang berbeda, yang memungkinkan untuk pendelegasian bagian dari "
-"lingkup alamat. Delegasi mencegah alamat tumpang tindih di seluruh lingkup. "
-"Jika tidak, Anda menerima pesan kesalahan jika dua kolam memiliki rentang "
-"alamat yang sama."
-
-msgid ""
-"With subnets, the resource is the IP address space. Some subnets take more "
-"of it than others. For example, 203.0.113.0/24 uses 256 addresses in one "
-"subnet but 198.51.100.224/28 uses only 16. If address space is limited, the "
-"quota system can encourage efficient use of the space."
-msgstr ""
-"Dengan subnet, sumber daya adalah ruang alamat IP. Beberapa subnet mengambil "
-"lebih banyak daripada lainnya. Misalnya, 203.0.113.0/24 menggunakan 256 "
-"alamat dalam satu subnet tetapi 198.51.100.224/28 hanya menggunakan 16. Jika "
-"ruang alamat terbatas, sistem kuota dapat mendorong efisiensi penggunaan "
-"ruang."
-
-msgid ""
-"With the load balancer online, you can add a listener for plaintext HTTP "
-"traffic on port 80:"
-msgstr ""
-"Dengan load balancer online, Anda dapat menambahkan pendengar untuk lalu "
-"lintas HTTP plaintext pada port 80:"
-
-msgid ""
-"Within a network, use a unique physical network name for each segment which "
-"enables reuse of the same segmentation details between subnets. For example, "
-"using the same VLAN ID across all segments of a particular provider network. "
-"Similar to conventional provider networks, the operator must provision the "
-"layer-2 physical network infrastructure accordingly."
-msgstr ""
-"Dalam jaringan, gunakan nama jaringan fisik yang unik untuk setiap segmen "
-"yang mengaktifkan penggunaan kembali rincian segmentasi yang sama antara "
-"subnet. Misalnya, penggunaan VLAN ID yang sama di semua segmen dari jaringan "
-"operator tertentu. Serupa dengan jaringan penyedia konvensional, operator "
-"harus menyediakan infrastruktur jaringan fisik lapisan-2 yang sesuai."
-
-msgid "Workflow"
-msgstr "Workflow (alur kerja)"
-
-msgid "YES"
-msgstr "YES"
-
-msgid "Yes"
-msgstr "Yes"
-
-msgid ""
-"You can add a health monitor so that unresponsive servers are removed from "
-"the pool:"
-msgstr ""
-"Anda dapat menambahkan monitor kesehatan sehingga server yang tidak "
-"responsif dapat dikeluarkan dari kolam:"
-
-msgid ""
-"You can add another listener on port 443 for HTTPS traffic. LBaaS v2 offers "
-"SSL/TLS termination at the load balancer, but this example takes a simpler "
-"approach and allows encrypted connections to terminate at each member server."
-msgstr ""
-"Anda dapat menambahkan pendengar lain pada port 443 untuk lalu lintas HTTPS. "
-"LBaaS v2 menawarkan terminasi SSL/TLS pada penyeimbang beban, tapi contoh "
-"ini mengambil pendekatan yang lebih sederhana dan memungkinkan koneksi "
-"terenkripsi untuk mengakhiri di setiap server anggota."
-
-msgid ""
-"You can add this mechanism driver to an existing environment using either "
-"the Linux bridge or OVS mechanism drivers with only provider networks or "
-"provider and self-service networks. You can change the configuration of "
-"existing compute nodes or add compute nodes with the Macvtap mechanism "
-"driver. The example configuration assumes addition of compute nodes with the "
-"Macvtap mechanism driver to the :ref:`deploy-lb-selfservice` or :ref:`deploy-"
-"ovs-selfservice` deployment examples."
-msgstr ""
-"Anda dapat menambahkan driver mekanisme ini untuk lingkungan yang ada "
-"menggunakan jembatan Linux ataupun driver mekanisme OVS dengan hanya "
-"jaringan provider atau jaringan provider dan jaringan self-service. Anda "
-"dapat mengubah konfigurasi node komputasi yang ada atau menambahkan node "
-"komputasi dengan driver mekanisme Macvtap. Contoh konfigurasi mengasumsikan "
-"penambahan node komputasi dengan driver mekanisme Macvtap ke contoh "
-"pengerahan :ref:`deploy-lb-selfservice` atau :ref:`deploy-ovs-selfservice` ."
-
-msgid ""
-"You can adjust quotas using the :command:`neutron quota-update` command:"
-msgstr ""
-"Anda dapat mengatur kuota menggunakan perintah :command:`neutron quota-"
-"update`"
-
-msgid "You can also add a health monitor for the HTTPS pool:"
-msgstr "Anda juga dapat menambahkan monitor kesehatan untuk HTTPS pool:"
-
-msgid ""
-"You can also identify floating IP agent gateways in your environment to "
-"assist with verifying operation of the BGP speaker."
-msgstr ""
-"Anda juga dapat mengidentifikasi agent gateway IP mengambang di lingkungan "
-"Anda untuk membantu memverifikasi operasi BGP speaker."
-
-msgid ""
-"You can control the default number of DHCP agents assigned to a network by "
-"setting the following configuration option in the file ``/etc/neutron/"
-"neutron.conf``."
-msgstr ""
-"Anda dapat mengontrol jumlah default agen DHCP yang ditugaskan ke jaringan "
-"dengan menetapkan pilihan konfigurasi berikut di file ``/etc/neutron/neutron."
-"conf``."
-
-msgid ""
-"You can create a firewall policy without any rules and add rules later, as "
-"follows:"
-msgstr ""
-"Anda dapat membuat kebijakan firewall tanpa aturan dan menambahkan aturan "
-"kemudian, sebagai berikut:"
-
-msgid ""
-"You can create, update, list, delete, and show DSCP markings with the "
-"neutron client:"
-msgstr ""
-"Anda dapat membuat, memperbarui, mendaftar, menghapus, dan menunjukkan DSCP "
-"marking dengan klien neutron:"
-
-msgid "You can determine the maximum number of VFs a PF can support:"
-msgstr "You can determine the maximum number of VFs a PF can support:"
-
-msgid ""
-"You can disable security groups including basic and anti-spoofing rules by "
-"setting the port attribute ``port_security_enabled`` to ``False``."
-msgstr ""
-"Anda dapat menonaktifkan kelompok keamanan termasuk aturan dasar dan anti-"
-"spoofing dengan menetapkan atribut port ``port_security_enabled`` ke "
-"``False``."
-
-msgid ""
-"You can keep the DHCP and metadata agents on each compute node or move them "
-"to the network node."
-msgstr ""
-"Anda dapat menjaga agen DHCP dan metadata pada setiap node komputasi atau "
-"memindahkan mereka ke node jaringan."
-
-msgid ""
-"You can keep the DHCP and metadata agents on each compute node or move them "
-"to the network nodes."
-msgstr ""
-"Anda dapat menjaga DHCP dan agen metadata pada setiap node komputasi atau "
-"memindahkannya ke node jaringan."
-
-msgid ""
-"You can modify or delete this policy with the same constraints as any other "
-"RBAC ``access_as_external`` policy."
-msgstr ""
-"Anda dapat mengubah atau menghapus kebijakan ini dengan kendala yang sama "
-"seperti kebijakan ``access_as_external`` RBAC lainnya."
-
-msgid ""
-"You can modify rules at runtime. Rule modifications will be propagated to "
-"any attached port."
-msgstr ""
-"Anda dapat memodifikasi aturan pada saat runtime. Modifikasi aturan akan "
-"disebarkan ke port yang terpasang."
-
-msgid ""
-"You can now ping ``instance2`` directly because ``instance2`` shares the "
-"same address scope as the external network:"
-msgstr ""
-"Anda sekarang dapat ping ``instance2`` langsung karena share ``instance2`` "
-"lingkup alamat sama dengan jaringan eksternal:"
-
-msgid ""
-"You can repeat the ``--flow-classifier`` option to specify multiple flow "
-"classifiers for a port chain. Each flow classifier identifies a flow."
-msgstr ""
-"Anda dapat mengulangi ``opsi --flow-classifier`` untuk menentukan beberapa "
-"pengklasifikasi aliran untuk rantai port. Setiap klassifier aliran "
-"mengidentifikasi aliran."
-
-msgid ""
-"You can repeat the ``--port-pair-group`` option to specify additional port "
-"pair groups in the port chain. A port chain must contain at least one port "
-"pair group."
-msgstr ""
-"Anda dapat mengulangi opsi ``--port-pair-group`` untuk menentukan tambahan "
-"kelompok pasangan port dalam rantai port. Sebuah rantai port harus "
-"mengandung setidaknya satu kelompok pasangan port."
-
-msgid ""
-"You can repeat the ``--port-pair`` option for multiple port pairs of "
-"functionally equivalent service functions."
-msgstr ""
-"Anda dapat mengulangi opsi ``--port-pair`` untuk beberapa pasang port fungsi "
-"layanan setara secara fungsional."
-
-msgid ""
-"You can request a specific subnet from the pool. You need to specify a "
-"subnet that falls within the pool's prefixes. If the subnet is not already "
-"allocated, the request succeeds. You can leave off the IP version because it "
-"is deduced from the subnet pool."
-msgstr ""
-"Anda dapat meminta subnet dari kolam. Anda perlu menentukan subnet yang "
-"termasuk dalam awalan (prefix) kolam ini. Jika subnet tersebut belum "
-"dialokasikan, permintaan berhasil. Anda dapat meninggalkan versi IP karena "
-"disimpulkan dari subnet kolam."
-
-msgid ""
-"You can see that only the DHCP agent for HostB is hosting the ``net2`` "
-"network."
-msgstr ""
-"Anda dapat melihat bahwa hanya agen DHCP untuk HostB menjadi host jaringan "
-"``net2``."
-
-msgid ""
-"You can trigger MTU recalculation for existing networks by changing the MTU "
-"configuration and restarting the ``neutron-server`` service. However, "
-"propagating MTU calculations to the data plane may require users to delete "
-"and recreate ports on the network."
-msgstr ""
-"Anda dapat mencetuskan (trigger) perhitungan kembali MTU untuk jaringan yang "
-"ada dengan mengubah konfigurasi MTU dan restart layanan ``neutron-server``. "
-"Namun, pengerahan perhitungan MTU terhadap data plane mungkin mengharuskan "
-"pengguna untuk menghapus dan menciptakan port pada jaringan."
-
-msgid ""
-"You can use ``curl`` to verify connectivity through the load balancers to "
-"your web servers:"
-msgstr ""
-"Anda dapat menggunakan ``curl`` untuk memverifikasi konektivitas melalui "
-"load balancers ke server web Anda:"
-
-msgid ""
-"You can use the :command:`neutron lbaas-loadbalancer-show` command from the "
-"beginning of this section to locate the ``vip_port_id``. The ``vip_port_id`` "
-"is the ID of the network port that is assigned to the load balancer. You can "
-"associate a free floating IP address to the load balancer using :command:"
-"`neutron floatingip-associate`:"
-msgstr ""
-"Anda dapat menggunakan perintah :command:`neutron lbaas-loadbalancer-show` "
-"dari awal bagian ini untuk menemukan ``vip_port_id``. ``vip_port_id`` adalah "
-"ID dari port jaringan yang ditugaskan untuk penyeimbang beban. Anda dapat "
-"mengaitkan alamat IP mengambang bebas untuk penyeimbang beban menggunakan :"
-"command:`neutron floatingip-associate`:"
-
-msgid ""
-"You can view the load balancer status and IP address with the :command:"
-"`neutron lbaas-loadbalancer-show` command:"
-msgstr ""
-"Anda dapat melihat status penyeimbang beban dan alamat IP dengan perintah :"
-"command:`neutron lbaas-loadbalancer-show`:"
-
-msgid ""
-"You can virtualize these nodes for demonstration, training, or proof-of-"
-"concept purposes. However, you must use physical hosts for evaluation of "
-"performance or scaling."
-msgstr ""
-"Anda dapat virtualisasi node ini untuk demonstrasi, pelatihan, atau tujuan "
-"proof-of-concept. Namun, Anda harus menggunakan host fisik untuk evaluasi "
-"kinerja atau scaling (pembesaran)."
-
-msgid ""
-"You cannot ping ``instance1`` directly because the address scopes do not "
-"match:"
-msgstr ""
-"Anda tidak bisa ping langsung ``instance1`` karena lingkup alamat tidak "
-"cocok:"
-
-msgid ""
-"You may change them to work with your particular network infrastructure."
-msgstr ""
-"Anda dapat mengubah mereka untuk bekerja dengan infrastruktur jaringan "
-"tertentu."
-
-msgid ""
-"You must configure this option for all eligible DHCP agents and restart them "
-"to activate the values."
-msgstr ""
-"Anda harus mengkonfigurasi opsi ini untuk semua agen DHCP yang memenuhi "
-"syarat dan restart mereka untuk mengaktifkan nilai."
-
-msgid ""
-"\\* A firewall group can be applied to all ports on a given router in order "
-"to effect this."
-msgstr ""
-"\\* Sekelompok firewall dapat diterapkan ke semua port pada router yang "
-"diberikan supaya efek ini."
-
-msgid "\\*\\* This feature is planned for Ocata."
-msgstr "\\*\\* Fitur ini direncanakan untuk Ocata."
-
-msgid ""
-"`Basic Load-Balancer-as-a-Service operations `__"
-msgstr ""
-"`Basic Load-Balancer-as-a-Service operations `__"
-
-msgid ""
-"`Load-Balancer-as-a-Service (LBaaS) overview `__"
-msgstr ""
-"`Load-Balancer-as-a-Service (LBaaS) overview `__"
-
-msgid "``$1`` The operation being performed."
-msgstr "``$1``, Operasi yang dilakukan."
-
-msgid "``$IFACE`` The network interface upon which the request was received."
-msgstr "``$IFACE``, Antarmuka jaringan dimana permintaan itu diterima."
-
-msgid "``$PREFIX1`` The prefix being added/deleted by the Dibbler server."
-msgstr "``$PREFIX1``, Awalan yang ditambahkan/dihapus oleh server Dibbler."
-
-msgid "``$REMOTE_ADDR`` The IP address of the requesting Dibbler client."
-msgstr "``$REMOTE_ADDR`` Alamat IP dari klien Dibbler yang meminta."
-
-msgid "``ACTIVE``"
-msgstr "``ACTIVE``"
-
-msgid "``BUILD``"
-msgstr "``BUILD``"
-
-msgid "``DEGRADED``"
-msgstr "``DEGRADED``"
-
-msgid "``DOWN``"
-msgstr "``DOWN``"
-
-msgid "``ERROR``"
-msgstr "``ERROR``"
-
-msgid "``None``"
-msgstr "``None``"
-
-msgid ""
-"``admin_auth_url``: the Identity service admin authorization endpoint url. "
-"This endpoint will be used by the Networking service to authenticate as an "
-"admin user to create and update reverse lookup (PTR) zones."
-msgstr ""
-"``admin_auth_url``: admin authorization endpoint url layanan Identity. "
-"Endpoint ini akan digunakan oleh layanan Networking untuk mengotentikasi "
-"sebagai user admin membuat dan memperbarui zona reverse lookup (PTR)."
-
-msgid ""
-"``admin_password``: the password of the admin user to be used by Networking "
-"service to create and update reverse lookup (PTR) zones."
-msgstr ""
-"``admin_password``: password dari user admin untuk digunakan oleh layanan "
-"Networking membuat dan memperbarui zona reverse lookup (PTR)."
-
-msgid ""
-"``admin_tenant_name``: the project of the admin user to be used by the "
-"Networking service to create and update reverse lookup (PTR) zones."
-msgstr ""
-"``admin_tenant_name``: proyek user admin untuk digunakan oleh layanan "
-"Networking membuat dan memperbarui zona reverse lookup (PTR)."
-
-msgid ""
-"``admin_username``: the admin user to be used by the Networking service to "
-"create and update reverse lookup (PTR) zones."
-msgstr ""
-"``admin_username``: user admin yang digunakan oleh layanan Networking "
-"membuat dan memperbarui reverse zona lookup (PTR)."
-
-msgid ""
-"``allow_reverse_dns_lookup``: a boolean value specifying whether to enable "
-"or not the creation of reverse lookup (PTR) records."
-msgstr ""
-"``allow_reverse_dns_lookup``: nilai boolean yang menspesifikasi apakah "
-"mengaktifkan atau tidak mengaktifkan pembuatan reverse lookup (PTR) record."
-
-msgid "``auto-allocated-topology``"
-msgstr "``auto-allocated-topology``"
-
-msgid "``auto_allocated_network``"
-msgstr "``auto_allocated_network``"
-
-msgid "``auto_allocated_router``"
-msgstr "``auto_allocated_router``"
-
-msgid "``auto_allocated_subnet_v4``"
-msgstr "``auto_allocated_subnet_v4``"
-
-msgid "``auto_allocated_subnet_v6``"
-msgstr "``auto_allocated_subnet_v6``"
-
-msgid "``chain_parameters`` - Dictionary of chain parameters"
-msgstr "``chain_parameters`` - Dictionary of chain parameters"
-
-msgid "``create_network:provider:physical_network``"
-msgstr "``create_network:provider:physical_network``"
-
-msgid "``description`` - Readable description"
-msgstr "``description`` - Readable description"
-
-msgid "``destination_ip_prefix`` - Destination IP address or prefix"
-msgstr "``destination_ip_prefix`` - Destination IP address or prefix"
-
-msgid "``destination_port_range_max`` - Maximum destination protocol port"
-msgstr "``destination_port_range_max`` - Maximum destination protocol port"
-
-msgid "``destination_port_range_min`` - Minimum destination protocol port"
-msgstr "``destination_port_range_min`` - Minimum destination protocol port"
-
-msgid "``dhcpv6-stateful``"
-msgstr "``dhcpv6-stateful``"
-
-msgid "``dhcpv6-stateless``"
-msgstr "``dhcpv6-stateless``"
-
-msgid "``egress`` - Egress port"
-msgstr "``egress`` - Egress port"
-
-msgid "``ethertype`` - Ethertype (IPv4/IPv6)"
-msgstr "``ethertype`` - Ethertype (IPv4/IPv6)"
-
-msgid "``external-net``"
-msgstr "``external-net``"
-
-msgid "``flow_classifiers`` - List of flow classifier IDs"
-msgstr "``flow_classifiers`` - List of flow classifier IDs"
-
-msgid "``id`` - Flow classifier ID"
-msgstr "``id`` - Flow classifier ID"
-
-msgid "``id`` - Port chain ID"
-msgstr "``id`` - Port chain ID"
-
-msgid "``id`` - Port pair ID"
-msgstr "``id`` - Port pair ID"
-
-msgid "``id`` - Port pair group ID"
-msgstr "``id`` - Port pair group ID"
-
-msgid ""
-"``iface`` The name of the network interface on which to listen for prefix "
-"delegation messages."
-msgstr ""
-"`` Iface``, nama antarmuka jaringan untuk mendengarkan pesan awalan delegasi."
-
-msgid "``ingress`` - Ingress port"
-msgstr "``ingress`` - Ingress port"
-
-msgid ""
-"``insecure``: Disable SSL certificate validation. By default, certificates "
-"are validated."
-msgstr ""
-"``insecure``: Menonaktifkan validasi sertifikat SSL. Secara default, "
-"sertifikat divalidasi."
-
-msgid ""
-"``ipv4_ptr_zone_prefix_size``: the size in bits of the prefix for the IPv4 "
-"reverse lookup (PTR) zones."
-msgstr ""
-"``ipv4_ptr_zone_prefix_size``: ukuran dalam bit dari awalan (prefix) untuk "
-"zona IPv4 reverse lookup (PTR)."
-
-msgid "``ipv6_address_mode``"
-msgstr "``ipv6_address_mode``"
-
-msgid ""
-"``ipv6_address_mode``: Determines how instances obtain IPv6 address, default "
-"gateway, or optional information."
-msgstr ""
-"``Ipv6_address_mode``: Menentukan bagaimana instance mendapatkan alamat "
-"IPv6, gateway default, atau informasi opsional."
-
-msgid ""
-"``ipv6_ptr_zone_prefix_size``: the size in bits of the prefix for the IPv6 "
-"reverse lookup (PTR) zones."
-msgstr ""
-"``ipv6_ptr_zone_prefix_size``: ukuran dalam bit dari awalan (prefix) untuk "
-"zona IPv6 reverse lookup (PTR)."
-
-msgid "``ipv6_ra_mode``"
-msgstr "``ipv6_ra_mode``"
-
-msgid "``ipv6_ra_mode``: Determines who sends RA."
-msgstr "`` Ipv6_ra_mode``: Menentukan siapa yang mengirimkan RA."
-
-msgid "``l7_parameters`` - Dictionary of L7 parameters"
-msgstr "``l7_parameters`` - Dictionary of L7 parameters"
-
-msgid "``logical_destination_port`` - Destination port"
-msgstr "``logical_destination_port`` - Destination port"
-
-msgid "``logical_source_port`` - Source port"
-msgstr "``logical_source_port`` - Source port"
-
-msgid "``name`` - Readable name"
-msgstr "``name`` - Readable name"
-
-msgid "``network:floatingip_agent_gateway``"
-msgstr "``network:floatingip_agent_gateway``"
-
-msgid "``not-tags-any``"
-msgstr "``not-tags-any``"
-
-msgid "``not-tags``"
-msgstr "``not-tags``"
-
-msgid ""
-"``pd-length`` The length that delegated prefixes will be. This must be 64 to "
-"work with the current OpenStack Networking reference implementation."
-msgstr ""
-"`` Pd-length`` Panjang dimana prefiks akan mendelegasikan. Ini harus menjadi "
-"64 untuk bekerja dengan implementasi referensi OpenStack Networking saat ini."
-
-msgid ""
-"``pd-pool`` The larger prefix from which you want your delegated prefixes to "
-"come. The example given is sufficient if you do not need external network "
-"access, otherwise a unique globally routable prefix is necessary."
-msgstr ""
-"``pd-pool``, prefiks yang lebih besar dari mana Anda ingin prefiks Anda "
-"didelegasikan untuk datang. Contoh yang diberikan ini sudah cukup jika Anda "
-"tidak memerlukan akses jaringan eksternal, jika tidak prefix routable global "
-"yang unik ini diperlukan."
-
-msgid "``port_pair_groups`` - List of port pair group IDs"
-msgstr "``port_pair_groups`` - List of port pair group IDs"
-
-msgid "``port_pairs`` - List of service function port pairs"
-msgstr "``port_pairs`` - List of service function port pairs"
-
-msgid "``protocol`` - IP protocol"
-msgstr "``protocol`` - IP protocol"
-
-msgid "``router``"
-msgstr "``router``"
-
-msgid ""
-"``script`` Points to a script to be run when a prefix is delegated or "
-"released. This is only needed if you want instances on your subnets to have "
-"external network access. More on this below."
-msgstr ""
-"``script``, poin untuk script untuk dijalankan ketika awalan didelegasikan "
-"atau dilepaskan. Ini hanya diperlukan jika Anda ingin instance pada subnet "
-"Anda untuk memiliki akses jaringan eksternal. Lebih di bawah ini."
-
-msgid ""
-"``service_function_parameters`` - Dictionary of service function parameters"
-msgstr ""
-"``service_function_parameters`` - Dictionary of service function parameters"
-
-msgid "``slaac``"
-msgstr "``slaac``"
-
-msgid "``source_ip_prefix`` - Source IP address or prefix"
-msgstr "``source_ip_prefix`` - Source IP address or prefix"
-
-msgid "``source_port_range_max`` - Maximum source protocol port"
-msgstr "``source_port_range_max`` - Maximum source protocol port"
-
-msgid "``source_port_range_min`` - Minimum source protocol port"
-msgstr "``source_port_range_min`` - Minimum source protocol port"
-
-msgid "``subnet_allocation``"
-msgstr "``subnet_allocation``"
-
-msgid "``tags-any``"
-msgstr "``tags-any``"
-
-msgid "``tags``"
-msgstr "``tags``"
-
-msgid "``tenant_id`` - Project ID"
-msgstr "``tenant_id`` - Project ID"
-
-msgid "``update_network:provider:physical_network``"
-msgstr "``update_network:provider:physical_network``"
-
-msgid "``url``: the OpenStack DNS service public endpoint URL."
-msgstr "``url``: URL endpoint publik layanan DNS OpenStack."
-
-msgid ""
-"``vhost-user`` requires file descriptor-backed shared memory. Currently, the "
-"only way to request this is by requesting large pages. This is why instances "
-"spawned on hosts with OVS-DPDK must request large pages. The aggregate "
-"flavor affinity filter can be used to associate flavors with large page "
-"support to hosts with OVS-DPDK support."
-msgstr ""
-"``vhost-user`` membutuhkan memori bersama descriptor-backed file. Saat ini, "
-"satu-satunya cara untuk meminta ini dengan meminta halaman besar. Inilah "
-"sebabnya mengapa instance melahirkan pada host dengan OVS-DPDK harus meminta "
-"halaman besar. Filter afinitas flavor agregat dapat digunakan untuk "
-"mengasosiasikan flavor dengan dukungan halaman besar untuk host dengan "
-"dukungan OVS-DPDK."
-
-msgid "a set of iptables rules"
-msgstr "Seperangkat aturan iptables"
-
-msgid "availability zone candidates for the resource"
-msgstr "Calon zona ketersediaan untuk sumber"
-
-msgid "availability zones for the resource"
-msgstr "Zona ketersediaan untuk sumber"
-
-msgid "availability_zone_hints"
-msgstr "availability_zone_hints"
-
-msgid "availability_zones"
-msgstr "availability_zones"
-
-msgid "classless inter-domain routing (CIDR)"
-msgstr "Classless inter-domain routing (CIDR)"
-
-msgid "compute0001"
-msgstr "compute0001"
-
-msgid "compute0002"
-msgstr "compute0002"
-
-msgid "compute0101"
-msgstr "compute0101"
-
-msgid "compute0102"
-msgstr "compute0102"
-
-msgid "dependencies."
-msgstr "ketergantungan."
-
-msgid "dhcpv6-stateful"
-msgstr "dhcpv6-stateful"
-
-msgid "dhcpv6-stateless"
-msgstr "dhcpv6-stateless"
-
-msgid "dns_domain"
-msgstr "dns_domain"
-
-msgid "dns_name"
-msgstr "dns_name"
-
-msgid "dnsmasq for providing IP addresses to virtual machines using DHCP"
-msgstr ""
-"dnsmasq untuk menyediakan alamat IP untuk mesin virtual menggunakan DHCP"
-
-msgid "dotted quad"
-msgstr "dotted quad"
-
-msgid "fd00:198:51:100::/64"
-msgstr "fd00:198:51:100::/64"
-
-msgid "fd00:198:51:100::1"
-msgstr "fd00:198:51:100::1"
-
-msgid "fd00:203:0:113::/64"
-msgstr "fd00:203:0:113::/64"
-
-msgid "fd00:203:0:113::1"
-msgstr "fd00:203:0:113::1"
-
-msgid ""
-"iptables to implement SNAT so instances can connect out to the public "
-"internet, and to ensure that virtual machines are permitted to communicate "
-"with dnsmasq using DHCP"
-msgstr ""
-"iptables untuk melaksanakan SNAT sehingga instance dapat terhubung ke "
-"internet publik, dan untuk memastikan bahwa mesin virtual diizinkan untuk "
-"berkomunikasi dengan dnsmasq menggunakan DHCP"
-
-msgid "ipv6 address mode"
-msgstr "ipv6 address mode"
-
-msgid "ipv6 ra mode"
-msgstr "ipv6 ra mode"
-
-msgid "ipv6_address_mode"
-msgstr "ipv6_address_mode"
-
-msgid "ipv6_ra_mode"
-msgstr "ipv6_ra_mode"
-
-msgid "ipv6_ra_mode and ipv6_address_mode combinations"
-msgstr "Kombinasi ipv6_ra_mode dan ipv6_address_mode"
-
-msgid "libvirt 1.2.13"
-msgstr "libvirt 1.2.13"
-
-msgid "libvirt 1.2.17"
-msgstr "libvirt 1.2.17"
-
-msgid "libvirt network implementation"
-msgstr "Implementasi jaringan libvirt"
-
-msgid "list of string"
-msgstr "daftar string"
-
-msgid "network"
-msgstr "network"
-
-msgid "network0001"
-msgstr "network0001"
-
-msgid "network0002"
-msgstr "network0002"
-
-msgid "networks"
-msgstr "network (jaringan)"
-
-msgid "neutron, networking, OpenStack"
-msgstr "neutron, networking, OpenStack"
-
-msgid "neutron-linuxbridge-cleanup utility"
-msgstr " Utilitas neutron-linuxbridge-cleanup."
-
-msgid "no"
-msgstr "no"
-
-msgid "ports"
-msgstr "port"
-
-msgid "rack 1"
-msgstr "rack 1"
-
-msgid "rack 2"
-msgstr "rack 2"
-
-msgid "radvd A,M,O"
-msgstr "radvd A,M,O"
-
-msgid "router"
-msgstr "router"
-
-msgid "routers"
-msgstr "router"
-
-msgid "segment 1"
-msgstr "segment 1"
-
-msgid "segment 2"
-msgstr "segment 2"
-
-msgid "segment1"
-msgstr "segment1"
-
-msgid "segment2"
-msgstr "segment2"
-
-msgid "slaac"
-msgstr "slaac"
-
-msgid "subnet (IPv4)"
-msgstr "subnet (IPv4)"
-
-msgid "subnet (IPv6)"
-msgstr "subnet (IPv6)"
-
-msgid "subnetpools"
-msgstr "subnetpool"
-
-msgid "subnets"
-msgstr "subnet"
-
-msgid "type driver / mech driver"
-msgstr "type driver / mech driver"
-
-msgid "v1"
-msgstr "v1"
-
-msgid "v2"
-msgstr "v2"
-
-msgid "yes"
-msgstr "yes"
diff --git a/doc/networking-guide/source/locale/ja/LC_MESSAGES/networking-guide.po b/doc/networking-guide/source/locale/ja/LC_MESSAGES/networking-guide.po
deleted file mode 100644
index d3c2cae111..0000000000
--- a/doc/networking-guide/source/locale/ja/LC_MESSAGES/networking-guide.po
+++ /dev/null
@@ -1,9025 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# Copyright (C) 2015, OpenStack contributors
-# This file is distributed under the same license as the Networking Guide package.
-#
-# Translators:
-# nao nishijima , 2015
-# Sasuke(Kyohei MORIYAMA) <>, 2015
-# Tomoyuki KATO , 2014-2015
-# yfukuda , 2014
-# Yoshiteru Takizawa , 2015
-# Akihiro Motoki , 2015. #zanata
-# KATO Tomoyuki , 2015. #zanata
-# OpenStack Infra , 2015. #zanata
-# Akihiro Motoki , 2016. #zanata
-# KATO Tomoyuki , 2016. #zanata
-# 小羽根 陸 , 2016. #zanata
-# KATO Tomoyuki