diff --git a/RELEASENOTES.rst b/RELEASENOTES.rst
index c9ceb2ae7a..0cedc31f29 100644
--- a/RELEASENOTES.rst
+++ b/RELEASENOTES.rst
@@ -20,6 +20,11 @@ Next release: Mitaka
 
 * Tracking of release notes in RELEASENOTES.rst file.
 
+Configuration Reference
+-----------------------
+
+* RST conversion completed
+
 HA Guide
 --------
diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf
index a8dd6d1609..924b95e4ad 100644
--- a/doc-tools-check-languages.conf
+++ b/doc-tools-check-languages.conf
@@ -28,6 +28,7 @@ DOC_DIR="doc/"
 declare -A SPECIAL_BOOKS=(
     ["admin-guide-cloud"]="RST"
     ["arch-design"]="RST"
+    ["config-reference"]="RST"
     ["image-guide"]="RST"
     ["install-guide"]="RST"
     ["networking-guide"]="RST"
@@ -35,7 +36,6 @@ declare -A SPECIAL_BOOKS=(
     ["user-guide-admin"]="RST"
     # Skip in-progress guides
     ["contributor-guide"]="skip"
-    ["config-ref-rst"]="skip"
     # Do not translate
     ["cli-reference"]="skip"
     # Skip arch design while its being revised
diff --git a/doc/common/tables/aodh-amqp.xml b/doc/common/tables/aodh-amqp.xml
deleted file mode 100644
index 77183e07a1..0000000000
--- a/doc/common/tables/aodh-amqp.xml
+++ /dev/null
@@ -1,38 +0,0 @@
Description of AMQP configuration options
Configuration option = Default value | Description
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The driver(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop.
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
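These [DEFAULT] options come from the oslo.messaging library and are set in aodh.conf. A rough sketch of how they are typically combined; the option names shown (transport_url, control_exchange, notification_driver, notification_topics) are the usual oslo.messaging names and are an assumption here, since the rows above only preserve defaults and descriptions, and the values are placeholders:

    [DEFAULT]
    # Assumed oslo.messaging option names; verify against your release.
    transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
    control_exchange = aodh
    notification_driver = messagingv2
    notification_topics = notifications
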
diff --git a/doc/common/tables/aodh-api.xml b/doc/common/tables/aodh-api.xml
deleted file mode 100644
index de7b806a20..0000000000
--- a/doc/common/tables/aodh-api.xml
+++ /dev/null
@@ -1,53 +0,0 @@
Description of API configuration options
Configuration option = Default value | Description
[api]
= 0.0.0.0(StrOpt) The listen IP for the aodh API server.
= api_paste.ini(StrOpt) Configuration file for WSGI definition of API.
= False(BoolOpt) Toggle Pecan Debug Middleware.
= 8042(IntOpt) The port for the aodh API server.
= 1(IntOpt) Number of workers for aodh API server.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
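The API options live in the [api] and [oslo_middleware] sections of aodh.conf. A minimal sketch, under the assumption that the option names are host, port, workers, pecan_debug, and max_request_body_size (the rows above no longer carry the names themselves):

    [api]
    # Assumed option names matching the defaults listed above.
    host = 0.0.0.0
    port = 8042
    workers = 2
    pecan_debug = False

    [oslo_middleware]
    max_request_body_size = 114688
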
diff --git a/doc/common/tables/aodh-auth.xml b/doc/common/tables/aodh-auth.xml
deleted file mode 100644
index 77d0bd2fbf..0000000000
--- a/doc/common/tables/aodh-auth.xml
+++ /dev/null
@@ -1,70 +0,0 @@
Description of authorization configuration options
Configuration option = Default value | Description
[service_credentials]
= False(BoolOpt) Disables X.509 certificate validation when an SSL connection to Identity Service is established.
= http://localhost:5000/v2.0(StrOpt) Auth URL to use for OpenStack service access.
= None(StrOpt) Certificate chain for SSL validation.
= publicURL(StrOpt) Type of endpoint in Identity service catalog to use for communication with OpenStack services.
= admin(StrOpt) Password to use for OpenStack service access.
= default(StrOpt) The domain id of the user project
= admin(StrOpt) The user project name
= None(StrOpt) Region name to use for OpenStack service endpoints.
= (StrOpt) Tenant ID to use for OpenStack service access.
= admin(StrOpt) Tenant name to use for OpenStack service access.
= default(StrOpt) The domain id of the user
= aodh(StrOpt) User name to use for OpenStack service access.
diff --git a/doc/common/tables/aodh-auth_token.xml b/doc/common/tables/aodh-auth_token.xml
deleted file mode 100644
index 2cbeff26f5..0000000000
--- a/doc/common/tables/aodh-auth_token.xml
+++ /dev/null
@@ -1,174 +0,0 @@
Description of authorization token configuration options
Configuration option = Default value | Description
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
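These options configure the keystonemiddleware auth_token filter in the [keystone_authtoken] section. A minimal sketch using the admin-credential style described above; auth_uri, identity_uri, admin_user, admin_password, admin_tenant_name, and memcached_servers are the conventional keystonemiddleware option names, and the values are placeholders:

    [keystone_authtoken]
    # Placeholder endpoints and credentials; adjust for your deployment.
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_user = aodh
    admin_password = AODH_PASS
    admin_tenant_name = service
    memcached_servers = controller:11211
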
diff --git a/doc/common/tables/aodh-common.xml b/doc/common/tables/aodh-common.xml
deleted file mode 100644
index f8db8562aa..0000000000
--- a/doc/common/tables/aodh-common.xml
+++ /dev/null
@@ -1,94 +0,0 @@
Description of common configuration options
Configuration option = Default value | Description
[DEFAULT]
= -1(IntOpt) Maximum count of actions for each state of an alarm, non-positive number means no limit.
= 60(IntOpt) Period of evaluation cycle, should be >= than configured pipeline interval for collection of underlying meters.
= 60(IntOpt) TTL of event alarm caches, in seconds. Set to 0 to disable caching.
= alarm.all(StrOpt) The topic that aodh uses for event alarm evaluation.
= 64(IntOpt) Size of executor thread pool.
= http://localhost:8041(StrOpt) URL to Gnocchi.
= localhost(StrOpt) Name of this node, which must be valid in an AMQP key. Can be an opaque identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
= 600(IntOpt) Timeout seconds for HTTP requests. Set it to None to disable timeout.
= None(ListOpt) Memcached servers or None for in process cache.
= 1(IntOpt) Number of workers for notification service. A single notification agent is enabled by default.
= alarm_notifier(StrOpt) The topic that aodh uses for alarm notifier messages.
= None(IntOpt) Maximum number of alarms defined for a project.
= True(BoolOpt) Record alarm change events.
= (StrOpt) SSL Client certificate for REST notifier.
= (StrOpt) SSL Client private key for REST notifier.
= 0(IntOpt) Number of retries for REST notifier
= True(BoolOpt) Whether to verify the SSL Server certificate when calling alarm action.
= None(IntOpt) Maximum number of alarms defined for a user.
diff --git a/doc/common/tables/aodh-coordination.xml b/doc/common/tables/aodh-coordination.xml
deleted file mode 100644
index 87568f1013..0000000000
--- a/doc/common/tables/aodh-coordination.xml
+++ /dev/null
@@ -1,34 +0,0 @@
Description of Coordination configuration options
Configuration option = Default value | Description
[coordination]
= None(StrOpt) The backend URL to use for distributed coordination. If left empty, per-deployment central agent and per-host compute agent won't do workload partitioning and will only function correctly if a single instance of that service is running.
= 10.0(FloatOpt) Number of seconds between checks to see if group membership has changed
= 1.0(FloatOpt) Number of seconds between heartbeats for distributed coordination.
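Workload partitioning only takes effect once a coordination backend is configured. A sketch, assuming the option names are backend_url, heartbeat, and check_watchers and that a Redis-backed tooz driver is used:

    [coordination]
    # Assumed option names; backend_url accepts a tooz driver URL.
    backend_url = redis://controller:6379
    heartbeat = 1.0
    check_watchers = 10.0
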
diff --git a/doc/common/tables/aodh-cors.xml b/doc/common/tables/aodh-cors.xml
deleted file mode 100644
index e13d794623..0000000000
--- a/doc/common/tables/aodh-cors.xml
+++ /dev/null
@@ -1,73 +0,0 @@
Description of CORS configuration options
Configuration option = Default value | Description
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
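These map to the oslo.middleware CORS filter. A sketch for allowing a single dashboard origin, assuming the standard option names allowed_origin, allow_credentials, and max_age:

    [cors]
    # Placeholder origin; list the domains that may call the API.
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    max_age = 3600
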
diff --git a/doc/common/tables/aodh-database.xml b/doc/common/tables/aodh-database.xml
deleted file mode 100644
index 40feaf6ca7..0000000000
--- a/doc/common/tables/aodh-database.xml
+++ /dev/null
@@ -1,110 +0,0 @@
Description of database configuration options
Configuration option = Default value | Description
[database]
= None(StrOpt) The connection string used to connect to the alarm database - rather use ${database.connection}
= -1(IntOpt) Number of seconds that alarm histories are kept in the database for (<= 0 means forever).
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
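In practice only the connection string is usually set explicitly; the pool and retry options keep their defaults. A sketch, assuming the option names connection and alarm_history_time_to_live (here set to one week) and a placeholder database URL:

    [database]
    # Placeholder connection string; assumed option names.
    connection = mysql+pymysql://aodh:AODH_DBPASS@controller/aodh
    alarm_history_time_to_live = 604800
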
diff --git a/doc/common/tables/aodh-logging.xml b/doc/common/tables/aodh-logging.xml
deleted file mode 100644
index a6a0e09c9c..0000000000
--- a/doc/common/tables/aodh-logging.xml
+++ /dev/null
@@ -1,102 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation.
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s .
= None(StrOpt) (Optional) The base directory used for relative --log-file paths.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines.
= True(BoolOpt) Log output to standard error.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
diff --git a/doc/common/tables/aodh-policy.xml b/doc/common/tables/aodh-policy.xml
deleted file mode 100644
index 6a31243785..0000000000
--- a/doc/common/tables/aodh-policy.xml
+++ /dev/null
@@ -1,34 +0,0 @@
Description of policy configuration options
Configuration option = Default value | Description
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
diff --git a/doc/common/tables/aodh-qpid.xml b/doc/common/tables/aodh-qpid.xml
deleted file mode 100644
index b4ab64705f..0000000000
--- a/doc/common/tables/aodh-qpid.xml
+++ /dev/null
@@ -1,78 +0,0 @@
Description of Qpid configuration options
Configuration option = Default value | Description
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
diff --git a/doc/common/tables/aodh-rabbitmq.xml b/doc/common/tables/aodh-rabbitmq.xml
deleted file mode 100644
index 9635fcfd87..0000000000
--- a/doc/common/tables/aodh-rabbitmq.xml
+++ /dev/null
@@ -1,118 +0,0 @@
Description of RabbitMQ configuration options
Configuration option = Default value | Description
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
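A typical [oslo_messaging_rabbit] section sets only the broker location and credentials and leaves the rest at the defaults. A sketch with placeholder values; rabbit_host, rabbit_port, rabbit_userid, rabbit_password, and rabbit_use_ssl are the standard oslo.messaging option names:

    [oslo_messaging_rabbit]
    # Placeholder broker address and credentials.
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_use_ssl = False
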
diff --git a/doc/common/tables/aodh-redis.xml b/doc/common/tables/aodh-redis.xml
deleted file mode 100644
index 8ad02194b6..0000000000
--- a/doc/common/tables/aodh-redis.xml
+++ /dev/null
@@ -1,41 +0,0 @@
Description of Redis configuration options
Configuration option = Default value | Description
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= None(StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_ring]
= /etc/oslo/matchmaker_ring.json(StrOpt) Matchmaker ring file (JSON).
diff --git a/doc/common/tables/aodh-rpc.xml b/doc/common/tables/aodh-rpc.xml
deleted file mode 100644
index 3106a2f512..0000000000
--- a/doc/common/tables/aodh-rpc.xml
+++ /dev/null
@@ -1,93 +0,0 @@
Description of RPC configuration options
Configuration option = Default value | Description
[DEFAULT]
= 300(IntOpt) Heartbeat frequency.
= 600(IntOpt) Heartbeat time-to-live.
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
diff --git a/doc/common/tables/aodh-zeromq.xml b/doc/common/tables/aodh-zeromq.xml
deleted file mode 100644
index 98dead90dc..0000000000
--- a/doc/common/tables/aodh-zeromq.xml
+++ /dev/null
@@ -1,50 +0,0 @@
Description of ZeroMQ configuration options
Configuration option = Default value | Description
[DEFAULT]
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= local(StrOpt) MatchMaker driver.
= 9501(IntOpt) ZeroMQ receiver listening port.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
diff --git a/doc/common/tables/ceilometer-amqp.xml b/doc/common/tables/ceilometer-amqp.xml
deleted file mode 100644
index 6223f90f5d..0000000000
--- a/doc/common/tables/ceilometer-amqp.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of AMQP configuration options
Configuration option = Default value | Description
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The driver(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop.
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use for notifications. If not set, we fall back to the same configuration used for RPC.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
diff --git a/doc/common/tables/ceilometer-api.xml b/doc/common/tables/ceilometer-api.xml
deleted file mode 100644
index bf4950cfc1..0000000000
--- a/doc/common/tables/ceilometer-api.xml
+++ /dev/null
@@ -1,122 +0,0 @@
Description of API configuration options
Configuration option = Default value | Description
[DEFAULT]
= api_paste.ini(StrOpt) Configuration file for WSGI definition of API.
= event_pipeline.yaml(StrOpt) Configuration file for event pipeline definition.
= pipeline.yaml(StrOpt) Configuration file for pipeline definition.
= 20(IntOpt) Polling interval for pipeline file configuration in seconds.
= False(BoolOpt) Refresh Event Pipeline configuration on-the-fly.
= False(BoolOpt) Refresh Pipeline configuration on-the-fly.
= (ListOpt) List of metadata keys reserved for metering use. And these keys are additional to the ones included in the namespace.
= 256(IntOpt) Limit on length of reserved metadata values.
= metering.(ListOpt) List of metadata prefixes reserved for metering use.
[api]
= None(BoolOpt) Set True to redirect alarms URLs to aodh. Default autodetection by querying keystone.
= None(StrOpt) The endpoint of Aodh to redirect alarms URLs to Aodh API. Default autodetection by querying keystone.
= 100(IntOpt) Default maximum number of items returned by API request.
= None(BoolOpt) Set True to disable resource/meter/sample URLs. Default autodetection by querying keystone.
= 0.0.0.0(StrOpt) The listen IP for the ceilometer API server.
= False(BoolOpt) Toggle Pecan Debug Middleware.
= 8777(PortOpt) The port for the ceilometer API server.
= 1(IntOpt) Number of workers for api, default value is 1.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
diff --git a/doc/common/tables/ceilometer-auth.xml b/doc/common/tables/ceilometer-auth.xml
deleted file mode 100644
index c7ebd66cac..0000000000
--- a/doc/common/tables/ceilometer-auth.xml
+++ /dev/null
@@ -1,76 +0,0 @@
Description of authorization configuration options
Configuration option = Default value | Description
[service_credentials]
= None(Opt) Config Section from which to load plugin specific options
= None(Opt) Authentication type to load
= None(StrOpt) PEM encoded Certificate Authority to use when verifying HTTPs connections.
= None(StrOpt) PEM encoded client certificate cert file
= False(BoolOpt) Verify HTTPS connections.
= public(StrOpt) Type of endpoint in Identity service catalog to use for communication with OpenStack services.
= None(StrOpt) PEM encoded client certificate key file
= None(StrOpt) Region name to use for OpenStack service endpoints.
= None(IntOpt) Timeout value for http requests
diff --git a/doc/common/tables/ceilometer-auth_token.xml b/doc/common/tables/ceilometer-auth_token.xml
deleted file mode 100644
index fdec6d7dac..0000000000
--- a/doc/common/tables/ceilometer-auth_token.xml
+++ /dev/null
@@ -1,188 +0,0 @@
Description of authorization token configuration options
Configuration option = Default value | Description
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(Opt) Config Section from which to load plugin specific options
= None(Opt) Authentication type to load
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
diff --git a/doc/common/tables/ceilometer-collector.xml b/doc/common/tables/ceilometer-collector.xml
deleted file mode 100644
index 7f329a751e..0000000000
--- a/doc/common/tables/ceilometer-collector.xml
+++ /dev/null
@@ -1,75 +0,0 @@
Description of collector configuration options
Configuration option = Default value | Description
[collector]
= False(BoolOpt) Requeue the event on the collector event queue when the collector fails to dispatch it.
= False(BoolOpt) Requeue the sample on the collector sample queue when the collector fails to dispatch it. This is only valid if the sample come from the notifier publisher.
= 0.0.0.0(StrOpt) Address to which the UDP socket is bound. Set to an empty string to disable.
= 4952(PortOpt) Port to which the UDP socket is bound.
= 1(IntOpt) Number of workers for collector service. default value is 1.
[dispatcher_file]
= 0(IntOpt) The max number of the files to keep.
= None(StrOpt) Name and the location of the file to record meters.
= 0(IntOpt) The max size of the file.
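A sketch of a collector that listens for UDP samples and also records meters to a rotated file. The option names are assumptions based on the descriptions above: udp_address, udp_port, and workers in [collector]; file_path, max_bytes, and backup_count in [dispatcher_file]; the paths and sizes are placeholders:

    [collector]
    udp_address = 0.0.0.0
    udp_port = 4952
    workers = 2

    [dispatcher_file]
    file_path = /var/log/ceilometer/meters.log
    max_bytes = 10000000
    backup_count = 5
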
diff --git a/doc/common/tables/ceilometer-common.xml b/doc/common/tables/ceilometer-common.xml
deleted file mode 100644
index 1c5b970793..0000000000
--- a/doc/common/tables/ceilometer-common.xml
+++ /dev/null
@@ -1,123 +0,0 @@
Description of common configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) To reduce polling agent load, samples are sent to the notification agent in a batch. To gain higher throughput at the cost of load set this to False.
= 64(IntOpt) Size of executor thread pool.
= localhost(StrOpt) Name of this node, which must be valid in an AMQP key. Can be an opaque identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
= 600(IntOpt) Timeout seconds for HTTP requests. Set it to None to disable timeout.
= None(ListOpt) Memcached servers or None for in process cache.
= ['compute', 'central'](MultiChoicesOpt) Polling namespace(s) to be used while resource polling
= [](MultiChoicesOpt) List of pollsters (or wildcard templates) to be used while polling
= /etc/ceilometer/rootwrap.conf(StrOpt) Path to the rootwrap configuration file to use for running commands as root.
= 0(IntOpt) To reduce large requests at same time to Nova or other components from different compute agents, shuffle start time of polling task.
= False(BoolOpt) Indicates if expirer expires only samples. If set true, expired samples will be deleted, but residual resource and meter definition data will remain.
[compute]
= False(BoolOpt) Enable work-load partitioning, allowing multiple compute agents to be run simultaneously.
[coordination]
= None(StrOpt) The backend URL to use for distributed coordination. If left empty, per-deployment central agent and per-host compute agent won't do workload partitioning and will only function correctly if a single instance of that service is running.
= 10.0(FloatOpt) Number of seconds between checks to see if group membership has changed
= 1.0(FloatOpt) Number of seconds between heartbeats for distributed coordination.
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
[meter]
= meters.yaml(StrOpt) Configuration file for defining meter notifications.
[polling]
= None(StrOpt) Work-load partitioning group prefix. Use only if you want to run multiple polling agents with different config files. For each sub-group of the agent pool with the same partitioning_group_prefix a disjoint subset of pollsters should be loaded.
diff --git a/doc/common/tables/ceilometer-cors.xml b/doc/common/tables/ceilometer-cors.xml
deleted file mode 100644
index 4e669af2c6..0000000000
--- a/doc/common/tables/ceilometer-cors.xml
+++ /dev/null
@@ -1,91 +0,0 @@
Description of CORS configuration options
Configuration option = Default value | Description
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(ListOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(ListOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
diff --git a/doc/common/tables/ceilometer-database.xml b/doc/common/tables/ceilometer-database.xml
deleted file mode 100644
index f4f2b65823..0000000000
--- a/doc/common/tables/ceilometer-database.xml
+++ /dev/null
@@ -1,147 +0,0 @@
Description of database configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) DEPRECATED - Database connection string.
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= 512(IntOpt) The max length of resources id in DB2 nosql, the value should be larger than len(hostname) * 2 as compute node's resource id is <hostname>_<nodename>.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= None(StrOpt) The connection string used to connect to the event database. (if unset, connection is used)
= -1(IntOpt) Number of seconds that events are kept in the database for (<= 0 means forever).
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= None(StrOpt) The connection string used to connect to the metering database. (if unset, connection is used)
= -1(IntOpt) Number of seconds that samples are kept in the database for (<= 0 means forever).
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
diff --git a/doc/common/tables/ceilometer-debug.xml b/doc/common/tables/ceilometer-debug.xml
deleted file mode 100644
index 966c522abc..0000000000
--- a/doc/common/tables/ceilometer-debug.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Allow novaclient's debug log output. (Use default_log_levels instead)
diff --git a/doc/common/tables/ceilometer-dispatcher_gnocchi.xml b/doc/common/tables/ceilometer-dispatcher_gnocchi.xml
deleted file mode 100644
index acbd004d1c..0000000000
--- a/doc/common/tables/ceilometer-dispatcher_gnocchi.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of Gnocchi dispatcher configuration options
Configuration option = Default value | Description
[dispatcher_gnocchi]
= None(StrOpt) The archive policy to use when the dispatcher create a new metric.
= gnocchi(StrOpt) Gnocchi project used to filter out samples generated by Gnocchi service activity
= True(BoolOpt) Filter out samples generated by Gnocchi service activity
= gnocchi_resources.yaml(StrOpt) The Yaml file that defines mapping between samples and gnocchi resources/metrics
= None(StrOpt) URL to Gnocchi. default: autodetection
diff --git a/doc/common/tables/ceilometer-dispatcher_http.xml b/doc/common/tables/ceilometer-dispatcher_http.xml
deleted file mode 100644
index 7fef257eb7..0000000000
--- a/doc/common/tables/ceilometer-dispatcher_http.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of HTTP dispatcher configuration options
Configuration option = Default value | Description
[dispatcher_http]
= None(StrOpt) The target for event data where the http request will be sent to. If this is not set, it will default to same as Sample target.
= (StrOpt) The target where the http request will be sent. If this is not set, no data will be posted. For example: target = http://hostname:1234/path
= 5(IntOpt) The max time in seconds to wait for a request to timeout.
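Only the target option is strictly required for the HTTP dispatcher to post data; the example value follows the form given in the description above. The names event_target and timeout are assumed for the other two rows:

    [dispatcher_http]
    target = http://hostname:1234/path
    event_target = http://hostname:1234/events
    timeout = 5
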
diff --git a/doc/common/tables/ceilometer-dispatchers.xml b/doc/common/tables/ceilometer-dispatchers.xml
deleted file mode 100644
index 13c5ff83a7..0000000000
--- a/doc/common/tables/ceilometer-dispatchers.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of dispatchers configuration options
Configuration option = Default value | Description
[DEFAULT]
= ['database'](MultiStrOpt) Dispatchers to process event data.
= ['database'](MultiStrOpt) Dispatchers to process metering data.
diff --git a/doc/common/tables/ceilometer-events.xml b/doc/common/tables/ceilometer-events.xml
deleted file mode 100644
index 086c6b831a..0000000000
--- a/doc/common/tables/ceilometer-events.xml
+++ /dev/null
@@ -1,71 +0,0 @@
Description of events configuration options
Configuration option = Default value | Description
[event]
= event_definitions.yaml(StrOpt) Configuration file for event definitions.
= False(BoolOpt) Drop notifications if no event definition matches. (Otherwise, we convert them with just the default traits)
= [](MultiStrOpt) Store the raw notification for select priority levels (info and/or error). By default, raw details are not captured.
[notification]
= True(BoolOpt) Acknowledge message when event persistence fails.
= False(BoolOpt) Save event details.
= 1(IntOpt) Number of workers for notification service, default value is 1.
= False(BoolOpt) Enable workload partitioning, allowing multiple notification agents to be run simultaneously.
diff --git a/doc/common/tables/ceilometer-exchange.xml b/doc/common/tables/ceilometer-exchange.xml
deleted file mode 100644
index 7635a507ab..0000000000
--- a/doc/common/tables/ceilometer-exchange.xml
+++ /dev/null
@@ -1,100 +0,0 @@
Description of exchange configuration options
Configuration option = Default value | Description
[DEFAULT]
= ceilometer(StrOpt) Exchange name for ceilometer notifications.
= cinder(StrOpt) Exchange name for Cinder notifications.
= central(StrOpt) Exchange name for DNS notifications.
= glance(StrOpt) Exchange name for Glance notifications.
= heat(StrOpt) Exchange name for Heat notifications
= ['nova', 'glance', 'neutron', 'cinder'](MultiStrOpt) Exchanges name to listen for notifications.
= ironic(StrOpt) Exchange name for Ironic notifications.
= keystone(StrOpt) Exchange name for Keystone notifications.
= magnum(StrOpt) Exchange name for Magnum notifications.
= neutron(StrOpt) Exchange name for Neutron notifications.
= nova(StrOpt) Exchange name for Nova notifications.
= sahara(StrOpt) Exchange name for Data Processing notifications.
= openstack(StrOpt) Source for samples emitted on this instance.
= swift(StrOpt) Exchange name for Swift notifications.
= trove(StrOpt) Exchange name for DBaaS notifications.
diff --git a/doc/common/tables/ceilometer-glance.xml b/doc/common/tables/ceilometer-glance.xml
deleted file mode 100644
index d46a52bf6d..0000000000
--- a/doc/common/tables/ceilometer-glance.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of glance configuration options
Configuration option = Default value | Description
[DEFAULT]
= 0(IntOpt) Number of items to request in each paginated Glance API request (parameter used by glanceclient). If this is less than or equal to 0, page size is not specified (default value in glanceclient is used).
diff --git a/doc/common/tables/ceilometer-inspector.xml b/doc/common/tables/ceilometer-inspector.xml
deleted file mode 100644
index 56cd3b1544..0000000000
--- a/doc/common/tables/ceilometer-inspector.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of inspector configuration options
Configuration option = Default value | Description
[DEFAULT]
= libvirt(StrOpt) Inspector to use for inspecting the hypervisor layer. Known inspectors are libvirt, hyperv, vmware, xenapi and powervm.
= kvm(StrOpt) Libvirt domain type.
= (StrOpt) Override the default libvirt URI (which is dependent on libvirt_type).
diff --git a/doc/common/tables/ceilometer-ipmi.xml b/doc/common/tables/ceilometer-ipmi.xml
deleted file mode 100644
index d63f327d19..0000000000
--- a/doc/common/tables/ceilometer-ipmi.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of IPMI configuration options
Configuration option = Default value | Description
[ipmi]
= 3(IntOpt) Number of retries upon Intel Node Manager initialization failure
= 3(IntOpt) Tolerance of IPMI/NM polling failures before disable this pollster. Negative indicates retrying forever.
diff --git a/doc/common/tables/ceilometer-logging.xml b/doc/common/tables/ceilometer-logging.xml
deleted file mode 100644
index 8e9a35167d..0000000000
--- a/doc/common/tables/ceilometer-logging.xml
+++ /dev/null
@@ -1,128 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s(StrOpt) Format string for user_identity field of the logging_context_format_string
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
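These logging options are the common oslo.log set. A minimal ceilometer.conf sketch, assuming the usual oslo.log option names (debug, verbose, log_dir, use_syslog), which are not reproduced in the deleted rows::

    [DEFAULT]
    # Option names below are assumed oslo.log names; verify against the RST config reference.
    # Log at DEBUG instead of the default INFO.
    debug = True
    # Keep INFO enabled; False would make WARNING the default level.
    verbose = True
    # Base directory for relative log file paths.
    log_dir = /var/log/ceilometer
    # The existing syslog format is deprecated, so leave syslog off.
    use_syslog = False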
diff --git a/doc/common/tables/ceilometer-magnetodb.xml b/doc/common/tables/ceilometer-magnetodb.xml deleted file mode 100644 index 497c79b46a..0000000000 --- a/doc/common/tables/ceilometer-magnetodb.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of MagnetoDB configuration options
Configuration option = Default valueDescription
[DEFAULT]
= magnetodb(StrOpt) Exchange name for Magnetodb notifications.
-
diff --git a/doc/common/tables/ceilometer-notification.xml b/doc/common/tables/ceilometer-notification.xml deleted file mode 100644 index e923108581..0000000000 --- a/doc/common/tables/ceilometer-notification.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of notification configuration options
Configuration option = Default valueDescription
[notification]
= True(BoolOpt) WARNING: Ceilometer historically offered the ability to store events as meters. This usage is NOT advised as it can flood the metering database and cause performance degradation.
= 10(IntOpt) Number of queues to parallelize workload across. This value should be larger than the number of active notification agents for optimal results.
-
diff --git a/doc/common/tables/ceilometer-policy.xml b/doc/common/tables/ceilometer-policy.xml deleted file mode 100644 index 5ec56cf8b0..0000000000 --- a/doc/common/tables/ceilometer-policy.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of policy configuration options
Configuration option = Default valueDescription
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
-
diff --git a/doc/common/tables/ceilometer-qpid.xml b/doc/common/tables/ceilometer-qpid.xml deleted file mode 100644 index 4dc5bb1f19..0000000000 --- a/doc/common/tables/ceilometer-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first with the payload, and a second to confirm that the other side has finished sending the payload. We are going to remove this in the N release, but we must remain backward compatible in the meantime. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/ceilometer-rabbitmq.xml b/doc/common/tables/ceilometer-rabbitmq.xml deleted file mode 100644 index 6baa8e1575..0000000000 --- a/doc/common/tables/ceilometer-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first with the payload, and a second to confirm that the other side has finished sending the payload. We are going to remove this in the N release, but we must remain backward compatible in the meantime. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
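Most deployments only need a handful of these RabbitMQ options. A hedged ceilometer.conf sketch, assuming the standard oslo.messaging option names (rabbit_host, rabbit_port, rabbit_userid, rabbit_password, rabbit_virtual_host, rabbit_ha_queues) and a placeholder broker host and password::

    [oslo_messaging_rabbit]
    # Option names assumed from oslo.messaging; host and password are placeholders.
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_virtual_host = /
    # Changing this requires wiping the RabbitMQ database, as the help text warns.
    rabbit_ha_queues = False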
diff --git a/doc/common/tables/ceilometer-redis.xml b/doc/common/tables/ceilometer-redis.xml deleted file mode 100644 index 5db3cf474a..0000000000 --- a/doc/common/tables/ceilometer-redis.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/ceilometer-rgw.xml b/doc/common/tables/ceilometer-rgw.xml deleted file mode 100644 index aebd232e2a..0000000000 --- a/doc/common/tables/ceilometer-rgw.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Rados gateway configuration options
Configuration option = Default valueDescription
[rgw_admin_credentials]
= None(StrOpt) Access key for Radosgw Admin.
= None(StrOpt) Secret key for Radosgw Admin.
-
diff --git a/doc/common/tables/ceilometer-rpc.xml b/doc/common/tables/ceilometer-rpc.xml deleted file mode 100644 index d0e33b2842..0000000000 --- a/doc/common/tables/ceilometer-rpc.xml +++ /dev/null @@ -1,167 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
[notification]
= [](MultiStrOpt) Messaging URLs to listen for notifications. Example: transport://user:pass@host1:port[,hostN:portN]/virtual_host (DEFAULT/transport_url is used if empty)
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
[publisher]
= change this for valid signing(StrOpt) Secret value for signing messages. Set value empty if signing is not required to avoid computational overhead.
[publisher_notifier]
= event(StrOpt) The topic that ceilometer uses for event notifications.
= metering(StrOpt) The topic that ceilometer uses for metering notifications.
= messagingv2(StrOpt) The driver that ceilometer uses for metering notifications.
-
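Tying the RPC pieces together, a minimal [DEFAULT] sketch for the messaging driver itself, assuming the option names rpc_backend and rpc_response_timeout (not shown in the deleted rows)::

    [DEFAULT]
    # Names assumed from oslo.messaging.
    # rabbit is the default driver; qpid and zmq are the alternatives listed above.
    rpc_backend = rabbit
    # Seconds to wait for a response from a call.
    rpc_response_timeout = 60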
diff --git a/doc/common/tables/ceilometer-service_types.xml b/doc/common/tables/ceilometer-service_types.xml deleted file mode 100644 index 667b6cb619..0000000000 --- a/doc/common/tables/ceilometer-service_types.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of service types configuration options
Configuration option = Default valueDescription
[service_types]
= image(StrOpt) Glance service type.
= energy(StrOpt) Kwapi service type.
= network(StrOpt) Neutron service type.
= compute(StrOpt) Nova service type.
= object-store(StrOpt) Radosgw service type.
= object-store(StrOpt) Swift service type.
-
diff --git a/doc/common/tables/ceilometer-storage.xml b/doc/common/tables/ceilometer-storage.xml deleted file mode 100644 index ef0b075187..0000000000 --- a/doc/common/tables/ceilometer-storage.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of storage configuration options
Configuration option = Default valueDescription
[storage]
= 10(IntOpt) Maximum number of connection retries during startup. Set to -1 to specify an infinite retry count.
= 10(IntOpt) Interval (in seconds) between retries of connection.
-
diff --git a/doc/common/tables/ceilometer-swift.xml b/doc/common/tables/ceilometer-swift.xml deleted file mode 100644 index e62223b2db..0000000000 --- a/doc/common/tables/ceilometer-swift.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of swift configuration options
Configuration option = Default valueDescription
[DEFAULT]
= AUTH_(StrOpt) Swift reseller prefix. Must be on par with reseller_prefix in proxy-server.conf.
-
diff --git a/doc/common/tables/ceilometer-tripleo.xml b/doc/common/tables/ceilometer-tripleo.xml deleted file mode 100644 index 292c32c634..0000000000 --- a/doc/common/tables/ceilometer-tripleo.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of TripleO configuration options
Configuration option = Default valueDescription
[hardware]
= snmp.yaml(StrOpt) Configuration file for defining hardware snmp meters.
= ro_snmp_user(StrOpt) SNMPd user name of all nodes running in the cloud.
= password(StrOpt) SNMPd password of all the nodes running in the cloud.
= snmp://(StrOpt) URL scheme to use for hardware nodes.
-
diff --git a/doc/common/tables/ceilometer-vmware.xml b/doc/common/tables/ceilometer-vmware.xml deleted file mode 100644 index 7c2b6af663..0000000000 --- a/doc/common/tables/ceilometer-vmware.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VMware configuration options
Configuration option = Default valueDescription
[vmware]
= 10(IntOpt) Number of times a VMware vSphere API may be retried.
= None(StrOpt) CA bundle file to use in verifying the vCenter server certificate.
= (StrOpt) IP address of the VMware vSphere host.
= (StrOpt) Password of VMware vSphere.
= 443(PortOpt) Port of the VMware vSphere host.
= (StrOpt) Username of VMware vSphere.
= False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
= 0.5(FloatOpt) Sleep time in seconds for polling an ongoing async task.
= None(StrOpt) Optional vim service WSDL location, e.g. http://<server>/vimService.wsdl. Optional override of the default location for bug workarounds.
-
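For a vCenter-backed poller, a hedged [vmware] sketch; the option names (host_ip, host_port, host_username, host_password, ca_file, insecure) are assumed from the ceilometer vSphere inspector, and the host and credentials are placeholders::

    [vmware]
    # Names assumed; host and credentials are placeholders.
    host_ip = 203.0.113.10
    host_port = 443
    host_username = ceilometer
    host_password = VSPHERE_PASS
    # Prefer CA verification over insecure = True.
    ca_file = /etc/ssl/certs/vcenter-ca.pem
    insecure = False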
diff --git a/doc/common/tables/ceilometer-xenapi.xml b/doc/common/tables/ceilometer-xenapi.xml deleted file mode 100644 index 3a34bf34ce..0000000000 --- a/doc/common/tables/ceilometer-xenapi.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of XenAPI configuration options
Configuration option = Default valueDescription
[xenapi]
= None(StrOpt) Password for connection to XenServer/Xen Cloud Platform.
= None(StrOpt) URL for connection to XenServer/Xen Cloud Platform.
= root(StrOpt) Username for connection to XenServer/Xen Cloud Platform.
-
diff --git a/doc/common/tables/ceilometer-zaqar.xml b/doc/common/tables/ceilometer-zaqar.xml deleted file mode 100644 index 91d8bc2db2..0000000000 --- a/doc/common/tables/ceilometer-zaqar.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of Zaqar configuration options
Configuration option = Default valueDescription
[DEFAULT]
= zaqar(StrOpt) Exchange name for Messaging service notifications.
-
diff --git a/doc/common/tables/ceilometer-zeromq.xml b/doc/common/tables/ceilometer-zeromq.xml deleted file mode 100644 index 3a26d1e832..0000000000 --- a/doc/common/tables/ceilometer-zeromq.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= 100(IntOpt) Number of retries to find free port number before fail with ZMQBindError.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= 65536(IntOpt) Maximal port number for random ports range.
= 49152(IntOpt) Minimal port number for random ports range.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses a broker or not.
-
diff --git a/doc/common/tables/cinder-amqp.xml b/doc/common/tables/cinder-amqp.xml deleted file mode 100644 index be4707c118..0000000000 --- a/doc/common/tables/cinder-amqp.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The driver(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
diff --git a/doc/common/tables/cinder-api.xml b/doc/common/tables/cinder-api.xml deleted file mode 100644 index 0c6dd018bc..0000000000 --- a/doc/common/tables/cinder-api.xml +++ /dev/null @@ -1,193 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= api-paste.ini(StrOpt) File name for the paste.deploy config for cinder-api
= True(BoolOpt) Enables or disables rate limit of the API.
= 3600(IntOpt) Cache volume availability zones in memory for the provided duration in seconds
= None(StrOpt) Backend override of host value.
= 525600(IntOpt) Default timeout for CLI operations in minutes. For example, LUN migration is a typical long running operation, which depends on the LUN size and the load of the array. An upper bound in the specific deployment can be set to avoid unnecessary long wait. By default, it is 365 days long.
= True(BoolOpt) DEPRECATED: Deploy v1 of the Cinder API.
= True(BoolOpt) Deploy v2 of the Cinder API.
= {}(StrOpt) User defined capabilities, a JSON formatted string specifying key/value pairs. The key/value pairs can be used by the CapabilitiesFilter to select between backends when requests specify volume types. For example, specifying a service level or the geographical location of a backend, then creating a volume type to allow the user to select by these different properties.
= False(BoolOpt) Force LUN creation even if the full threshold of pool is reached.
= (StrOpt) List of Management IP addresses (separated by commas)
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= 1000(IntOpt) The maximum number of items that a collection resource returns in a single response
= 114688(IntOpt) Max size for body of a request
= None(StrOpt) Base URL that will be presented to users in links to the OpenStack Volume API
= (ListOpt) Specify list of extensions to load when using osapi_volume_extension option with cinder.api.contrib.select_extensions
= ['cinder.api.contrib.standard_extensions'](MultiStrOpt) osapi volume extension to load
= 0.0.0.0(StrOpt) IP address on which OpenStack Volume API listens
= 8776(IntOpt) Port on which OpenStack Volume API listens
= None(IntOpt) Number of workers for OpenStack Volume API service. The default is equal to the number of CPUs available.
= (StrOpt) Password for Redis server (optional).
= -1(IntOpt) Max size allowed per volume, in gigabytes
= 6379(IntOpt) Use this port to connect to redis host.
= None(StrOpt) Public url to use for versions endpoint. The default is None, which will use the request's host_url attribute to populate the URL base. If Cinder is operating behind a proxy, you will want to change this to represent the proxy's URL.
= name, status, metadata, availability_zone(ListOpt) Volume filter options which non-admin user could use to query volumes. Default values are: ['name', 'status', 'metadata', 'availability_zone']
= cinder.transfer.api.API(StrOpt) The full class name of the volume transfer API class
= cinder.volume.api.API(StrOpt) The full class name of the volume API class to use
= volume-%s(StrOpt) Template string to be used to generate volume names
= -1.0(FloatOpt) Multiplier used for weighing volume number. Negative numbers mean to spread vs stack.
= 16(IntOpt) The number of characters in the autogenerated auth key.
= 8(IntOpt) The number of characters in the salt.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
[oslo_versionedobjects]
= False(BoolOpt) Make exception message format errors fatal
-
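A minimal cinder.conf sketch for the API service itself, assuming the usual option names osapi_volume_listen, osapi_volume_listen_port, osapi_volume_workers, enable_v1_api, and enable_v2_api (the names are not reproduced in the deleted rows)::

    [DEFAULT]
    # Names assumed from the cinder tree; defaults match the table above.
    osapi_volume_listen = 0.0.0.0
    osapi_volume_listen_port = 8776
    # Defaults to the number of available CPUs when unset.
    osapi_volume_workers = 4
    # v1 is deprecated; keep v2 on.
    enable_v1_api = False
    enable_v2_api = True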
diff --git a/doc/common/tables/cinder-auth.xml b/doc/common/tables/cinder-auth.xml deleted file mode 100644 index 44b669980a..0000000000 --- a/doc/common/tables/cinder-auth.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization configuration options
Configuration option = Default valueDescription
[DEFAULT]
= keystone(StrOpt) The strategy to use for auth. Supports noauth, keystone, and deprecated.
-
diff --git a/doc/common/tables/cinder-auth_token.xml b/doc/common/tables/cinder-auth_token.xml deleted file mode 100644 index 5a8352529d..0000000000 --- a/doc/common/tables/cinder-auth_token.xml +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used; use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
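Most of these authtoken options keep their defaults; the handful below are the ones typically set. A sketch assuming the option names auth_uri, identity_uri, admin_user, admin_password, admin_tenant_name, and memcached_servers (admin_user and admin_password are named in the deprecation note above; the rest are assumed), with placeholder endpoints and credentials::

    [keystone_authtoken]
    # Endpoints and credentials are placeholders; option names partly assumed.
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_user = cinder
    admin_password = CINDER_PASS
    admin_tenant_name = service
    # Optional memcached-backed token cache instead of in-process caching.
    memcached_servers = controller:11211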
diff --git a/doc/common/tables/cinder-backups.xml b/doc/common/tables/cinder-backups.xml deleted file mode 100644 index 33f7b1217b..0000000000 --- a/doc/common/tables/cinder-backups.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of backups configuration options
Configuration option = Default valueDescription
[DEFAULT]
= cinder.backup.api.API(StrOpt) The full class name of the volume backup API class
= zlib(StrOpt) Compression algorithm (None to disable)
= cinder.backup.drivers.swift(StrOpt) Driver to use for backups.
= cinder.backup.manager.BackupManager(StrOpt) Full class name for the Manager for volume backup
= 2(IntOpt) Backup metadata version to be used when backing up volume metadata. If this number is bumped, make sure the service doing the restore supports the new version.
= backup-%s(StrOpt) Template string to be used to generate backup names
= 10(IntOpt) The number of chunks or objects, for which one Ceilometer notification will be sent
= $state_path/backup(StrOpt) Path specifying where to store backups.
= False(BoolOpt) Offload pending backup delete during backup service startup.
= 120(IntOpt) Interval, in seconds, between two progress notifications reporting the backup status
= cinder-backup(StrOpt) The topic that volume backup nodes listen on
= snapshot-%s(StrOpt) Template string to be used to generate snapshot names
= True(BoolOpt) Create volume from snapshot at the host where snapshot resides
-
diff --git a/doc/common/tables/cinder-backups_ceph.xml b/doc/common/tables/cinder-backups_ceph.xml deleted file mode 100644 index 6119643017..0000000000 --- a/doc/common/tables/cinder-backups_ceph.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Ceph backup driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 134217728(IntOpt) The chunk size, in bytes, that a backup is broken into before transfer to the Ceph object store.
= /etc/ceph/ceph.conf(StrOpt) Ceph configuration file to use.
= backups(StrOpt) The Ceph pool where volume backups are stored.
= 0(IntOpt) RBD stripe count to use when creating a backup image.
= 0(IntOpt) RBD stripe unit to use when creating a backup image.
= cinder(StrOpt) The Ceph user to connect with. Default here is to use the same user as for Cinder volumes. If not using cephx this should be set to None.
= True(BoolOpt) If True, always discard excess bytes when restoring volumes i.e. pad with zeroes.
-
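Putting the Ceph backup options together, a hedged cinder.conf sketch; backup_driver appears in the general backups table, while the backup_ceph_* names are assumed from the Liberty cinder tree::

    [DEFAULT]
    # backup_ceph_* names are assumed; values mirror the defaults above.
    backup_driver = cinder.backup.drivers.ceph
    backup_ceph_conf = /etc/ceph/ceph.conf
    # The table's default user is cinder; a dedicated user is a deployment choice.
    backup_ceph_user = cinder-backup
    backup_ceph_pool = backups
    # 128 MiB chunks, matching the default above.
    backup_ceph_chunk_size = 134217728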
diff --git a/doc/common/tables/cinder-backups_nfs.xml b/doc/common/tables/cinder-backups_nfs.xml deleted file mode 100644 index f067c80b61..0000000000 --- a/doc/common/tables/cinder-backups_nfs.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of NFS backup driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Custom directory to use for backups.
= True(BoolOpt) Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the backend storage. The default value is True to enable the timer.
= 1999994880(IntOpt) The maximum size in bytes of the files used to hold backups. If the volume being backed up exceeds this size, then it will be backed up into multiple files. backup_file_size must be a multiple of backup_sha_block_size_bytes.
= None(StrOpt) Mount options passed to the NFS client. See NFS man page for details.
= $state_path/backup_mount(StrOpt) Base dir containing mount point for NFS share.
= 32768(IntOpt) The size in bytes at which changes are tracked for incremental backups. backup_file_size has to be a multiple of backup_sha_block_size_bytes.
= None(StrOpt) NFS share in hostname:path, ipv4addr:path, or "[ipv6addr]:path" format.
-
diff --git a/doc/common/tables/cinder-backups_swift.xml b/doc/common/tables/cinder-backups_swift.xml deleted file mode 100644 index 4e7678cc2f..0000000000 --- a/doc/common/tables/cinder-backups_swift.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Swift backup driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= per_user(StrOpt) Swift authentication mechanism
= 1(StrOpt) Swift authentication version. Specify "1" for auth 1.0, or "2" for auth 2.0
= 32768(IntOpt) The size in bytes at which changes are tracked for incremental backups. backup_swift_object_size has to be a multiple of backup_swift_block_size.
= None(StrOpt) Location of the CA certificate file to use for swift client requests.
= volumebackups(StrOpt) The default Swift container to use
= True(BoolOpt) Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the Swift backend storage. The default value is True to enable the timer.
= None(StrOpt) Swift key for authentication
= 52428800(IntOpt) The size in bytes of Swift backup objects
= 3(IntOpt) The number of retries to make for Swift operations
= 2(IntOpt) The backoff time in seconds between Swift retries
= None(StrOpt) Swift tenant/account name. Required when connecting to an auth 2.0 system
= None(StrOpt) The URL of the Swift endpoint
= None(StrOpt) Swift user name
= object-store:swift:publicURL(StrOpt) Info to match when looking for swift in the service catalog. Format is: separated values of the form: <service_type>:<service_name>:<endpoint_type> - Only used if backup_swift_url is unset
-
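The Swift driver needs little beyond credentials and a container. A sketch assuming the option names backup_driver, backup_swift_url, backup_swift_auth, backup_swift_container, and backup_swift_object_size, with a placeholder endpoint::

    [DEFAULT]
    # Names assumed from the cinder tree; the URL is a placeholder.
    backup_driver = cinder.backup.drivers.swift
    # Leave unset to discover Swift via swift_catalog_info instead.
    backup_swift_url = http://controller:8080/v1/AUTH_
    backup_swift_auth = per_user
    backup_swift_container = volumebackups
    # 50 MiB objects, matching the default above.
    backup_swift_object_size = 52428800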
diff --git a/doc/common/tables/cinder-backups_tsm.xml b/doc/common/tables/cinder-backups_tsm.xml deleted file mode 100644 index 09f65021c4..0000000000 --- a/doc/common/tables/cinder-backups_tsm.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of IBM Tivoli Storage Manager backup driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Enable or Disable compression for backups
= password(StrOpt) TSM password for the running username
= backup(StrOpt) Volume prefix for the backup id when backing up to TSM
-
diff --git a/doc/common/tables/cinder-block-device.xml b/doc/common/tables/cinder-block-device.xml deleted file mode 100644 index 98aee227a5..0000000000 --- a/doc/common/tables/cinder-block-device.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of block device configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (ListOpt) List of all available devices
-
diff --git a/doc/common/tables/cinder-blockbridge.xml b/doc/common/tables/cinder-blockbridge.xml deleted file mode 100644 index 46388a450a..0000000000 --- a/doc/common/tables/cinder-blockbridge.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of BlockBridge EPS volume driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) IP address/hostname of Blockbridge API.
= None(IntOpt) Override HTTPS port to connect to Blockbridge API server.
= None(StrOpt) Blockbridge API password (for auth scheme 'password')
= token(StrOpt) Blockbridge API authentication scheme (token or password)
= None(StrOpt) Blockbridge API token (for auth scheme 'token')
= None(StrOpt) Blockbridge API user (for auth scheme 'password')
= None(StrOpt) Default pool name if unspecified.
= {'OpenStack': '+openstack'}(DictOpt) Defines the set of exposed pools and their associated backend query strings
-
diff --git a/doc/common/tables/cinder-ca.xml b/doc/common/tables/cinder-ca.xml deleted file mode 100644 index a30f2cf611..0000000000 --- a/doc/common/tables/cinder-ca.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CA and SSL configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) CA certificate file to use to verify connecting clients
= None(StrOpt) Certificate file to use when starting the server securely
= None(StrOpt) Private key file to use when starting the server securely
-
diff --git a/doc/common/tables/cinder-cloudbyte.xml b/doc/common/tables/cinder-cloudbyte.xml deleted file mode 100644 index bbe315476c..0000000000 --- a/doc/common/tables/cinder-cloudbyte.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CloudByte volume driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) CloudByte storage specific account name. This maps to a project name in OpenStack.
= {'latency': '15', 'iops': '10', 'graceallowed': 'false', 'iopscontrol': 'true', 'memlimit': '0', 'throughput': '0', 'tpcontrol': 'false', 'networkspeed': '0'}(DictOpt) These values will be used for CloudByte storage's addQos API call.
= None(StrOpt) Driver will use this API key to authenticate against the CloudByte storage's management interface.
= None(StrOpt) This corresponds to the discovery authentication group in CloudByte storage. Chap users are added to this group. Driver uses the first user found for this group. Default value is None.
= 3(IntOpt) Will confirm a successful volume creation in CloudByte storage by making this many attempts.
= 5(IntOpt) A retry value in seconds. Will be used by the driver to check if volume creation was successful in CloudByte storage.
= 3(IntOpt) Will confirm a successful volume deletion in CloudByte storage by making this many attempts.
= 5(IntOpt) A retry value in seconds. Will be used by the driver to check if volume deletion was successful in CloudByte storage.
= {'compression': 'off', 'deduplication': 'off', 'blocklength': '512B', 'sync': 'always', 'protocoltype': 'ISCSI', 'recordsize': '16k'}(DictOpt) These values will be used for CloudByte storage's createVolume API call.
= None(StrOpt) This corresponds to the name of Tenant Storage Machine (TSM) in CloudByte storage. A volume will be created in this TSM.
-
diff --git a/doc/common/tables/cinder-common.xml b/doc/common/tables/cinder-common.xml deleted file mode 100644 index 1a231cb15d..0000000000 --- a/doc/common/tables/cinder-common.xml +++ /dev/null @@ -1,279 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) If the requested Cinder availability zone is unavailable, fall back to the value of default_availability_zone, then storage_availability_zone, instead of failing.
= (StrOpt) Password for specified CHAP account name.
= (StrOpt) CHAP user name.
= /etc/chelsio-iscsi/chiscsi.conf(StrOpt) Chiscsi (CXT) global defaults configuration file
= None(StrOpt) ID of the project which will be used as the Cinder internal tenant.
= None(StrOpt) ID of the user to be used in volume operations as the Cinder internal tenant.
= 900(IntOpt) Timeout for client connections' socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= cinder.compute.nova.API(StrOpt) The full class name of the compute API class to use
= cinder.consistencygroup.api.API(StrOpt) The full class name of the consistencygroup API class
= None(StrOpt) Default availability zone for new volumes. If not set, the storage_availability_zone option value is used as the default for new volumes.
= None(StrOpt) Default volume type to use
= None(StrOpt) Namespace for driver private data values to be saved in.
= False(BoolOpt) If set to True the http client will validate the SSL certificate of the backend endpoint.
= False(BoolOpt) Enables the Force option on upload_to_image. This enables running upload_volume on in-use volumes for backends that support it.
= True(BoolOpt) Services to be added to the available pool on create
= None(StrOpt) If this option is specified then the end time specified is used instead of the end time of the last completed audit period.
= False(BoolOpt) If this is set to True, attachment of volumes for image transfer will be aborted when multipathd is not running. Otherwise, it will fallback to single path.
= 64(IntOpt) Size of executor thread pool.
= localhost(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a host name, FQDN, or IP address.
= /etc/iet/ietd.conf(StrOpt) IET configuration file
= (ListOpt) The list of secondary IP addresses of the iSCSI daemon
= True(BoolOpt) There are two types of target configurations: managed (replicate to another configured backend) or unmanaged (replicate to a device not managed by Cinder).
= 20.0(FloatOpt) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times of the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times of the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 will be ignored and the default value will be used instead.
= None(ListOpt) Memcached servers or None for in process cache.
= False(BoolOpt) Enable monkey patching
= (ListOpt) List of modules/decorators to monkey patch
= 10.0.0.1(StrOpt) IP address of this host
= False(BoolOpt) Whether snapshots count against gigabyte quota
= 3(IntOpt) Number of times to attempt to run flakey shell commands
= None(StrOpt) Auth URL associated with the OpenStack privileged account.
= None(StrOpt) OpenStack privileged account username. Used for requests to other services (such as Nova) that require an account with special rights.
= None(StrOpt) Password associated with the OpenStack privileged account.
= None(StrOpt) Tenant name associated with the OpenStack privileged account.
= 60(IntOpt) Range, in seconds, to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0)
= 60(IntOpt) Interval, in seconds, between running periodic tasks
= cinder.replication.api.API(StrOpt) The full class name of the volume replication API class
= None(ListOpt) List of k/v pairs representing a replication target for this backend device. For unmanaged targets the format is: {'key-1'='val1' 'key-2'='val2'...},{...} and for managed devices it is simply a list of valid configured backend_names that the driver supports replicating to: backend-a,backend-b...
= 10(IntOpt) Interval, in seconds, between nodes reporting state to datastore
= 300(IntOpt) Global backend request timeout, in seconds
= 0(IntOpt) The percentage of backend capacity that is reserved
= /etc/cinder/rootwrap.conf(StrOpt) Path to the rootwrap configuration file to use for running commands as root
= False(BoolOpt) Send the volume and snapshot create and delete notifications generated in the specified period.
= 60(IntOpt) Maximum time since last check-in for a service to be considered up
= clean.sqlite(StrOpt) File name of clean sqlite db
= $state_path/ssh_known_hosts(StrOpt) File containing SSH host keys for the systems with which Cinder needs to communicate. OPTIONAL: Default=$state_path/ssh_known_hosts
= None(StrOpt) If this option is specified then the start time specified is used instead of the start time of the last completed audit period.
= /var/lib/cinder(StrOpt) Top-level directory for maintaining cinder's state
= nova(StrOpt) Availability zone of this node
= False(BoolOpt) Option to enable strict host key checking. When set to "True" Cinder will only connect to systems with a host key present in the configured "ssh_hosts_key_file". When set to "False" the host key will be saved upon first connection and used for subsequent connections. Default=False
= True(BoolOpt) Sets the value of TCP_KEEPALIVE (True/False) for each server socket.
= None(IntOpt) Sets the value of TCP_KEEPCNT for each server socket. Not supported on OS X.
= None(IntOpt) Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not supported on OS X.
= 600(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.
= 0(IntOpt) Count of reservations until usage is refreshed
= False(BoolOpt) Option to enable/disable CHAP authentication for targets.
= False(BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
= True(BoolOpt) If False, closes the client socket connection explicitly. Set it to True to maintain backward compatibility. The recommended setting is False.
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
-
diff --git a/doc/common/tables/cinder-compute.xml b/doc/common/tables/cinder-compute.xml deleted file mode 100644 index 97b0480f0d..0000000000 --- a/doc/common/tables/cinder-compute.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Compute configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Allow performing insecure SSL requests to nova
= None(StrOpt) Location of ca certificates file to use for nova client requests.
= compute:Compute Service:adminURL(StrOpt) Same as nova_catalog_info, but for admin endpoint.
= compute:Compute Service:publicURL(StrOpt) Match this value when searching for nova in the service catalog. Format is: separated values of the form: <service_type>:<service_name>:<endpoint_type>
= None(StrOpt) Same as nova_endpoint_template, but for admin endpoint.
= None(StrOpt) Override service catalog lookup with template for nova endpoint e.g. http://localhost:8774/v2/%(project_id)s
= None(StrOpt) Region name of this node
-
diff --git a/doc/common/tables/cinder-cors.xml b/doc/common/tables/cinder-cors.xml deleted file mode 100644 index 8f55549028..0000000000 --- a/doc/common/tables/cinder-cors.xml +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CORS configuration options
Configuration option = Default valueDescription
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
-
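A short sketch of the [cors] stanza, assuming the oslo.middleware option names allowed_origin, allow_credentials, and max_age, with a placeholder dashboard origin::

    [cors]
    # Names assumed from oslo.middleware; the origin is a placeholder.
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    max_age = 3600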
diff --git a/doc/common/tables/cinder-database.xml b/doc/common/tables/cinder-database.xml deleted file mode 100644 index 1046ead100..0000000000 --- a/doc/common/tables/cinder-database.xml +++ /dev/null @@ -1,131 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of database configuration options
Configuration option = Default valueDescription
[DEFAULT]
= cinder.db(StrOpt) Driver to use for database access
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
= False(BoolOpt) Enable the experimental use of thread pooling for all DB API calls
-
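In practice only the connection string and pool sizing are commonly changed. A sketch assuming the oslo.db option names connection, max_pool_size, max_retries, and idle_timeout, with a placeholder database URL::

    [database]
    # Names assumed from oslo.db; the connection URL is a placeholder.
    connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
    max_pool_size = 20
    max_retries = 10
    idle_timeout = 3600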
diff --git a/doc/common/tables/cinder-datera.xml b/doc/common/tables/cinder-datera.xml deleted file mode 100644 index 56a5634a55..0000000000 --- a/doc/common/tables/cinder-datera.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Datera volume driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 7717(StrOpt) Datera API port.
= None(StrOpt) DEPRECATED: This will be removed in the Liberty release. Use san_login and san_password instead. This directly sets the Datera API token.
= 1(StrOpt) Datera API version.
= 3(StrOpt) Number of replicas to create of an inode.
= None(StrOpt) The path to the client certificate for verification, if the driver supports it.
= None(StrOpt) The path to the client certificate key for verification, if the driver supports it.
-
diff --git a/doc/common/tables/cinder-debug.xml b/doc/common/tables/cinder-debug.xml deleted file mode 100644 index 49ab5e19c3..0000000000 --- a/doc/common/tables/cinder-debug.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(ListOpt) List of options that control which trace info is written to the DEBUG log level to assist developers. Valid values are method and api.
-
diff --git a/doc/common/tables/cinder-dellsc.xml b/doc/common/tables/cinder-dellsc.xml deleted file mode 100644 index e5c7f0098b..0000000000 --- a/doc/common/tables/cinder-dellsc.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Dell Storage Center volume driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 3033(IntOpt) Dell API port
= openstack(StrOpt) Name of the server folder to use on the Storage Center
= 64702(IntOpt) Storage Center System Serial Number
= False(BoolOpt) Enable HTTPS SC certificate verification.
= openstack(StrOpt) Name of the volume folder to use on the Storage Center
-
diff --git a/doc/common/tables/cinder-dothill.xml b/doc/common/tables/cinder-dothill.xml deleted file mode 100644 index 1781da1f15..0000000000 --- a/doc/common/tables/cinder-dothill.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Dot Hill volume driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= https(StrOpt) DotHill API interface protocol.
= A(StrOpt) Pool or Vdisk name to use for volume creation.
= virtual(StrOpt) linear (for Vdisk) or virtual (for Pool).
= (ListOpt) List of comma-separated target iSCSI IP addresses.
= False(BoolOpt) Whether to verify DotHill array SSL certificate.
= None(StrOpt) DotHill array SSL certificate path.
diff --git a/doc/common/tables/cinder-drbd.xml b/doc/common/tables/cinder-drbd.xml
deleted file mode 100644
index 9d6af7c7f5..0000000000
--- a/doc/common/tables/cinder-drbd.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of DRBD configuration options
Configuration option = Default value | Description
[DEFAULT]
= 1(StrOpt) Number of nodes that should replicate the data.
diff --git a/doc/common/tables/cinder-emc.xml b/doc/common/tables/cinder-emc.xml
deleted file mode 100644
index 1c83b0b258..0000000000
--- a/doc/common/tables/cinder-emc.xml
+++ /dev/null
@@ -1,104 +0,0 @@
Description of EMC configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Report free_capacity_gb as 0 when the limit to maximum number of pool LUNs is reached. By default, the value is False.
= /etc/cinder/cinder_emc_config.xml(StrOpt) Use this file for the cinder EMC plugin config data.
= False(BoolOpt) To destroy storage group when the last LUN is removed from it. By default, the value is False.
= False(BoolOpt) Delete a LUN even if it is in Storage Groups.
= False(BoolOpt) Automatically deregister initiators after the related storage group is destroyed. By default, the value is False.
= False(BoolOpt) Automatically register initiators. By default, the value is False.
= *(StrOpt) Comma separated iSCSI or FC ports to be used in Nova or Cinder.
= (StrOpt) Mapping between hostname and its iSCSI initiator IP addresses.
= 255(IntOpt) Default max number of LUNs in a storage group. By default, the value is 255.
= (StrOpt) Naviseccli Path.
= global(StrOpt) VNX authentication scope type.
= None(StrOpt) Comma-separated list of storage pool names to be used.
= None(StrOpt) Directory path that contains the VNX security file. Make sure the security file is generated first.
= 5(IntOpt) Number of retries in case array is busy
= 5(IntOpt) Interval between retries in case array is busy
= (StrOpt) XMS cluster id in multi-cluster environment
diff --git a/doc/common/tables/cinder-emc_sio.xml b/doc/common/tables/cinder-emc_sio.xml
deleted file mode 100644
index 4a9e7e3938..0000000000
--- a/doc/common/tables/cinder-emc_sio.xml
+++ /dev/null
@@ -1,84 +0,0 @@
Description of EMC SIO volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Whether to allow force delete.
= None(StrOpt) Protection domain id.
= None(StrOpt) Protection domain name.
= 443(StrOpt) REST server port.
= True(BoolOpt) Whether to round volume capacity.
= None(StrOpt) Server certificate path.
= None(StrOpt) Storage pool id.
= None(StrOpt) Storage pool name.
= None(StrOpt) Storage pools.
= False(BoolOpt) Whether to unmap volume before deletion.
= False(BoolOpt) Whether to verify server certificate.
diff --git a/doc/common/tables/cinder-eqlx.xml b/doc/common/tables/cinder-eqlx.xml
deleted file mode 100644
index 9e105dd310..0000000000
--- a/doc/common/tables/cinder-eqlx.xml
+++ /dev/null
@@ -1,68 +0,0 @@
Description of Dell EqualLogic volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= admin(StrOpt) Existing CHAP account name. Note that this option is deprecated in favour of "chap_username" as specified in cinder/volume/driver.py and will be removed in the next release.
= password(StrOpt) Password for the specified CHAP account name. Note that this option is deprecated in favour of "chap_password" as specified in cinder/volume/driver.py and will be removed in the next release.
= 5(IntOpt) Maximum retry count for reconnection. Default is 5.
= 30(IntOpt) Timeout for the Group Manager cli command execution. Default is 30. Note that this option is deprecated in favour of "ssh_conn_timeout" as specified in cinder/volume/drivers/san/san.py and will be removed in M release.
= group-0(StrOpt) Group name to use for creating volumes. Defaults to "group-0".
= default(StrOpt) Pool in which volumes will be created. Defaults to "default".
= False(BoolOpt) Use CHAP authentication for targets. Note that this option is deprecated in favour of "use_chap_auth" as specified in cinder/volume/driver.py and will be removed in next release.
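A rough backend stanza built from these Dell EqualLogic options. The option names and driver path are assumptions (the rows above omit the names), so treat this as illustrative only:

    [eqlx-1]
    volume_driver = cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver
    san_ip = 10.0.0.10
    san_login = grpadmin
    san_password = EQL_SAN_PASSWORD
    # Group and pool used for volume creation
    eqlx_group_name = group-0
    eqlx_pool = default
    # Prefer the non-deprecated CHAP options
    use_chap_auth = True
    chap_username = chapadmin
    chap_password = CHAP_PASSWORD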
diff --git a/doc/common/tables/cinder-flashsystem.xml b/doc/common/tables/cinder-flashsystem.xml
deleted file mode 100644
index 958f9c183e..0000000000
--- a/doc/common/tables/cinder-flashsystem.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of IBM FlashSystem volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= FC(StrOpt) Connection protocol should be FC. (Default is FC.)
= 0(IntOpt) Default iSCSI Port ID of FlashSystem. (Default port is 0.)
= True(BoolOpt) Allows vdisk to multi host mapping. (Default is True)
= False(BoolOpt) Connect with multipath (FC only). (Default is false.)
diff --git a/doc/common/tables/cinder-hds-hnas.xml b/doc/common/tables/cinder-hds-hnas.xml
deleted file mode 100644
index c39872bae1..0000000000
--- a/doc/common/tables/cinder-hds-hnas.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of HDS HNAS iSCSI and NFS driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= /opt/hds/hnas/cinder_iscsi_conf.xml(StrOpt) Configuration file for HDS iSCSI cinder plugin
= /opt/hds/hnas/cinder_nfs_conf.xml(StrOpt) Configuration file for HDS NFS cinder plugin
diff --git a/doc/common/tables/cinder-hgst.xml b/doc/common/tables/cinder-hgst.xml
deleted file mode 100644
index 7140957a20..0000000000
--- a/doc/common/tables/cinder-hgst.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of HGST volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= Net 1 (IPv4)(StrOpt) Space network name to use for data transfer
= 0(StrOpt) Should spaces be redundantly stored (1/0)
= disk(StrOpt) Group to own created spaces
= 0600(StrOpt) UNIX mode for created spaces
= root(StrOpt) User to own created spaces
= os:gbd0(StrOpt) Comma separated list of Space storage servers:devices. ex: os1_stor:gbd0,os2_stor:gbd0
diff --git a/doc/common/tables/cinder-hitachi-hbsd.xml b/doc/common/tables/cinder-hitachi-hbsd.xml
deleted file mode 100644
index bb2fa43bf8..0000000000
--- a/doc/common/tables/cinder-hitachi-hbsd.xml
+++ /dev/null
@@ -1,128 +0,0 @@
Description of Hitachi storage volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Add CHAP user
= 10(IntOpt) Interval to check copy asynchronously
= None(StrOpt) iSCSI authentication method
= HBSD-CHAP-password(StrOpt) iSCSI authentication password
= HBSD-CHAP-user(StrOpt) iSCSI authentication username
= 3(IntOpt) Interval to check copy
= 3(IntOpt) Copy speed of storage system
= FULL(StrOpt) Default copy method of storage system
= None(StrOpt) Range of group number
= False(BoolOpt) Request for creating HostGroup or iSCSI Target
= True(BoolOpt) Add to HORCM configuration
= 200,201(StrOpt) Instance numbers for HORCM
= None(StrOpt) Password of storage system for HORCM
= 600(IntOpt) Timeout until a resource lock is released, in seconds. The value must be between 0 and 7200.
= None(StrOpt) Username of storage system for HORCM
= None(StrOpt) Range of logical device of storage system
= None(IntOpt) Pool ID of storage system
= None(StrOpt) Serial number of storage system
= None(StrOpt) Control port names for HostGroup or iSCSI Target
= None(IntOpt) Thin pool ID of storage system
= None(StrOpt) Name of an array unit
= False(BoolOpt) Request for FC Zone creating HostGroup
diff --git a/doc/common/tables/cinder-hpe3par.xml b/doc/common/tables/cinder-hpe3par.xml
deleted file mode 100644
index e37173da86..0000000000
--- a/doc/common/tables/cinder-hpe3par.xml
+++ /dev/null
@@ -1,80 +0,0 @@
Description of HPE 3PAR Fibre Channel and iSCSI drivers configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1
= OpenStack(ListOpt) List of the CPG(s) to use for volume creation
= (StrOpt) The CPG to use for Snapshots for volumes. If empty the userCPG will be used.
= False(BoolOpt) Enable HTTP debugging to 3PAR
= False(BoolOpt) Enable CHAP authentication for iSCSI connections.
= (ListOpt) List of target iSCSI addresses to use.
= (StrOpt) 3PAR password for the user specified in hpe3par_username
= (StrOpt) The time in hours when a snapshot expires and is deleted. This must be larger than expiration
= (StrOpt) The time in hours to retain a snapshot. You can't delete it before this expires.
= (StrOpt) 3PAR username with the 'edit' role
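A hedged sketch of an HPE 3PAR FC backend that uses these options; the option names and the driver path are assumptions, not taken from the stripped rows above:

    [3par-fc-1]
    volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver
    hpe3par_api_url = https://10.0.0.20:8080/api/v1
    hpe3par_username = edituser
    hpe3par_password = 3PAR_PASSWORD
    # CPG(s) used for volume creation
    hpe3par_cpg = OpenStack
    hpe3par_debug = False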
diff --git a/doc/common/tables/cinder-hpelefthand.xml b/doc/common/tables/cinder-hpelefthand.xml
deleted file mode 100644
index 875e6fab87..0000000000
--- a/doc/common/tables/cinder-hpelefthand.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of HPE LeftHand/StoreVirtual driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) HPE LeftHand WSAPI Server Url like https://<LeftHand ip>:8081/lhos
= None(StrOpt) HPE LeftHand cluster name
= False(BoolOpt) Enable HTTP debugging to LeftHand
= False(BoolOpt) Configure CHAP authentication for iSCSI connections (Default: Disabled)
= None(StrOpt) HPE LeftHand Super user password
= None(StrOpt) HPE LeftHand Super user username
diff --git a/doc/common/tables/cinder-hpmsa.xml b/doc/common/tables/cinder-hpmsa.xml
deleted file mode 100644
index 727386659e..0000000000
--- a/doc/common/tables/cinder-hpmsa.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of HP MSA volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= https(StrOpt) HPMSA API interface protocol.
= A(StrOpt) Pool or Vdisk name to use for volume creation.
= virtual(StrOpt) linear (for Vdisk) or virtual (for Pool).
= (ListOpt) List of comma-separated target iSCSI IP addresses.
= False(BoolOpt) Whether to verify HPMSA array SSL certificate.
= None(StrOpt) HPMSA array SSL certificate path.
diff --git a/doc/common/tables/cinder-hpxp.xml b/doc/common/tables/cinder-hpxp.xml
deleted file mode 100644
index 27a1c97ea2..0000000000
--- a/doc/common/tables/cinder-hpxp.xml
+++ /dev/null
@@ -1,112 +0,0 @@
Description of HP XP volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= 10(IntOpt) Interval to check copy asynchronously
= None(ListOpt) Target port names of compute node for host group or iSCSI target
= 3(IntOpt) Interval to check copy
= 3(IntOpt) Copy speed of storage system
= FULL(StrOpt) Default copy method of storage system. There are two valid values: "FULL" specifies a full copy; "THIN" specifies a thin copy. Default value is "FULL".
= False(BoolOpt) Request for creating host group or iSCSI target
= True(BoolOpt) Add to HORCM configuration
= False(BoolOpt) Only discover a specific name of host group or iSCSI target
= 200, 201(ListOpt) Instance numbers for HORCM
= meta_resource(StrOpt) Resource group name of storage system for HORCM
= None(StrOpt) Username of storage system for HORCM
= None(StrOpt) Logical device range of storage system
= None(StrOpt) Pool of storage system
= None(StrOpt) Type of storage command line interface
= None(StrOpt) ID of storage system
= None(ListOpt) Target port names for host group or iSCSI target
= None(StrOpt) Thin pool of storage system
= False(BoolOpt) Request for FC Zone creating host group
diff --git a/doc/common/tables/cinder-huawei.xml b/doc/common/tables/cinder-huawei.xml
deleted file mode 100644
index bf8565cbd3..0000000000
--- a/doc/common/tables/cinder-huawei.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of Huawei storage driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= /etc/cinder/cinder_huawei_conf.xml(StrOpt) The configuration file for the Cinder Huawei driver.
diff --git a/doc/common/tables/cinder-ibmnas.xml b/doc/common/tables/cinder-ibmnas.xml
deleted file mode 100644
index b34ea869e6..0000000000
--- a/doc/common/tables/cinder-ibmnas.xml
+++ /dev/null
@@ -1,80 +0,0 @@
Description of IBM SONAS and Storwise V7000 volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= v7ku(StrOpt) IBMNAS platform type to be used as backend storage; valid values are - v7ku : for using IBM Storwize V7000 Unified, sonas : for using IBM Scale Out NAS, gpfs-nas : for using NFS based IBM GPFS deployments.
= (StrOpt) IP address or Hostname of NAS system.
= admin(StrOpt) User name to connect to NAS system.
= None(StrOpt) Options used to mount the storage backend file system where Cinder volumes are stored.
= (StrOpt) Password to connect to NAS system.
= (StrOpt) Filename of private key to use for SSH authentication.
= auto(StrOpt) Allow network-attached storage systems to operate in a secure environment where root level access is not permitted. If set to False, access is as the root user and insecure. If set to True, access is not as root. If set to auto, a check is done to determine if this is a new installation: True is used if so, otherwise False. Default is auto.
= auto(StrOpt) Set more secure file permissions on network-attached storage volume files to restrict broad other/world access. If set to False, volumes are created with open permissions. If set to True, volumes are created with permissions for the cinder user and group (660). If set to auto, a check is done to determine if this is a new installation: True is used if so, otherwise False. Default is auto.
= (StrOpt) Path to the share to use for storing Cinder volumes. For example: "/srv/export1" for an NFS server export available at 10.0.5.10:/srv/export1 .
= 22(IntOpt) SSH port to use to connect to NAS system.
diff --git a/doc/common/tables/cinder-images.xml b/doc/common/tables/cinder-images.xml
deleted file mode 100644
index bea8c1c650..0000000000
--- a/doc/common/tables/cinder-images.xml
+++ /dev/null
@@ -1,112 +0,0 @@
Description of images configuration options
Configuration option = Default value | Description
[DEFAULT]
= (ListOpt) A list of url schemes that can be downloaded directly via the direct_url. Currently supported schemes: [file].
= False(BoolOpt) Allow to perform insecure SSL (https) requests to glance
= $glance_host:$glance_port(ListOpt) A list of the glance API servers available to cinder ([hostname|ip]:port)
= False(BoolOpt) Enables or disables negotiation of SSL layer compression. In some cases disabling compression can improve data throughput, such as when high network bandwidth is available and you use compressed image formats like qcow2.
= 1(IntOpt) Version of the glance API to use
= None(StrOpt) Location of ca certificates file to use for glance client requests.
= checksum, container_format, disk_format, image_name, image_id, min_disk, min_ram, name, size(ListOpt) Default core properties of image
= $my_ip(StrOpt) Default glance host name or IP
= 0(IntOpt) Number retries when downloading an image from glance
= 9292(IntOpt) Default glance port
= None(IntOpt) http/https timeout value for glance operations. If no value (None) is supplied here, the glanceclient default value is used.
= $state_path/conversion(StrOpt) Directory used for temporary storage during image conversion
= False(BoolOpt) If set to True, upload-to-image in raw format will create a cloned volume and register its location to the image service, instead of uploading the volume content. The cinder backend and locations support must be enabled in the image service, and glance_api_version must be set to 2.
= False(BoolOpt) If set to True, the image volume created by upload-to-image will be placed in the internal tenant. Otherwise, the image volume is created in the current context's tenant.
= False(BoolOpt) Enable the image volume cache for this backend.
= 0(IntOpt) Max number of entries allowed in the image volume cache. 0 => unlimited.
= 0(IntOpt) Max size of the image volume cache for this backend in GB. 0 => unlimited.
= False(BoolOpt) Do we attach/detach volumes in cinder using multipath for volume to image and image to volume transfers?
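For context, a sketch of the Image service settings in the cinder.conf [DEFAULT] section; the option names are assumptions based on the usual Glance-related settings, since the rows above do not show them:

    [DEFAULT]
    glance_api_servers = controller:9292
    glance_api_version = 2
    glance_num_retries = 2
    glance_api_insecure = False
    image_conversion_dir = $state_path/conversion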
diff --git a/doc/common/tables/cinder-infortrend.xml b/doc/common/tables/cinder-infortrend.xml
deleted file mode 100644
index e36f0d978a..0000000000
--- a/doc/common/tables/cinder-infortrend.xml
+++ /dev/null
@@ -1,72 +0,0 @@
Description of Infortrend volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= 5(IntOpt) Maximum retry time for cli. Default is 5.
= /opt/bin/Infortrend/raidcmd_ESDS10.jar(StrOpt) The Infortrend CLI absolute path. By default, it is at /opt/bin/Infortrend/raidcmd_ESDS10.jar
= 30(IntOpt) Default timeout for CLI copy operations in minutes. Support: migrate volume, create cloned volume and create volume from snapshot. By Default, it is 30 minutes.
= (StrOpt) Infortrend raid pool name list. It is separated with comma.
= full(StrOpt) Let the volume use specific provisioning. By default, it is the full provisioning. The supported options are full or thin.
= 0,1,2,3,4,5,6,7(StrOpt) Infortrend raid channel ID list on Slot A for OpenStack usage. It is separated with comma. By default, it is the channel 0~7.
= 0,1,2,3,4,5,6,7(StrOpt) Infortrend raid channel ID list on Slot B for OpenStack usage. It is separated with comma. By default, it is the channel 0~7.
= 0(StrOpt) Let the volume use specific tiering level. By default, it is the level 0. The supported levels are 0,2,3,4.
diff --git a/doc/common/tables/cinder-keymgr.xml b/doc/common/tables/cinder-keymgr.xml
deleted file mode 100644
index 1f9f290fca..0000000000
--- a/doc/common/tables/cinder-keymgr.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of key manager configuration options
Configuration option = Default value | Description
[keymgr]
= cinder.keymgr.conf_key_mgr.ConfKeyManager(StrOpt) The full class name of the key manager API class
= http://localhost:9311/v1(StrOpt) Url for encryption service.
= http://localhost:5000/v3(StrOpt) Authentication url for encryption service.
= None(StrOpt) Fixed key returned by key manager, specified in hex
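A minimal [keymgr] sketch; the option names are assumed from the standard Block Storage key manager settings rather than read from the rows above:

    [keymgr]
    api_class = cinder.keymgr.conf_key_mgr.ConfKeyManager
    # Hex-encoded key returned by ConfKeyManager for volume encryption
    fixed_key = 0000000000000000000000000000000000000000000000000000000000000000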
diff --git a/doc/common/tables/cinder-lenovo.xml b/doc/common/tables/cinder-lenovo.xml
deleted file mode 100644
index 7c7862d352..0000000000
--- a/doc/common/tables/cinder-lenovo.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of Lenovo volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= https(StrOpt) Lenovo api interface protocol.
= A(StrOpt) Pool or Vdisk name to use for volume creation.
= virtual(StrOpt) linear (for VDisk) or virtual (for Pool).
= (ListOpt) List of comma-separated target iSCSI IP addresses.
= False(BoolOpt) Whether to verify Lenovo array SSL certificate.
= None(StrOpt) Lenovo array SSL certificate path.
diff --git a/doc/common/tables/cinder-logging.xml b/doc/common/tables/cinder-logging.xml
deleted file mode 100644
index 9fdac7084d..0000000000
--- a/doc/common/tables/cinder-logging.xml
+++ /dev/null
@@ -1,124 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= False(BoolOpt) Make exception message format errors fatal.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
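An illustrative logging block for cinder.conf; the option names are assumed from the common oslo.log set (the rows above omit them):

    [DEFAULT]
    debug = False
    use_syslog = False
    log_dir = /var/log/cinder
    log_file = cinder-volume.log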
diff --git a/doc/common/tables/cinder-lvm.xml b/doc/common/tables/cinder-lvm.xml
deleted file mode 100644
index dfac2d600e..0000000000
--- a/doc/common/tables/cinder-lvm.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of LVM configuration options
Configuration option = Default value | Description
[DEFAULT]
= /etc/cinder/lvm.conf(StrOpt) LVM conf file to use for the LVM driver in Cinder; this setting is ignored if the specified file does not exist (You can also specify 'None' to not use a conf file even if one exists).
= 0(IntOpt) If >0, create LVs with multiple mirrors. Note that this requires lvm_mirrors + 2 PVs with available space
= default(StrOpt) Type of LVM volumes to deploy; (default, thin, or auto). Auto defaults to thin if thin is supported.
= cinder-volumes(StrOpt) Name for the VG that will contain exported volumes
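A sketch of an LVM backend stanza built from these options; the names and the driver path are assumptions, not quoted from the rows above:

    [lvm-1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes
    lvm_type = default
    lvm_mirrors = 0
    lvm_conf_file = /etc/cinder/lvm.conf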
diff --git a/doc/common/tables/cinder-netapp_7mode_iscsi.xml b/doc/common/tables/cinder-netapp_7mode_iscsi.xml
deleted file mode 100644
index d32112f93f..0000000000
--- a/doc/common/tables/cinder-netapp_7mode_iscsi.xml
+++ /dev/null
@@ -1,84 +0,0 @@
Description of NetApp 7-Mode iSCSI driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Administrative user account name used to access the storage system or proxy server.
= None(StrOpt) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
= None(StrOpt) Password for the administrative user account specified in the netapp_login option.
= (.+)(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
= None(StrOpt) The hostname (or IP address) for the storage system or proxy server.
= None(IntOpt) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
= 1.2(FloatOpt) The quantity to be multiplied by the requested volume size to ensure enough space is available on the virtual storage server (Vserver) to fulfill the volume creation request. Note: this option is deprecated and will be removed in favor of "reserved_percentage" in the Mitaka release.
= ontap_cluster(StrOpt) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
= None(StrOpt) The storage protocol to be used on the data path with the storage system.
= http(StrOpt) The transport protocol used when communicating with the storage system or proxy server.
= None(StrOpt) The vFiler unit on which provisioning of block storage volumes will be done. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode. Only use this option when utilizing the MultiStore feature on the NetApp storage system.
diff --git a/doc/common/tables/cinder-netapp_7mode_nfs.xml b/doc/common/tables/cinder-netapp_7mode_nfs.xml
deleted file mode 100644
index 4aea6dad63..0000000000
--- a/doc/common/tables/cinder-netapp_7mode_nfs.xml
+++ /dev/null
@@ -1,92 +0,0 @@
Description of NetApp 7-Mode NFS driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= 720(IntOpt) This option specifies the threshold for last access time for images in the NFS image cache. When a cache cleaning cycle begins, images in the cache that have not been accessed in the last M minutes, where M is the value of this parameter, will be deleted from the cache to create free space on the NFS share.
= None(StrOpt) Administrative user account name used to access the storage system or proxy server.
= None(StrOpt) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
= None(StrOpt) Password for the administrative user account specified in the netapp_login option.
= (.+)(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
= None(StrOpt) The hostname (or IP address) for the storage system or proxy server.
= None(IntOpt) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
= ontap_cluster(StrOpt) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
= None(StrOpt) The storage protocol to be used on the data path with the storage system.
= http(StrOpt) The transport protocol used when communicating with the storage system or proxy server.
= None(StrOpt) The vFiler unit on which provisioning of block storage volumes will be done. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode. Only use this option when utilizing the MultiStore feature on the NetApp storage system.
= 20(IntOpt) If the percentage of available space for an NFS share has dropped below the value specified by this option, the NFS image cache will be cleaned.
= 60(IntOpt) When the percentage of available space on an NFS share has reached the percentage specified by this option, the driver will stop clearing files from the NFS image cache that have not been accessed in the last M minutes, where M is the value of the expiry_thres_minutes configuration option.
diff --git a/doc/common/tables/cinder-netapp_cdot_iscsi.xml b/doc/common/tables/cinder-netapp_cdot_iscsi.xml
deleted file mode 100644
index 74d392ffe5..0000000000
--- a/doc/common/tables/cinder-netapp_cdot_iscsi.xml
+++ /dev/null
@@ -1,92 +0,0 @@
Description of NetApp cDOT iSCSI driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Administrative user account name used to access the storage system or proxy server.
= None(StrOpt) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created.
= enabled(StrOpt) This option determines if storage space is reserved for LUN allocation. If enabled, LUNs are thick provisioned. If space reservation is disabled, storage space is allocated on demand.
= None(StrOpt) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
= None(StrOpt) Password for the administrative user account specified in the netapp_login option.
= (.+)(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
= None(StrOpt) The hostname (or IP address) for the storage system or proxy server.
= None(IntOpt) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
= 1.2(FloatOpt) The quantity to be multiplied by the requested volume size to ensure enough space is available on the virtual storage server (Vserver) to fulfill the volume creation request. Note: this option is deprecated and will be removed in favor of "reserved_percentage" in the Mitaka release.
= ontap_cluster(StrOpt) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
= None(StrOpt) The storage protocol to be used on the data path with the storage system.
= http(StrOpt) The transport protocol used when communicating with the storage system or proxy server.
= None(StrOpt) This option specifies the virtual storage server (Vserver) name on the storage cluster on which provisioning of block storage volumes should occur.
diff --git a/doc/common/tables/cinder-netapp_cdot_nfs.xml b/doc/common/tables/cinder-netapp_cdot_nfs.xml
deleted file mode 100644
index 80f9be9751..0000000000
--- a/doc/common/tables/cinder-netapp_cdot_nfs.xml
+++ /dev/null
@@ -1,108 +0,0 @@
Description of NetApp cDOT NFS driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= 720(IntOpt) This option specifies the threshold for last access time for images in the NFS image cache. When a cache cleaning cycle begins, images in the cache that have not been accessed in the last M minutes, where M is the value of this parameter, will be deleted from the cache to create free space on the NFS share.
= None(StrOpt) This option specifies the path of the NetApp copy offload tool binary. Ensure that the binary has execute permissions set which allow the effective user of the cinder-volume process to execute the file.
= None(StrOpt) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts.
= None(StrOpt) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts.
= None(StrOpt) Administrative user account name used to access the storage system or proxy server.
= None(StrOpt) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created.
= None(StrOpt) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
= None(StrOpt) Password for the administrative user account specified in the netapp_login option.
= (.+)(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
= None(StrOpt) The hostname (or IP address) for the storage system or proxy server.
= None(IntOpt) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
= ontap_cluster(StrOpt) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
= None(StrOpt) The storage protocol to be used on the data path with the storage system.
= http(StrOpt) The transport protocol used when communicating with the storage system or proxy server.
= None(StrOpt) This option specifies the virtual storage server (Vserver) name on the storage cluster on which provisioning of block storage volumes should occur.
= 20(IntOpt) If the percentage of available space for an NFS share has dropped below the value specified by this option, the NFS image cache will be cleaned.
= 60(IntOpt) When the percentage of available space on an NFS share has reached the percentage specified by this option, the driver will stop clearing files from the NFS image cache that have not been accessed in the last M minutes, where M is the value of the expiry_thres_minutes configuration option.
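A hedged example of a clustered Data ONTAP NFS backend using these options; the option names, driver path, and shares file are assumptions for illustration only:

    [netapp-cdot-nfs]
    volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
    netapp_storage_family = ontap_cluster
    netapp_storage_protocol = nfs
    netapp_server_hostname = 10.0.0.30
    netapp_transport_type = http
    netapp_login = admin
    netapp_password = NETAPP_PASSWORD
    netapp_vserver = openstack-vserver
    nfs_shares_config = /etc/cinder/nfs_shares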
diff --git a/doc/common/tables/cinder-netapp_eseries_iscsi.xml b/doc/common/tables/cinder-netapp_eseries_iscsi.xml
deleted file mode 100644
index 15c008cd06..0000000000
--- a/doc/common/tables/cinder-netapp_eseries_iscsi.xml
+++ /dev/null
@@ -1,92 +0,0 @@
Description of NetApp E-Series driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) This option is only utilized when the storage family is configured to eseries. This option is used to restrict provisioning to the specified controllers. Specify the value of this option to be a comma separated list of controller hostnames or IP addresses to be used for provisioning.
= False(BoolOpt) This option specifies whether the driver should allow operations that require multiple attachments to a volume. An example would be live migration of servers that have volumes attached. When enabled, this backend is limited to 256 total volumes in order to guarantee volumes can be accessed by more than one host.
= None(StrOpt) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts.
= None(StrOpt) Administrative user account name used to access the storage system or proxy server.
= None(StrOpt) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
= None(StrOpt) Password for the administrative user account specified in the netapp_login option.
= (.+)(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
= None(StrOpt) Password for the NetApp E-Series storage array.
= None(StrOpt) The hostname (or IP address) for the storage system or proxy server.
= None(IntOpt) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
= ontap_cluster(StrOpt) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
= http(StrOpt) The transport protocol used when communicating with the storage system or proxy server.
= /devmgr/v2(StrOpt) This option is used to specify the path to the E-Series proxy application on a proxy server. The value is combined with the value of the netapp_transport_type, netapp_server_hostname, and netapp_server_port options to create the URL used by the driver to connect to the proxy application.
diff --git a/doc/common/tables/cinder-nimble.xml b/doc/common/tables/cinder-nimble.xml
deleted file mode 100644
index 95c33a25d0..0000000000
--- a/doc/common/tables/cinder-nimble.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of Nimble driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= default(StrOpt) Nimble Controller pool name
= *(StrOpt) Nimble Subnet Label
diff --git a/doc/common/tables/cinder-openvstorage.xml b/doc/common/tables/cinder-openvstorage.xml
deleted file mode 100644
index 99b830e79f..0000000000
--- a/doc/common/tables/cinder-openvstorage.xml
+++ /dev/null
@@ -1,26 +0,0 @@
Description of Open vStorage driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) Vpool to use for volumes - backend is defined by vpool not by us.
diff --git a/doc/common/tables/cinder-profiler.xml b/doc/common/tables/cinder-profiler.xml
deleted file mode 100644
index 19dced05f0..0000000000
--- a/doc/common/tables/cinder-profiler.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of profiler configuration options
Configuration option = Default value | Description
[profiler]
= False(BoolOpt) If False, fully disables the profiling feature.
= False(BoolOpt) If False, SQL requests are not traced.
diff --git a/doc/common/tables/cinder-prophetstor_dpl.xml b/doc/common/tables/cinder-prophetstor_dpl.xml
deleted file mode 100644
index 4896f27474..0000000000
--- a/doc/common/tables/cinder-prophetstor_dpl.xml
+++ /dev/null
@@ -1,68 +0,0 @@
Description of ProphetStor Fibre Channel and iSCSI drivers configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) DPL pool uuid in which DPL volumes are stored.
= 8357(IntOpt) DPL port number.
= 3260(IntOpt) The port that the iSCSI daemon is listening on
= (StrOpt) IP address of SAN controller
= admin(StrOpt) Username for SAN controller
= (StrOpt) Password for SAN controller
= True(BoolOpt) Use thin provisioning for SAN volumes?
diff --git a/doc/common/tables/cinder-pure.xml b/doc/common/tables/cinder-pure.xml
deleted file mode 100644
index 608d34be1a..0000000000
--- a/doc/common/tables/cinder-pure.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of Pure Storage driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) REST API authorization token.
diff --git a/doc/common/tables/cinder-qpid.xml b/doc/common/tables/cinder-qpid.xml
deleted file mode 100644
index c96188ccb5..0000000000
--- a/doc/common/tables/cinder-qpid.xml
+++ /dev/null
@@ -1,96 +0,0 @@
Description of Qpid configuration options
Configuration option = Default value | Description
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
diff --git a/doc/common/tables/cinder-quobyte.xml b/doc/common/tables/cinder-quobyte.xml
deleted file mode 100644
index 8bfa4073c1..0000000000
--- a/doc/common/tables/cinder-quobyte.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of Quobyte USP volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Path to a Quobyte Client configuration file.
= $state_path/mnt(StrOpt) Base dir containing the mount point for the Quobyte volume.
= True(BoolOpt) Create volumes as QCOW2 files rather than raw files.
= True(BoolOpt) Create volumes as sparse files which take no space. If set to False, the volume is created as a regular file; in that case volume creation takes a lot of time.
= None(StrOpt) URL to the Quobyte volume e.g., quobyte://<DIR host>/<volume name>
diff --git a/doc/common/tables/cinder-quota.xml b/doc/common/tables/cinder-quota.xml
deleted file mode 100644
index 58b629999e..0000000000
--- a/doc/common/tables/cinder-quota.xml
+++ /dev/null
@@ -1,80 +0,0 @@
Description of quota configuration options
Configuration option = Default value | Description
[DEFAULT]
= 0(IntOpt) Number of seconds between subsequent usage refreshes
= 1000(IntOpt) Total amount of storage, in gigabytes, allowed for backups per project
= 10(IntOpt) Number of volume backups allowed per project
= 10(IntOpt) Number of consistencygroups allowed per project
= cinder.quota.DbQuotaDriver(StrOpt) Default driver to use for quota checks
= 1000(IntOpt) Total amount of storage, in gigabytes, allowed for volumes and snapshots per project
= 10(IntOpt) Number of volume snapshots allowed per project
= 10(IntOpt) Number of volumes allowed per project
= 86400(IntOpt) Number of seconds until a reservation expires
= True(BoolOpt) Enables or disables use of default quota class with default quota.
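An illustrative quota block; the option names are assumptions based on the standard Block Storage quota settings, since the rows above do not show them:

    [DEFAULT]
    quota_volumes = 10
    quota_snapshots = 10
    quota_gigabytes = 1000
    quota_backups = 10
    quota_backup_gigabytes = 1000
    quota_consistencygroups = 10
    quota_driver = cinder.quota.DbQuotaDriver
    use_default_quota_class = True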
diff --git a/doc/common/tables/cinder-rabbitmq.xml b/doc/common/tables/cinder-rabbitmq.xml
deleted file mode 100644
index 4bdc670ede..0000000000
--- a/doc/common/tables/cinder-rabbitmq.xml
+++ /dev/null
@@ -1,136 +0,0 @@
Description of RabbitMQ configuration options
Configuration option = Default value | Description
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
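A minimal RabbitMQ transport sketch; the option names are assumptions based on the usual oslo.messaging settings rather than the stripped rows above:

    [DEFAULT]
    rpc_backend = rabbit

    [oslo_messaging_rabbit]
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_use_ssl = False
    rabbit_ha_queues = False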
diff --git a/doc/common/tables/cinder-redis.xml b/doc/common/tables/cinder-redis.xml
deleted file mode 100644
index e8818327a2..0000000000
--- a/doc/common/tables/cinder-redis.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of Redis configuration options
Configuration option = Default value | Description
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
diff --git a/doc/common/tables/cinder-rpc.xml b/doc/common/tables/cinder-rpc.xml
deleted file mode 100644
index 1cfb3162e4..0000000000
--- a/doc/common/tables/cinder-rpc.xml
+++ /dev/null
@@ -1,142 +0,0 @@
Description of RPC configuration options
Configuration option = Default value | Description
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
= cinder-volume(StrOpt) The topic that volume nodes listen on
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
diff --git a/doc/common/tables/cinder-san.xml b/doc/common/tables/cinder-san.xml
deleted file mode 100644
index 51f4ee7caa..0000000000
--- a/doc/common/tables/cinder-san.xml
+++ /dev/null
@@ -1,88 +0,0 @@
Description of SAN configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) Cluster name to use for creating volumes
= (StrOpt) IP address of SAN controller
= False(BoolOpt) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device
= admin(StrOpt) Username for SAN controller
= (StrOpt) Password for SAN controller
= (StrOpt) Filename of private key to use for SSH authentication
= None(StrOpt) VNX secondary SP IP Address.
= 22(IntOpt) SSH port to use with SAN
= True(BoolOpt) Use thin provisioning for SAN volumes?
= 30(IntOpt) SSH connection timeout in seconds
= 5(IntOpt) Maximum ssh connections in the pool
= 1(IntOpt) Minimum ssh connections in the pool
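A generic SAN backend sketch using these options; the names are assumed, since the rows above do not show them:

    [san-backend-1]
    san_ip = 10.0.0.40
    san_login = admin
    san_password = SAN_PASSWORD
    san_ssh_port = 22
    san_thin_provision = True
    ssh_conn_timeout = 30
    ssh_min_pool_conn = 1
    ssh_max_pool_conn = 5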
diff --git a/doc/common/tables/cinder-scality.xml b/doc/common/tables/cinder-scality.xml
deleted file mode 100644
index ca843d330a..0000000000
--- a/doc/common/tables/cinder-scality.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of Scality SOFS volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Path or URL to Scality SOFS configuration file
= $state_path/scality(StrOpt) Base dir where Scality SOFS shall be mounted
= cinder/volumes(StrOpt) Path from Scality SOFS root to volume dir
diff --git a/doc/common/tables/cinder-scheduler.xml b/doc/common/tables/cinder-scheduler.xml
deleted file mode 100644
index 4536db36f4..0000000000
--- a/doc/common/tables/cinder-scheduler.xml
+++ /dev/null
@@ -1,80 +0,0 @@
Description of scheduler configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) String representation for an equation that will be used to filter hosts. Only used when the driver filter is set to be used by the Cinder scheduler.
= None(StrOpt) String representation for an equation that will be used to determine the goodness of a host. Only used when the goodness weigher is set to be used by the Cinder scheduler.
= AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter(ListOpt) Which filter class names to use for filtering hosts when not specified in the request.
= CapacityWeigher(ListOpt) Which weigher class names to use for weighing hosts.
= cinder.scheduler.filter_scheduler.FilterScheduler(StrOpt) Default scheduler driver to use
= cinder.scheduler.host_manager.HostManager(StrOpt) The scheduler host manager class to use
= (StrOpt) Absolute path to scheduler configuration JSON file.
= cinder.scheduler.manager.SchedulerManager(StrOpt) Full class name for the Manager for scheduler
= 3(IntOpt) Maximum number of attempts to schedule a volume
= cinder-scheduler(StrOpt) The topic that scheduler nodes listen on
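An illustrative scheduler block; the option names are assumptions based on the usual filter scheduler settings, since the rows above omit them:

    [DEFAULT]
    scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
    scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
    scheduler_default_weighers = CapacityWeigher
    scheduler_max_attempts = 3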
diff --git a/doc/common/tables/cinder-scst.xml b/doc/common/tables/cinder-scst.xml
deleted file mode 100644
index 33e90ffca1..0000000000
--- a/doc/common/tables/cinder-scst.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of SCST volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= iscsi(StrOpt) SCST target implementation can choose from multiple SCST target drivers.
= None(StrOpt) Certain ISCSI targets have predefined target names, SCST target driver uses this name.
diff --git a/doc/common/tables/cinder-sheepdog.xml b/doc/common/tables/cinder-sheepdog.xml
deleted file mode 100644
index 4dad389601..0000000000
--- a/doc/common/tables/cinder-sheepdog.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of Sheepdog driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= 127.0.0.1(StrOpt) IP address of sheep daemon.
= 7000(IntOpt) Port of sheep daemon.
diff --git a/doc/common/tables/cinder-smbfs.xml b/doc/common/tables/cinder-smbfs.xml
deleted file mode 100644
index f9d61d802c..0000000000
--- a/doc/common/tables/cinder-smbfs.xml
+++ /dev/null
@@ -1,68 +0,0 @@
Description of Samba volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= qcow2(StrOpt) Default format that will be used when creating volumes if no volume format is specified.
= noperm,file_mode=0775,dir_mode=0775(StrOpt) Mount options passed to the smbfs client. See mount.cifs man page for details.
= $state_path/mnt(StrOpt) Base dir containing mount points for smbfs shares.
= 1.0(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid.
= /etc/cinder/smbfs_shares(StrOpt) File with the list of available smbfs shares.
= True(BoolOpt) Create volumes as sparsed files which take no space rather than regular files when using raw format, in which case volume creation takes a lot of time.
= 0.95(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.
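A minimal, illustrative cinder.conf sketch for an SMB/Samba backend; the option names (volume_driver, smbfs_shares_config, smbfs_mount_point_base, smbfs_default_volume_format) are assumed from the Cinder SMBFS driver and do not appear in the stripped table:

    [smbfs-1]
    # assumed option names; example values only
    volume_driver = cinder.volume.drivers.smbfs.SmbfsDriver
    smbfs_shares_config = /etc/cinder/smbfs_shares
    smbfs_mount_point_base = $state_path/mnt
    smbfs_default_volume_format = qcow2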
diff --git a/doc/common/tables/cinder-solidfire.xml b/doc/common/tables/cinder-solidfire.xml
deleted file mode 100644
index 40d8cb4912..0000000000
--- a/doc/common/tables/cinder-solidfire.xml
+++ /dev/null
@@ -1,72 +0,0 @@
Description of SolidFire driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Create SolidFire accounts with this prefix. Any string can be used here, but the string "hostname" is special and will create a prefix using the cinder node hostname (previous default behavior). The default is NO prefix.
= True(BoolOpt) Create an internal cache of copies of images when a bootable volume is created, to eliminate fetching from glance and qemu-conversion on subsequent calls.
= False(BoolOpt) Allow tenants to specify QOS on create
= 443(IntOpt) SolidFire API port. Useful if the device api is behind a proxy on a different port.
= True(BoolOpt) Set 512 byte emulation on volume creation;
= True(BoolOpt) Create an internal mapping of volume IDs and account. Optimizes lookups and performance at the expense of memory, very large deployments may want to consider setting to False.
= None(StrOpt) Overrides default cluster SVIP with the one specified. This is required for deployments that have implemented the use of VLANs for iSCSI networks in their cloud.
= openstack-vtemplate(StrOpt) Account name on the SolidFire Cluster to use as owner of template/cache volumes (created if does not exist).
diff --git a/doc/common/tables/cinder-srb.xml b/doc/common/tables/cinder-srb.xml
deleted file mode 100644
index 4d5c3e365c..0000000000
--- a/doc/common/tables/cinder-srb.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of Scality REST Block storage driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Comma-separated list of REST server IPs to connect to (e.g. http://IP1/,http://IP2:81/path).
diff --git a/doc/common/tables/cinder-storage.xml b/doc/common/tables/cinder-storage.xml
deleted file mode 100644
index 4e575d43af..0000000000
--- a/doc/common/tables/cinder-storage.xml
+++ /dev/null
@@ -1,160 +0,0 @@
Description of storage configuration options
Configuration option = Default value | Description
[DEFAULT]
= -1.0(FloatOpt) Multiplier used for weighing volume capacity. Negative numbers mean to stack vs spread.
= 1.0(FloatOpt) Multiplier used for weighing volume capacity. Negative numbers mean to stack vs spread.
= None(ListOpt) A list of backend names to use. These backend names should be backed by a unique [CONFIG] group with its options
= tgtadm(StrOpt) iSCSI target user-land tool to use. tgtadm is default, use lioadm for LIO iSCSI support, scstadmin for SCST target support, iseradm for the ISER protocol, ietadm for iSCSI Enterprise Target, iscsictl for Chelsio iSCSI Target or fake for testing.
= fileio(StrOpt) Sets the behavior of the iSCSI target to either perform blockio or fileio. Optionally, auto can be set and Cinder will autodetect the type of backing device
= $my_ip(StrOpt) The IP address that the iSCSI daemon is listening on
= 3260(IntOpt) The port that the iSCSI daemon is listening on
= iscsi(StrOpt) Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm or lioadm target helpers. In order to enable RDMA, this parameter should be set with the value "iser". The supported iSCSI protocol values are "iscsi" and "iser".
= (StrOpt) Sets the target-specific flags for the iSCSI target. Only used for tgtadm to specify backing device flags using bsoflags option. The specified string is passed as is to the underlying tool.
= iqn.2010-10.org.openstack:(StrOpt) Prefix for iSCSI volumes
= on(StrOpt) Sets the behavior of the iSCSI target to either perform write-back(on) or write-through(off). This parameter is valid if iscsi_helper is set to tgtadm or iseradm.
= tgtadm(StrOpt) The name of the iSER target user-land tool to use
= $my_ip(StrOpt) The IP address that the iSER daemon is listening on
= 3260(IntOpt) The port that the iSER daemon is listening on
= iqn.2010-10.org.openstack:(StrOpt) Prefix for iSER volumes
= 300(IntOpt) Timeout for creating the volume to migrate to when performing volume migration (seconds)
= 3(IntOpt) The maximum number of times to rescan the iSER target to find volume
= 3(IntOpt) The maximum number of times to rescan targets to find volume
= None(StrOpt) The backend name for a given driver implementation
= zero(StrOpt) Method used to wipe old volumes
= None(StrOpt) The flag to pass to ionice to alter the i/o priority of the process used to zero a volume after deletion, for example "-c3" for idle only priority.
= 0(IntOpt) Size in MiB to wipe at start of old volumes. 0 => all
= cinder-volume-copy(StrOpt) The blkio cgroup name to be used to limit bandwidth of volume copy
= 0(IntOpt) The upper limit of bandwidth of volume copy. 0 => unlimited
= 1M(StrOpt) The default block size used when copying/clearing volumes
= cinder.volume.drivers.lvm.LVMVolumeDriver(StrOpt) Driver to use for volume creation
= cinder.volume.manager.VolumeManager(StrOpt) Full class name for the Manager for volume
= False(BoolOpt) Offload pending volume delete during volume service startup
= month(StrOpt) Time period for which to generate volume usages. The options are hour, day, month, or year.
= $state_path/volumes(StrOpt) Volume configuration file storage directory
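To show how the base storage and iSCSI target options are usually wired together, a hedged cinder.conf sketch; the option names (enabled_backends, volume_driver, volume_backend_name, iscsi_helper, iscsi_ip_address, iscsi_port, volume_clear) are assumed from the Cinder volume service and are not present in the stripped cells:

    [DEFAULT]
    # assumed option names; example values only
    enabled_backends = lvm-1
    [lvm-1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-1
    iscsi_helper = tgtadm
    iscsi_ip_address = 10.0.0.11
    iscsi_port = 3260
    volume_clear = zero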
diff --git a/doc/common/tables/cinder-storage_ceph.xml b/doc/common/tables/cinder-storage_ceph.xml
deleted file mode 100644
index f9585f2f6c..0000000000
--- a/doc/common/tables/cinder-storage_ceph.xml
+++ /dev/null
@@ -1,88 +0,0 @@
Description of Ceph storage configuration options
Configuration option = Default value | Description
[DEFAULT]
= -1(IntOpt) Timeout value (in seconds) used when connecting to ceph cluster. If value < 0, no timeout is set and default librados value is used.
= 5(IntOpt) Interval value (in seconds) between connection retries to ceph cluster.
= 3(IntOpt) Number of retries if connection to ceph cluster failed.
= (StrOpt) Path to the ceph configuration file
= ceph(StrOpt) The name of ceph cluster
= False(BoolOpt) Flatten volumes created from snapshots to remove dependency from volume to snapshot
= 5(IntOpt) Maximum number of nested volume clones that are taken before a flatten occurs. Set to 0 to disable cloning.
= rbd(StrOpt) The RADOS pool where rbd volumes are stored
= None(StrOpt) The libvirt uuid of the secret for the rbd_user volumes
= 4(IntOpt) Volumes will be chunked into objects of this size (in megabytes).
= None(StrOpt) The RADOS client name for accessing rbd volumes - only set when using cephx authentication
= None(StrOpt) Directory where temporary image files are stored when the volume driver does not write them directly to the volume. Warning: this option is now deprecated, please use image_conversion_dir instead.
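A minimal RBD backend sketch for cinder.conf, assuming the standard Ceph driver option names (volume_driver, rbd_pool, rbd_ceph_conf, rbd_user, rbd_secret_uuid, rbd_flatten_volume_from_snapshot), which the stripped table no longer shows; all values are placeholders:

    [ceph]
    # assumed option names; placeholder values
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = volumes
    rbd_ceph_conf = /etc/ceph/ceph.conf
    rbd_user = cinder
    rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
    rbd_flatten_volume_from_snapshot = False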
diff --git a/doc/common/tables/cinder-storage_glusterfs.xml b/doc/common/tables/cinder-storage_glusterfs.xml
deleted file mode 100644
index 77abc3f588..0000000000
--- a/doc/common/tables/cinder-storage_glusterfs.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of GlusterFS storage configuration options
Configuration option = Default value | Description
[DEFAULT]
= $state_path/backup_mount(StrOpt) Base dir containing mount point for gluster share.
= None(StrOpt) GlusterFS share in <hostname|ipv4addr|ipv6addr>:<gluster_vol_name> format. Eg: 1.2.3.4:backup_vol
= $state_path/mnt(StrOpt) Base dir containing mount points for gluster shares.
= /etc/cinder/glusterfs_shares(StrOpt) File with the list of available gluster shares
= thin(StrOpt) Provisioning type that will be used when creating volumes.
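An illustrative GlusterFS backend sketch for cinder.conf; the option names (volume_driver, glusterfs_shares_config, glusterfs_mount_point_base) are assumed from the Cinder GlusterFS driver and values are examples only:

    [glusterfs-1]
    # assumed option names; example values only
    volume_driver = cinder.volume.drivers.glusterfs.GlusterfsDriver
    glusterfs_shares_config = /etc/cinder/glusterfs_shares
    glusterfs_mount_point_base = $state_path/mnt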
diff --git a/doc/common/tables/cinder-storage_gpfs.xml b/doc/common/tables/cinder-storage_gpfs.xml
deleted file mode 100644
index c403fd85f5..0000000000
--- a/doc/common/tables/cinder-storage_gpfs.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of GPFS storage configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS.
= None(StrOpt) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently.
= 0(IntOpt) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth.
= None(StrOpt) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored.
= True(BoolOpt) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time.
= system(StrOpt) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used.
diff --git a/doc/common/tables/cinder-storage_nfs.xml b/doc/common/tables/cinder-storage_nfs.xml
deleted file mode 100644
index b23c269b5c..0000000000
--- a/doc/common/tables/cinder-storage_nfs.xml
+++ /dev/null
@@ -1,68 +0,0 @@
Description of NFS storage configuration options
Configuration option = Default value | Description
[DEFAULT]
= 3(IntOpt) The number of attempts to mount nfs shares before raising an error. At least one attempt will be made to mount an nfs share, regardless of the value specified.
= None(StrOpt) Mount options passed to the nfs client. See section of the nfs man page for details.
= $state_path/mnt(StrOpt) Base dir containing mount points for nfs shares.
= 1.0(FloatOpt) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid. Note that this option is deprecated in favor of "max_oversubscription_ratio" and will be removed in the Mitaka release.
= /etc/cinder/nfs_shares(StrOpt) File with the list of available nfs shares
= True(BoolOpt) Create volumes as sparsed files which take no space. If set to False, volume is created as a regular file. In such case volume creation takes a lot of time.
= 0.95(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination. Note that this option is deprecated in favor of "reserved_percentage" and will be removed in the Mitaka release.
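A minimal NFS backend sketch for cinder.conf, assuming the standard NFS driver option names (volume_driver, nfs_shares_config, nfs_mount_point_base, nfs_sparsed_volumes, nfs_mount_options); values are examples only:

    [nfs-1]
    # assumed option names; example values only
    volume_driver = cinder.volume.drivers.nfs.NfsDriver
    nfs_shares_config = /etc/cinder/nfs_shares
    nfs_mount_point_base = $state_path/mnt
    nfs_sparsed_volumes = True
    nfs_mount_options = vers=4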
diff --git a/doc/common/tables/cinder-storpool.xml b/doc/common/tables/cinder-storpool.xml
deleted file mode 100644
index c7bd77bdee..0000000000
--- a/doc/common/tables/cinder-storpool.xml
+++ /dev/null
@@ -1,30 +0,0 @@
Description of StorPool volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= 3(IntOpt) The default StorPool chain replication value. Used when creating a volume with no specified type if storpool_template is not set. Also used for calculating the apparent free space reported in the stats.
= None(StrOpt) The StorPool template for volumes with no type.
diff --git a/doc/common/tables/cinder-storwize.xml b/doc/common/tables/cinder-storwize.xml
deleted file mode 100644
index 311e5d5ddb..0000000000
--- a/doc/common/tables/cinder-storwize.xml
+++ /dev/null
@@ -1,104 +0,0 @@
Description of IBM Storwize driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Allow tenants to specify QOS on create
= iSCSI(StrOpt) Connection protocol (iSCSI/FC)
= 120(IntOpt) Maximum number of seconds to wait for FlashCopy to be prepared.
= True(BoolOpt) Configure CHAP authentication for iSCSI connections (Default: Enabled)
= True(BoolOpt) Allows vdisk to multi host mapping
= False(BoolOpt) Connect with multipath (FC only; iSCSI multipath is controlled by Nova)
= True(BoolOpt) Indicate whether svc driver is compatible for NPIV setup. If it is compatible, it will allow no wwpns being returned on get_conn_fc_wwpns during initialize_connection. It should always be set to True. It will be deprecated and removed in M release.
= None(StrOpt) If operating in stretched cluster mode, specify the name of the pool in which mirrored copies are stored. Example: "pool2"
= True(BoolOpt) Storage system autoexpand parameter for volumes (True/False)
= False(BoolOpt) Storage system compression option for volumes
= True(BoolOpt) Enable Easy Tier for volumes
= 256(IntOpt) Storage system grain size parameter for volumes (32/64/128/256)
= 0(IntOpt) The I/O group in which to allocate volumes
= 2(IntOpt) Storage system space-efficiency parameter for volumes (percentage)
= 0(IntOpt) Storage system threshold for volume capacity warnings (percentage)
= volpool(StrOpt) Storage system storage pool for volumes
diff --git a/doc/common/tables/cinder-tintri.xml b/doc/common/tables/cinder-tintri.xml
deleted file mode 100644
index 5ca652fd5a..0000000000
--- a/doc/common/tables/cinder-tintri.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of Tintri volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= v310(StrOpt) API version for the storage system
= None(StrOpt) The hostname (or IP address) for the storage system
= None(StrOpt) Password for the storage system
= None(StrOpt) User name for the storage system
diff --git a/doc/common/tables/cinder-violin.xml b/doc/common/tables/cinder-violin.xml
deleted file mode 100644
index 2180d8ab8d..0000000000
--- a/doc/common/tables/cinder-violin.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of Violin volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) IP address or hostname of mg-a
= None(StrOpt) IP address or hostname of mg-b
= False(BoolOpt) Use igroups to manage targets and initiators
= 300(IntOpt) Global backend request timeout, in seconds.
diff --git a/doc/common/tables/cinder-vmware.xml b/doc/common/tables/cinder-vmware.xml
deleted file mode 100644
index 14d1139f42..0000000000
--- a/doc/common/tables/cinder-vmware.xml
+++ /dev/null
@@ -1,96 +0,0 @@
Description of VMware configuration options
Configuration option = Default value | Description
[DEFAULT]
= 10(IntOpt) Number of times VMware ESX/vCenter server API must be retried upon connection related issues.
= None(StrOpt) CA bundle file to use in verifying the vCenter server certificate.
= None(MultiStrOpt) Name of a vCenter compute cluster where volumes should be created.
= None(StrOpt) IP address for connecting to VMware ESX/vCenter server.
= None(StrOpt) Password for authenticating with VMware ESX/vCenter server.
= None(StrOpt) Username for authenticating with VMware ESX/vCenter server.
= None(StrOpt) Optional string specifying the VMware vCenter server version. The driver attempts to retrieve the version from VMware vCenter server. Set this configuration only if you want to override the vCenter server version.
= 7200(IntOpt) Timeout in seconds for VMDK volume transfer between Cinder and Glance.
= False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "vmware_ca_file" is set.
= 100(IntOpt) Max number of objects to be retrieved per batch. Query results will be obtained in batches from the server and not in one shot. Server may still limit the count to something less than the configured value.
= 0.5(FloatOpt) The interval (in seconds) for polling remote tasks invoked on VMware ESX/vCenter server.
= /tmp(StrOpt) Directory where virtual disks are stored during volume backup and restore.
= Volumes(StrOpt) Name of the vCenter inventory folder that will contain Cinder volumes. This folder will be created under "OpenStack/<project_folder>", where project_folder is of format "Project (<volume_project_id>)".
= None(StrOpt) Optional VIM service WSDL Location e.g http://<server>/vimService.wsdl. Optional over-ride to default location for bug work-arounds.
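For illustration, a hedged cinder.conf sketch for the VMware VMDK driver; the option names (volume_driver, vmware_host_ip, vmware_host_username, vmware_host_password, vmware_volume_folder) are assumed and not recoverable from the stripped table; all values are placeholders:

    [vmware-1]
    # assumed option names; placeholder values
    volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
    vmware_host_ip = 192.168.0.20
    vmware_host_username = administrator@vsphere.local
    vmware_host_password = VCENTER_PASSWORD
    vmware_volume_folder = Volumes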
diff --git a/doc/common/tables/cinder-vzstorage.xml b/doc/common/tables/cinder-vzstorage.xml
deleted file mode 100644
index 7e568058cc..0000000000
--- a/doc/common/tables/cinder-vzstorage.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of Virtuozzo Storage volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(ListOpt) Mount options passed to the vzstorage client. See section of the pstorage-mount man page for details.
= $state_path/mnt(StrOpt) Base dir containing mount points for vzstorage shares.
= /etc/cinder/vzstorage_shares(StrOpt) File with the list of available vzstorage shares.
= True(BoolOpt) Create volumes as sparsed files which take no space rather than regular files when using raw format, in which case volume creation takes a lot of time.
= 0.95(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.
diff --git a/doc/common/tables/cinder-windows.xml b/doc/common/tables/cinder-windows.xml
deleted file mode 100644
index f8cd7372d1..0000000000
--- a/doc/common/tables/cinder-windows.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of Windows configuration options
Configuration option = Default value | Description
[DEFAULT]
= C:\iSCSIVirtualDisks(StrOpt) Path to store VHD backed volumes
diff --git a/doc/common/tables/cinder-xio.xml b/doc/common/tables/cinder-xio.xml
deleted file mode 100644
index 4522a2de92..0000000000
--- a/doc/common/tables/cinder-xio.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of X-IO volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Tell driver to use SSL for connection to backend storage if the driver supports it.
= 30(IntOpt) Number of retries to get completion status after issuing a command to ISE.
= 5(IntOpt) Number of retries (per port) when establishing connection to ISE management port.
= 1(IntOpt) Raid level for ISE volumes.
= 1(IntOpt) Interval (secs) between retries.
= 1(IntOpt) Default storage pool for volumes.
diff --git a/doc/common/tables/cinder-xiv.xml b/doc/common/tables/cinder-xiv.xml
deleted file mode 100644
index 61bc87582d..0000000000
--- a/doc/common/tables/cinder-xiv.xml
+++ /dev/null
@@ -1,68 +0,0 @@
Description of IBM XIV and DS8000 volume driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) Cluster name to use for creating volumes
= (StrOpt) IP address of SAN controller
= admin(StrOpt) Username for SAN controller
= (StrOpt) Password for SAN controller
= disabled(StrOpt) CHAP authentication mode, effective only for iscsi (disabled|enabled)
= iscsi(StrOpt) Connection type to the IBM Storage Array
= xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy(StrOpt) Proxy driver that connects to the IBM Storage Array
diff --git a/doc/common/tables/cinder-zeromq.xml b/doc/common/tables/cinder-zeromq.xml
deleted file mode 100644
index 5f64f1ebf3..0000000000
--- a/doc/common/tables/cinder-zeromq.xml
+++ /dev/null
@@ -1,76 +0,0 @@
Description of ZeroMQ configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
diff --git a/doc/common/tables/cinder-zfssa-iscsi.xml b/doc/common/tables/cinder-zfssa-iscsi.xml
deleted file mode 100644
index 63e26a11e9..0000000000
--- a/doc/common/tables/cinder-zfssa-iscsi.xml
+++ /dev/null
@@ -1,112 +0,0 @@
Description of ZFS Storage Appliance iSCSI driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) iSCSI initiator IQNs. (comma separated)
= (StrOpt) iSCSI initiators configuration.
= (StrOpt) iSCSI initiator group.
= (StrOpt) Secret of the iSCSI initiator CHAP user.
= (StrOpt) iSCSI initiator CHAP user (name).
= off(StrOpt) Data compression.
= latency(StrOpt) Synchronous write bias.
= False(BoolOpt) Flag to enable sparse (thin-provisioned): True, False.
= 8k(StrOpt) Block size.
= None(StrOpt) Storage pool name.
= None(StrOpt) Project name.
= (StrOpt) IP address used for replication data. (may be the same as the data IP)
= None(IntOpt) REST connection timeout. (seconds)
= tgt-grp(StrOpt) iSCSI target group name.
= None(StrOpt) Network interfaces of iSCSI targets. (comma separated)
= (StrOpt) Secret of the iSCSI target CHAP user.
= None(StrOpt) iSCSI target portal (Data-IP:Port, w.x.y.z:3260).
= (StrOpt) iSCSI target CHAP user (name).
diff --git a/doc/common/tables/cinder-zfssa-nfs.xml b/doc/common/tables/cinder-zfssa-nfs.xml
deleted file mode 100644
index 9dedd77b4f..0000000000
--- a/doc/common/tables/cinder-zfssa-nfs.xml
+++ /dev/null
@@ -1,88 +0,0 @@
Description of ZFS Storage Appliance NFS driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= os-cinder-cache(StrOpt) Name of directory inside zfssa_nfs_share where cache volumes are stored.
= os-cinder-cache(StrOpt) Name of ZFSSA project where cache volumes are stored.
= None(StrOpt) Data path IP address
= True(BoolOpt) Flag to enable local caching: True, False.
= 443(StrOpt) HTTPS port number
= (StrOpt) Options to be passed while mounting share over nfs
= (StrOpt) Storage pool name.
= NFSProject(StrOpt) Project name.
= nfs_share(StrOpt) Share name.
= off(StrOpt) Data compression.
= latency(StrOpt) Synchronous write bias-latency, throughput.
= None(IntOpt) REST connection timeout. (seconds)
diff --git a/doc/common/tables/cinder-zfssa.xml b/doc/common/tables/cinder-zfssa.xml
deleted file mode 100644
index 4fe6cbe5b4..0000000000
--- a/doc/common/tables/cinder-zfssa.xml
+++ /dev/null
@@ -1,26 +0,0 @@
Description of ZFS Storage Appliance iSCSI driver configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(IntOpt) REST connection timeout. (seconds)
diff --git a/doc/common/tables/cinder-zones.xml b/doc/common/tables/cinder-zones.xml
deleted file mode 100644
index 55bf0369cf..0000000000
--- a/doc/common/tables/cinder-zones.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of zones configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Ensure that the new volumes are the same AZ as snapshot or source volume
diff --git a/doc/common/tables/cinder-zoning.xml b/doc/common/tables/cinder-zoning.xml
deleted file mode 100644
index 82106939ae..0000000000
--- a/doc/common/tables/cinder-zoning.xml
+++ /dev/null
@@ -1,63 +0,0 @@
Description of zoning configuration options
Configuration option = Default value | Description
[DEFAULT]
= none(StrOpt) FC Zoning mode configured
[fc-zone-manager]
= None(StrOpt) Comma separated list of Fibre Channel fabric names. This list of names is used to retrieve other SAN credentials for connecting to each SAN fabric
= cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService(StrOpt) FC SAN Lookup Service
= cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver(StrOpt) FC Zone Driver responsible for zone management
= initiator-target(StrOpt) Zoning policy configured by user; valid values include "initiator-target" or "initiator"
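To sketch how Fibre Channel zone management is enabled, an illustrative cinder.conf fragment; the option names (zoning_mode, fc_fabric_names, zone_driver, zoning_policy) are assumed from the Cinder zone manager and the values mirror the defaults shown above:

    [DEFAULT]
    # assumed option names; example values only
    zoning_mode = fabric
    [fc-zone-manager]
    zone_driver = cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver
    fc_fabric_names = fabricA
    zoning_policy = initiator-target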
diff --git a/doc/common/tables/cinder-zoning_fabric.xml b/doc/common/tables/cinder-zoning_fabric.xml
deleted file mode 100644
index 1b7107b875..0000000000
--- a/doc/common/tables/cinder-zoning_fabric.xml
+++ /dev/null
@@ -1,72 +0,0 @@
Description of zoning fabrics configuration options
Configuration option = Default value | Description
[BRCD_FABRIC_EXAMPLE]
= (StrOpt) Management IP of fabric
= (StrOpt) Password for user
= 22(IntOpt) Connecting port
= (StrOpt) Fabric user ID
= None(StrOpt) Principal switch WWN of the fabric
= True(BoolOpt) overridden zoning activation state
= None(StrOpt) overridden zone name prefix
= initiator-target(StrOpt) overridden zoning policy
diff --git a/doc/common/tables/cinder-zoning_fabric_cisco.xml b/doc/common/tables/cinder-zoning_fabric_cisco.xml
deleted file mode 100644
index 952f73728b..0000000000
--- a/doc/common/tables/cinder-zoning_fabric_cisco.xml
+++ /dev/null
@@ -1,72 +0,0 @@
Description of Cisco zoning fabrics configuration options
Configuration option = Default value | Description
[CISCO_FABRIC_EXAMPLE]
= (StrOpt) Management IP of fabric
= (StrOpt) Password for user
= 22(IntOpt) Connecting port
= (StrOpt) Fabric user ID
= True(BoolOpt) overridden zoning activation state
= None(StrOpt) overridden zone name prefix
= initiator-target(StrOpt) overridden zoning policy
= None(StrOpt) VSAN of the Fabric
diff --git a/doc/common/tables/cinder-zoning_manager.xml b/doc/common/tables/cinder-zoning_manager.xml
deleted file mode 100644
index 6c5c035823..0000000000
--- a/doc/common/tables/cinder-zoning_manager.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of zoning manager configuration options
Configuration option = Default value | Description
[fc-zone-manager]
= cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI(StrOpt) Southbound connector for zoning operation
diff --git a/doc/common/tables/cinder-zoning_manager_cisco.xml b/doc/common/tables/cinder-zoning_manager_cisco.xml
deleted file mode 100644
index 9e05e20396..0000000000
--- a/doc/common/tables/cinder-zoning_manager_cisco.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of Cisco zoning manager configuration options
Configuration option = Default value | Description
[fc-zone-manager]
= cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI(StrOpt) Southbound connector for zoning operation
diff --git a/doc/common/tables/glance-amqp.xml b/doc/common/tables/glance-amqp.xml
deleted file mode 100644
index 6c953d41fa..0000000000
--- a/doc/common/tables/glance-amqp.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of AMQP configuration options
Configuration option = Default value | Description
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= image.localhost(StrOpt) Default publisher_id for outgoing notifications.
= (ListOpt) List of disabled notifications. A notification can be given either as a notification type to disable a single event, or as a notification group prefix to disable all events within a group. Example: if this config option is set to ["image.create", "metadef_namespace"], then "image.create" notification will not be sent after image is created and none of the notifications for metadefinition namespaces will be sent.
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
diff --git a/doc/common/tables/glance-api.xml b/doc/common/tables/glance-api.xml
deleted file mode 100644
index b38912d868..0000000000
--- a/doc/common/tables/glance-api.xml
+++ /dev/null
@@ -1,168 +0,0 @@
Description of API configuration options
Configuration option = Default value | Description
[DEFAULT]
= admin(StrOpt) Role used to identify an authenticated user as administrator.
= False(BoolOpt) Allow unauthenticated users to access the API with read-only privileges. This only applies when using ContextMiddleware.
= (ListOpt) A list of artifacts that are allowed in the format name or name-version. Empty list means that any artifact can be loaded.
= 900(IntOpt) Timeout for client connections' socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= True(BoolOpt) Deploy the v1 OpenStack Images API.
= True(BoolOpt) Deploy the v1 OpenStack Registry API.
= True(BoolOpt) Deploy the v2 OpenStack Images API.
= True(BoolOpt) Deploy the v2 OpenStack Registry API.
= False(BoolOpt) Deploy the v3 OpenStack Objects API.
= True(BoolOpt) If False, the server will return the header "Connection: close". If True, the server will return "Connection: Keep-Alive" in its responses. In order to close the client socket connection explicitly after the response is sent and read successfully by the client, you simply have to set this option to False when you create a wsgi server.
= 1099511627776(IntOpt) Maximum size of image a user can upload in bytes. Defaults to 1099511627776 bytes (1 TB). WARNING: this value should only be increased after careful consideration and must be set to a value under 8 EB (9223372036854775808).
= True(BoolOpt) When false, no artifacts can be loaded regardless of available_plugins. When true, artifacts can be loaded.
= location_order(StrOpt) This value sets what strategy will be used to determine the image location order. Currently two strategies are packaged with Glance 'location_order' and 'store_type'.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= 64(IntOpt) Limits request ID length.
= True(BoolOpt) When true, this option sets the owner of an image to be the tenant. Otherwise, the owner of the image will be the authenticated user issuing the request.
= None(StrOpt) Public url to use for versions endpoint. The default is None, which will use the request's host_url attribute to populate the URL base. If Glance is operating behind a proxy, you will want to change this to represent the proxy's URL.
= False(BoolOpt) Whether to pass through headers containing user and tenant information when making requests to the registry. This allows the registry to use the context middleware without keystonemiddleware's auth_token middleware, removing calls to the keystone auth service. It is recommended that when using this option, secure communication between glance api and glance registry is ensured by means other than auth_token middleware.
= False(BoolOpt) Whether to include the backend image locations in image properties. For example, if using the file system store a URL of "file:///path/to/image" will be returned to the user in the 'direct_url' meta-data field. Revealing storage location can be a security risk, so use this setting with caution! Setting this to true overrides show_image_direct_url.
= 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
= True(BoolOpt) Whether to pass through the user token when making requests to the registry. To prevent failures with token expiration during big files upload, it is recommended to set this parameter to False. If "use_user_token" is not in effect, then admin credentials can be specified.
[glance_store]
= file(StrOpt) Default scheme to use to store image data. The scheme must be registered by one of the stores defined by the 'stores' config option.
= 0(IntOpt) Minimum interval in seconds between updates of dynamic storage capabilities based on backend status. This is not a periodic routine; the update logic runs only when the interval has elapsed and a store operation has been triggered. The feature is enabled only when the option value is greater than zero.
= file, http(ListOpt) List of stores enabled
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[paste_deploy]
= None(StrOpt) Name of the paste configuration file.
= None(StrOpt) Partial name of a pipeline in your paste configuration file with the service name removed. For example, if your paste section name is [pipeline:glance-api-keystone] use the value "keystone"
[store_type_location_strategy]
= (ListOpt) The store names to use to get store preference order. The name must be registered by one of the stores defined by the 'stores' config option. This option will be applied when you using 'store_type' option as image location strategy defined by the 'location_strategy' config option.
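A minimal glance-api.conf sketch tying the store and paste options above together; the option names (stores, default_store, filesystem_store_datadir, flavor) are assumed from glance_store and Glance's paste deployment and are not shown in the stripped table:

    [glance_store]
    # assumed option names; example values only
    stores = file,http
    default_store = file
    filesystem_store_datadir = /var/lib/glance/images/

    [paste_deploy]
    flavor = keystone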
diff --git a/doc/common/tables/glance-auth_token.xml b/doc/common/tables/glance-auth_token.xml
deleted file mode 100644
index b8f8d5596c..0000000000
--- a/doc/common/tables/glance-auth_token.xml
+++ /dev/null
@@ -1,188 +0,0 @@
Description of authorization token configuration options
Configuration option = Default value | Description
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
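As a hedged example of the admin-credential style of keystonemiddleware configuration described above, a glance-api.conf sketch; the option names (auth_uri, identity_uri, admin_tenant_name, admin_user, admin_password) are assumed and all values are placeholders:

    [keystone_authtoken]
    # assumed option names; placeholder values
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_tenant_name = service
    admin_user = glance
    admin_password = GLANCE_PASS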
diff --git a/doc/common/tables/glance-ca.xml b/doc/common/tables/glance-ca.xml
deleted file mode 100644
index c39b87bdf0..0000000000
--- a/doc/common/tables/glance-ca.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of CA and SSL configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) CA certificate file to use to verify connecting clients.
= None(StrOpt) Certificate file to use when starting API server securely.
= None(StrOpt) Private key file to use when starting API server securely.
diff --git a/doc/common/tables/glance-cinder.xml b/doc/common/tables/glance-cinder.xml
deleted file mode 100644
index 1fc0047f04..0000000000
--- a/doc/common/tables/glance-cinder.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of cinder configuration options
Configuration option = Default value | Description
[glance_store]
= False(BoolOpt) Allow to perform insecure SSL requests to cinder
= None(StrOpt) Location of CA certificates file to use for cinder client requests.
= volume:cinder:publicURL(StrOpt) Info to match when looking for cinder in the service catalog. Format is : separated values of the form: <service_type>:<service_name>:<endpoint_type>
= None(StrOpt) Override service catalog lookup with template for cinder endpoint e.g. http://localhost:8776/v1/%(project_id)s
= 3(IntOpt) Number of cinderclient retries on failed http calls
diff --git a/doc/common/tables/glance-common.xml b/doc/common/tables/glance-common.xml
deleted file mode 100644
index 0991449882..0000000000
--- a/doc/common/tables/glance-common.xml
+++ /dev/null
@@ -1,164 +0,0 @@
Description of common configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Whether to allow users to specify image properties beyond what the image schema provides
= 1000(IntOpt) Maximum permissible number of items that could be returned by a request
= 4096(IntOpt) The backlog value that will be used when creating the TCP listener socket.
= 0.0.0.0(StrOpt) Address to bind the server. Useful when selecting a particular network interface.
= None(IntOpt) The port on which the server will listen.
= glance.db.sqlalchemy.api(StrOpt) Python module path of data access API
= sha256(StrOpt) Digest algorithm which will be used for digital signature. Use the command "openssl list-message-digest-algorithms" to get the available algorithms supported by the version of OpenSSL on the platform. Examples are "sha1", "sha256", "sha512", etc.
= 64(IntOpt) Size of executor thread pool.
= 10(IntOpt) Maximum number of locations allowed on an image. Negative values evaluate to unlimited.
= 128(IntOpt) Maximum number of image members per image. Negative values evaluate to unlimited.
= 128(IntOpt) Maximum number of properties allowed on an image. Negative values evaluate to unlimited.
= 128(IntOpt) Maximum number of tags allowed on an image. Negative values evaluate to unlimited.
= 25(IntOpt) Default value for the number of items returned by a request if not specified explicitly in the request
= None(ListOpt) Memcached servers or None for in process cache.
= None(StrOpt) AES key for encrypting store 'location' metadata. This includes, if used, Swift or S3 credentials. Should be set to a random string of length 16, 24 or 32 bytes
= /etc/glance/metadefs/(StrOpt) Path to the directory where json metadata files are stored
= None(StrOpt) The location of the property protection file. This file contains the rules for property protections and the roles/policies associated with it. If this config value is not specified, by default, property protections won't be enforced. If a value is specified and the file is not found, then the glance-api service will not start.
= roles(StrOpt) This config value indicates whether "roles" or "policies" are used in the property protection file.
= False(BoolOpt) Whether to include the backend image storage location in image properties. Revealing storage location can be a security risk, so use this setting with caution!
= 0(StrOpt) Set a system wide quota for every user. This value is the total capacity that a user can use across all storage systems. A value of 0 means unlimited. Optional unit can be specified for the value. Accepted units are B, KB, MB, GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no unit is specified then Bytes is assumed. Note that there should not be any space between value and unit and units are case sensitive.
= 4(IntOpt) The number of child process workers that will be created to service requests. The default will be equal to the number of CPUs available.
[glance_store]
= None(StrOpt) Region name of this node
[image_format]
= ami, ari, aki, bare, ovf, ova(ListOpt) Supported values for the 'container_format' image attribute
= ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, iso(ListOpt) Supported values for the 'disk_format' image attribute
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
[task]
= taskflow(StrOpt) Specifies which task executor to be used to run the task scripts.
= 48(IntOpt) Time in hours for which a task lives after, either succeeding or failing
= None(StrOpt) Work dir for asynchronous task operations. The directory set here will be used to operate over images - normally before they are imported in the destination store. When providing work dir, make sure enough space is provided for concurrent tasks to run efficiently without running out of space. A rough estimation can be done by multiplying the number of `max_workers` - or the N of workers running - by an average image size (e.g 500MB). The image size estimation should be done based on the average size in your deployment. Note that depending on the tasks running you may need to multiply this number by some factor depending on what the task does. For example, you may want to double the available size if image conversion is enabled. All this being said, remember these are just estimations and you should do them based on the worst case scenario and be prepared to act in case they were wrong.
diff --git a/doc/common/tables/glance-cors.xml b/doc/common/tables/glance-cors.xml
deleted file mode 100644
index 0f6e27d4db..0000000000
--- a/doc/common/tables/glance-cors.xml
+++ /dev/null
@@ -1,91 +0,0 @@
Description of CORS configuration options
Configuration option = Default value | Description
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
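An illustrative [cors] fragment for glance-api.conf; the option names (allowed_origin, allow_credentials, allow_methods, max_age) are assumed from oslo.middleware and the values are examples only:

    [cors]
    # assumed option names; example values only
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    allow_methods = GET,POST,PUT,DELETE,OPTIONS
    max_age = 3600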
diff --git a/doc/common/tables/glance-database.xml b/doc/common/tables/glance-database.xml
deleted file mode 100644
index a339dcfa5f..0000000000
--- a/doc/common/tables/glance-database.xml
+++ /dev/null
@@ -1,120 +0,0 @@
Description of database configuration options
Configuration option = Default value | Description
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
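A minimal [database] sketch for glance-api.conf, assuming the standard oslo.db option names (backend, connection, idle_timeout, max_pool_size); the connection string is a placeholder:

    [database]
    # assumed option names; placeholder values
    backend = sqlalchemy
    connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
    idle_timeout = 3600
    max_pool_size = 10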
diff --git a/doc/common/tables/glance-debug.xml b/doc/common/tables/glance-debug.xml
deleted file mode 100644
index 5181c56147..0000000000
--- a/doc/common/tables/glance-debug.xml
+++ /dev/null
@@ -1,26 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Enable eventlet backdoor. Acceptable values are 0, <port>, and <start>:<end>, where 0 results in listening on a random tcp port number; <port> results in listening on the specified port number (and not enabling backdoor if that port is in use); and <start>:<end> results in listening on the smallest unused port number within the specified range of port numbers. The chosen port is displayed in the service's log file.
diff --git a/doc/common/tables/glance-elasticsearch.xml b/doc/common/tables/glance-elasticsearch.xml
deleted file mode 100644
index ee55710637..0000000000
--- a/doc/common/tables/glance-elasticsearch.xml
+++ /dev/null
@@ -1,26 +0,0 @@
Description of Elasticsearch configuration options
Configuration option = Default value | Description
[elasticsearch]
= 127.0.0.1:9200(ListOpt) List of nodes where Elasticsearch instances are running. A single node should be defined as an IP address and port number.
diff --git a/doc/common/tables/glance-filesystem.xml b/doc/common/tables/glance-filesystem.xml
deleted file mode 100644
index c798cf01eb..0000000000
--- a/doc/common/tables/glance-filesystem.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of filesystem configuration options
Configuration option = Default value | Description
[glance_store]
= None(StrOpt) Directory to which the Filesystem backend store writes images.
= None(MultiStrOpt) List of directories and its priorities to which the Filesystem backend store writes images.
= 0(IntOpt) The required permission for created image file. In this way the user of another service, e.g. Nova, which consumes the image, can be the exclusive member of the group that owns the files created. Assigning it a value less than or equal to zero means do not change the default permission of the file. This value will be decoded as an octal digit.
= None(StrOpt) The path to a file which contains the metadata to be returned with any location associated with this store. The file must contain a valid JSON object. The object should contain the keys 'id' and 'mountpoint'. The value for both keys should be 'string'.
-
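The filesystem backend is configured in the [glance_store] section of glance-api.conf. A minimal sketch, assuming the usual glance_store option names and placeholder paths::

    [glance_store]
    # illustrative values only
    default_store = file
    filesystem_store_datadir = /var/lib/glance/images/
    filesystem_store_metadata_file = /etc/glance/filesystem_store_metadata.json
    filesystem_store_file_perm = 0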
diff --git a/doc/common/tables/glance-gridfs.xml b/doc/common/tables/glance-gridfs.xml
deleted file mode 100644
index 15b21e7874..0000000000
--- a/doc/common/tables/glance-gridfs.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of GridFS configuration options
Configuration option = Default valueDescription
[glance_store]
= None(StrOpt) Database to use
= None(StrOpt) Hostname or IP address of the instance to connect to, or a mongodb URI, or a list of hostnames / mongodb URIs. If host is an IPv6 literal it must be enclosed in '[' and ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for localhost)
-
diff --git a/doc/common/tables/glance-imagecache.xml b/doc/common/tables/glance-imagecache.xml
deleted file mode 100644
index 1834cd99b2..0000000000
--- a/doc/common/tables/glance-imagecache.xml
+++ /dev/null
@@ -1,72 +0,0 @@
Description of image cache configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Turn on/off delayed delete.
= None(StrOpt) Base directory that the Image Cache uses.
= sqlite(StrOpt) The driver to use for image cache management.
= 10737418240(IntOpt) The upper limit (the maximum size of accumulated cache in bytes) beyond which pruner, if running, starts cleaning the images cache.
= cache.db(StrOpt) The path to the sqlite file database that will be used for image cache management.
= 86400(IntOpt) The amount of time to let an image remain in the cache without being accessed.
= 1(IntOpt) The size of thread pool to be used for scrubbing images. The default is one, which signifies serial scrubbing. Any value above one indicates the max number of images that may be scrubbed in parallel.
= 0(IntOpt) The amount of time in seconds to delay before performing a delete.
-
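The image cache options above live in the [DEFAULT] section of glance-api.conf. A minimal sketch, assuming the usual glance option names and placeholder paths::

    [DEFAULT]
    # illustrative values only
    image_cache_dir = /var/lib/glance/image-cache/
    image_cache_driver = sqlite
    image_cache_max_size = 10737418240
    image_cache_stall_time = 86400
    delayed_delete = False
    scrub_time = 0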
diff --git a/doc/common/tables/glance-logging.xml b/doc/common/tables/glance-logging.xml
deleted file mode 100644
index 95978fe20f..0000000000
--- a/doc/common/tables/glance-logging.xml
+++ /dev/null
@@ -1,124 +0,0 @@
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
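These oslo.log options are shared across services and are set in the [DEFAULT] section. A minimal sketch with assumed option names and placeholder paths::

    [DEFAULT]
    # illustrative values only
    debug = False
    log_dir = /var/log/glance
    log_file = api.log
    use_syslog = False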
diff --git a/doc/common/tables/glance-policy.xml b/doc/common/tables/glance-policy.xml
deleted file mode 100644
index 862724e90d..0000000000
--- a/doc/common/tables/glance-policy.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of policy configuration options
Configuration option = Default valueDescription
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
-
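The oslo.policy options go into the [oslo_policy] section of the service configuration. A minimal sketch with the usual option names::

    [oslo_policy]
    # illustrative values only
    policy_file = policy.json
    policy_dirs = policy.d
    policy_default_rule = default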
diff --git a/doc/common/tables/glance-profiler.xml b/doc/common/tables/glance-profiler.xml
deleted file mode 100644
index 88f14454c1..0000000000
--- a/doc/common/tables/glance-profiler.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of profiler configuration options
Configuration option = Default valueDescription
[profiler]
= False(BoolOpt) If False fully disable profiling feature.
= False(BoolOpt) If False doesn't trace SQL requests.
-
diff --git a/doc/common/tables/glance-qpid.xml b/doc/common/tables/glance-qpid.xml
deleted file mode 100644
index 14acdb03ef..0000000000
--- a/doc/common/tables/glance-qpid.xml
+++ /dev/null
@@ -1,96 +0,0 @@
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this in the N release, but we must stay backward compatible in the meantime. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/glance-rabbitmq.xml b/doc/common/tables/glance-rabbitmq.xml
deleted file mode 100644
index 135e73740a..0000000000
--- a/doc/common/tables/glance-rabbitmq.xml
+++ /dev/null
@@ -1,136 +0,0 @@
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this in the N release, but we must stay backward compatible in the meantime. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
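The RabbitMQ driver is configured in the [oslo_messaging_rabbit] section. A minimal sketch, assuming the usual oslo.messaging option names and a placeholder host and password::

    [oslo_messaging_rabbit]
    # illustrative values only
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_use_ssl = False
    rabbit_ha_queues = False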
diff --git a/doc/common/tables/glance-rbd.xml b/doc/common/tables/glance-rbd.xml
deleted file mode 100644
index 0a78b27781..0000000000
--- a/doc/common/tables/glance-rbd.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of RADOS Block Devices (RBD) configuration options
Configuration option = Default valueDescription
[glance_store]
= 0(IntOpt) Timeout value (in seconds) used when connecting to ceph cluster. If value <= 0, no timeout is set and default librados value is used.
= /etc/ceph/ceph.conf(StrOpt) Ceph configuration file path. If <None>, librados will locate the default config. If using cephx authentication, this file should include a reference to the right keyring in a client.<USER> section
= 8(IntOpt) RADOS images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two.
= localhost(StrOpt) RADOS pool in which images are stored.
= None(StrOpt) RADOS user to authenticate as (only applicable if using Cephx. If <None>, a default will be chosen based on the client. section in rbd_store_ceph_conf)
-
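The RBD backend is configured in the [glance_store] section of glance-api.conf. A minimal sketch, assuming the usual glance_store option names and a placeholder Ceph user and pool::

    [glance_store]
    # illustrative values only
    default_store = rbd
    rbd_store_ceph_conf = /etc/ceph/ceph.conf
    rbd_store_user = glance
    rbd_store_pool = images
    rbd_store_chunk_size = 8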
diff --git a/doc/common/tables/glance-redis.xml b/doc/common/tables/glance-redis.xml
deleted file mode 100644
index 03af852f35..0000000000
--- a/doc/common/tables/glance-redis.xml
+++ /dev/null
@@ -1,67 +0,0 @@
Description of Redis configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/glance-registry.xml b/doc/common/tables/glance-registry.xml
deleted file mode 100644
index d7c378063a..0000000000
--- a/doc/common/tables/glance-registry.xml
+++ /dev/null
@@ -1,96 +0,0 @@
Description of registry configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The administrator's password. If "use_user_token" is not in effect, then admin credentials can be specified.
= None(StrOpt) The tenant name of the administrative user. If "use_user_token" is not in effect, then admin tenant name can be specified.
= None(StrOpt) The administrator's user name. If "use_user_token" is not in effect, then admin credentials can be specified.
= None(StrOpt) The region for the authentication service. If "use_user_token" is not in effect and using keystone auth, then region name can be specified.
= noauth(StrOpt) The strategy to use for authentication. If "use_user_token" is not in effect, then auth strategy can be specified.
= None(StrOpt) The URL to the keystone service. If "use_user_token" is not in effect and using keystone auth, then URL of keystone can be specified.
= None(StrOpt) The path to the certifying authority cert file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file.
= None(StrOpt) The path to the cert file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment variable to a filepath of the cert file.
= False(BoolOpt) When using SSL in connections to the registry server, do not require validation via a certifying authority. This is the registry's equivalent of specifying --insecure on the command line using glanceclient for the API.
= None(StrOpt) The path to the key file to use in SSL connections to the registry server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment variable to a filepath of the key file
= http(StrOpt) The protocol to use for communication with the registry server. Either http or https.
= 600(IntOpt) The period of time, in seconds, that the API server will wait for a registry request to complete. A value of 0 implies no timeout.
= 0.0.0.0(StrOpt) Address to find the registry server.
= 9191(IntOpt) Port the registry server is listening on.
-
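The registry client options above are set in the [DEFAULT] section of glance-api.conf. A minimal sketch with the usual option names::

    [DEFAULT]
    # illustrative values only
    registry_host = controller
    registry_port = 9191
    registry_client_protocol = http
    registry_client_timeout = 600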
diff --git a/doc/common/tables/glance-replicator.xml b/doc/common/tables/glance-replicator.xml
deleted file mode 100644
index dbe34303a5..0000000000
--- a/doc/common/tables/glance-replicator.xml
+++ /dev/null
@@ -1,72 +0,0 @@
Description of replicator configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(ListOpt) Arguments for the command
= 65536(IntOpt) Amount of data to transfer per HTTP write.
= None(StrOpt) Command to be given to replicator
= created_at date deleted_at location updated_at(StrOpt) List of fields to not replicate.
= (StrOpt) Pass in your authentication token if you have one. This is the token used for the master.
= False(BoolOpt) Only replicate metadata, not images.
= (StrOpt) Pass in your authentication token if you have one. This is the token used for the slave.
= (StrOpt) Pass in your authentication token if you have one. If you use this option the same token is used for both the master and the slave.
-
diff --git a/doc/common/tables/glance-rpc.xml b/doc/common/tables/glance-rpc.xml
deleted file mode 100644
index 1915796be5..0000000000
--- a/doc/common/tables/glance-rpc.xml
+++ /dev/null
@@ -1,142 +0,0 @@
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= glance.common.exception, exceptions(ListOpt) Modules of exceptions that are permitted to be recreated upon receiving exception data from an rpc call.
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises a timeout exception when the timeout expires.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
-
diff --git a/doc/common/tables/glance-s3.xml b/doc/common/tables/glance-s3.xml
deleted file mode 100644
index 9d9315e70a..0000000000
--- a/doc/common/tables/glance-s3.xml
+++ /dev/null
@@ -1,100 +0,0 @@
Description of S3 configuration options
Configuration option = Default valueDescription
[glance_store]
= None(StrOpt) The S3 query token access key.
= None(StrOpt) The S3 bucket to be used to store the Glance data.
= subdomain(StrOpt) The S3 calling format used to determine the bucket. Either subdomain or path can be used.
= False(BoolOpt) A boolean to determine if the S3 bucket should be created on upload if it does not exist or if an error should be returned to the user.
= False(BoolOpt) Enable the use of a proxy.
= None(StrOpt) The host where the S3 server is listening.
= 10(IntOpt) The multipart upload part size, in MB, that S3 should use when uploading parts. The size must be greater than or equal to 5 MB.
= 100(IntOpt) The size, in MB, at which S3 should start chunking image files and performing a multipart upload in S3.
= None(StrOpt) The local directory where uploads will be staged before they are transferred into S3.
= None(StrOpt) Address or hostname for the proxy server.
= None(StrOpt) The password to use when connecting over a proxy.
= 8080(IntOpt) The port to use when connecting over a proxy.
= None(StrOpt) The username to connect to the proxy.
= None(StrOpt) The S3 query token secret key.
= 10(IntOpt) The number of thread pools to perform a multipart upload in S3.
-
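The S3 backend is configured in the [glance_store] section. A minimal sketch, assuming the usual glance_store option names and placeholder credentials::

    [glance_store]
    # illustrative values only
    default_store = s3
    s3_store_host = s3.example.com
    s3_store_access_key = S3_ACCESS_KEY
    s3_store_secret_key = S3_SECRET_KEY
    s3_store_bucket = glance
    s3_store_create_bucket_on_put = True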
diff --git a/doc/common/tables/glance-scrubber.xml b/doc/common/tables/glance-scrubber.xml
deleted file mode 100644
index d07ed457b2..0000000000
--- a/doc/common/tables/glance-scrubber.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of scrubber configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 300(IntOpt) Loop time between checking for new items to schedule for delete.
-
diff --git a/doc/common/tables/glance-sheepdog.xml b/doc/common/tables/glance-sheepdog.xml
deleted file mode 100644
index 5f65aed52c..0000000000
--- a/doc/common/tables/glance-sheepdog.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of Sheepdog configuration options
Configuration option = Default valueDescription
[glance_store]
= localhost(StrOpt) IP address of sheep daemon.
= 64(IntOpt) Images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two.
= 7000(IntOpt) Port of sheep daemon.
-
diff --git a/doc/common/tables/glance-swift.xml b/doc/common/tables/glance-swift.xml
deleted file mode 100644
index fbe1abcfde..0000000000
--- a/doc/common/tables/glance-swift.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of swift configuration options
Configuration option = Default valueDescription
[DEFAULT]
= ref1(StrOpt) The reference to the default swift account/backing store parameters to use for adding new images.
= None(StrOpt) The address where the Swift authentication service is listening. (deprecated)
= None(StrOpt) The config file that has the swift account(s) configs.
= None(StrOpt) Auth key for the user authenticating against the Swift authentication service. (deprecated)
= None(StrOpt) The user to authenticate against the Swift authentication service (deprecated)
-
diff --git a/doc/common/tables/glance-taskflow.xml b/doc/common/tables/glance-taskflow.xml
deleted file mode 100644
index bd61e568c4..0000000000
--- a/doc/common/tables/glance-taskflow.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of TaskFlow configuration options
Configuration option = Default valueDescription
[taskflow_executor]
= None(StrOpt) The format to which images will be automatically converted.
= parallel(StrOpt) The mode in which the engine will run. Can be 'serial' or 'parallel'.
= 10(IntOpt) The number of parallel activities executed at the same time by the engine. The value can be greater than one when the engine mode is 'parallel'.
-
diff --git a/doc/common/tables/glance-testing.xml b/doc/common/tables/glance-testing.xml
deleted file mode 100644
index 2bf54d1ded..0000000000
--- a/doc/common/tables/glance-testing.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of testing configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The hostname/IP of the pydev process listening for debug connections
= 5678(IntOpt) The port on which a pydev process is listening for connections.
-
diff --git a/doc/common/tables/glance-vmware.xml b/doc/common/tables/glance-vmware.xml
deleted file mode 100644
index 2ec11b5992..0000000000
--- a/doc/common/tables/glance-vmware.xml
+++ /dev/null
@@ -1,80 +0,0 @@
Description of VMware configuration options
Configuration option = Default valueDescription
[glance_store]
= False(BoolOpt) Allow to perform insecure SSL requests to ESX/VC.
= 10(IntOpt) Number of times VMware ESX/VC server API must be retried upon connection related issues.
= ha-datacenter(StrOpt) DEPRECATED. Inventory path to a datacenter. If the vmware_server_host specified is an ESX/ESXi, the vmware_datacenter_path is optional. If specified, it should be "ha-datacenter". This option is deprecated in favor of vmware_datastores and will be removed in the Liberty release.
= None(StrOpt) DEPRECATED. Datastore associated with the datacenter. This option is deprecated in favor of vmware_datastores and will be removed in the Liberty release.
= None(MultiStrOpt) A list of datastores where the image can be stored. This option may be specified multiple times for specifying multiple datastores. Either one of vmware_datastore_name or vmware_datastores is required. The datastore name should be specified after its datacenter path, separated by ":". An optional weight may be given after the datastore name, separated again by ":". Thus, the required format becomes <datacenter_path>:<datastore_name>:<optional_weight>. When adding an image, the datastore with the highest weight will be selected, unless there is not enough free space available in cases where the image size is already known. If no weight is given, it is assumed to be zero and the directory will be considered for selection last. If multiple datastores have the same weight, then the one with the most free space available is selected.
= None(StrOpt) ESX/ESXi or vCenter Server target system. The server value can be an IP address or a DNS name.
= None(StrOpt) Password for authenticating with VMware ESX/VC server.
= None(StrOpt) Username for authenticating with VMware ESX/VC server.
= /openstack_glance(StrOpt) The name of the directory where the glance images will be stored in the VMware datastore.
= 5(IntOpt) The interval used for polling remote tasks invoked on VMware ESX/VC server.
-
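The VMware datastore backend is configured in the [glance_store] section. A minimal sketch, assuming the usual glance_store option names and placeholder vCenter details::

    [glance_store]
    # illustrative values only
    default_store = vsphere
    vmware_server_host = vcenter.example.com
    vmware_server_username = administrator@vsphere.local
    vmware_server_password = VMWARE_PASS
    vmware_datastores = dc1:datastore1:100
    vmware_store_image_dir = /openstack_glance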
diff --git a/doc/common/tables/glance-zeromq.xml b/doc/common/tables/glance-zeromq.xml
deleted file mode 100644
index 57150613bc..0000000000
--- a/doc/common/tables/glance-zeromq.xml
+++ /dev/null
@@ -1,76 +0,0 @@
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
diff --git a/doc/common/tables/heat-amqp.xml b/doc/common/tables/heat-amqp.xml
deleted file mode 100644
index 65b1893c16..0000000000
--- a/doc/common/tables/heat-amqp.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= INFO(StrOpt) Default notification level for outgoing notifications.
= None(StrOpt) Default publisher_id for outgoing notifications.
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
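These notification options are set in the [DEFAULT] section of heat.conf. A minimal sketch with the usual oslo.messaging option names::

    [DEFAULT]
    # illustrative values only
    notification_driver = messaging
    notification_topics = notifications
    control_exchange = openstack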
diff --git a/doc/common/tables/heat-api.xml b/doc/common/tables/heat-api.xml
deleted file mode 100644
index 8835f9e32a..0000000000
--- a/doc/common/tables/heat-api.xml
+++ /dev/null
@@ -1,248 +0,0 @@
Description of API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 5(IntOpt) Number of times to retry to bring a resource to a non-error state. Set to 0 to disable retries.
= False(BoolOpt) Enable the preview Stack Abandon feature.
= False(BoolOpt) Enable the preview Stack Adopt feature.
= False(BoolOpt) Encrypt template parameters that were marked as hidden and also all the resource properties before storing them in database.
= (StrOpt) URL of the Heat metadata server.
= heat_stack_user(StrOpt) Keystone role for heat template-defined users.
= None(StrOpt) URL of the Heat waitcondition server.
= (StrOpt) URL of the Heat CloudWatch server.
= data-processing-cluster(ListOpt) Stacks containing these tag names will be hidden. Multiple tags should be given in a comma-delimited list (eg. hidden_stack_tags=hide_me,me_too).
= 1048576(IntOpt) Maximum raw byte size of JSON request body. Should be larger than max_template_size.
= 4(IntOpt) Number of heat-engine processes to fork and run.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was removed by an SSL terminator proxy.
= 3600(IntOpt) Timeout in seconds for stack action (ie. create or update).
= None(StrOpt) Keystone username, a user with roles sufficient to manage users and projects in the stack_user_domain.
= None(StrOpt) Keystone password for stack_domain_admin user.
= False(BoolOpt) When this feature is enabled, scheduler hints identifying the heat stack context of a server or volume resource are passed to the configured schedulers in nova and cinder, for creates done using heat resource types OS::Cinder::Volume, OS::Nova::Server, and AWS::EC2::Instance. heat_root_stack_id will be set to the id of the root stack of the resource, heat_stack_id will be set to the id of the resource's parent stack, heat_stack_name will be set to the name of the resource's parent stack, heat_path_in_stack will be set to a list of tuples, (stackresourcename, stackname) with list[0] being (None, rootstackname), heat_resource_name will be set to the resource's name, and heat_resource_uuid will be set to the resource's orchestration id.
= None(StrOpt) Keystone domain ID which contains heat template-defined users. If this option is set, stack_user_domain_name option will be ignored.
= None(StrOpt) Keystone domain name which contains heat template-defined users. If `stack_user_domain_id` option is set, this option is ignored.
= (ListOpt) Subset of trustor roles to be delegated to heat. If left unset, all roles of a user will be delegated to heat when creating a stack.
[auth_password]
= (ListOpt) Allowed keystone endpoints for auth_uri when multi_cloud is enabled. At least one endpoint needs to be specified.
= False(BoolOpt) Allow orchestration of multiple clouds.
[ec2authtoken]
= (ListOpt) Allowed keystone endpoints for auth_uri when multi_cloud is enabled. At least one endpoint needs to be specified.
= None(StrOpt) Authentication Endpoint URI.
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= False(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
= False(BoolOpt) Allow orchestration of multiple clouds.
[eventlet_opts]
= 900(IntOpt) Timeout for client connections' socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= True(BoolOpt) If False, closes the client socket connection explicitly.
[heat_api]
= 4096(IntOpt) Number of backlog requests to configure the socket with.
= 0.0.0.0(StrOpt) Address to bind the server. Useful when selecting a particular network interface.
= 8004(IntOpt) The port on which the server will listen.
= None(StrOpt) Location of the SSL certificate file to use for SSL mode.
= None(StrOpt) Location of the SSL key file to use for enabling SSL mode.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
= 4(IntOpt) Number of workers for Heat service.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
[oslo_versionedobjects]
= False(BoolOpt) Make exception message format errors fatal
[paste_deploy]
= api-paste.ini(StrOpt) The API paste config file to use.
= None(StrOpt) The flavor to use.
-
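The heat-api server options above are split between [DEFAULT] and [heat_api] in heat.conf. A minimal sketch with assumed option names::

    [DEFAULT]
    # illustrative values only
    num_engine_workers = 4
    stack_action_timeout = 3600

    [heat_api]
    bind_host = 0.0.0.0
    bind_port = 8004
    workers = 4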
diff --git a/doc/common/tables/heat-auth_token.xml b/doc/common/tables/heat-auth_token.xml
deleted file mode 100644
index bb7950baa8..0000000000
--- a/doc/common/tables/heat-auth_token.xml
+++ /dev/null
@@ -1,192 +0,0 @@
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
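The keystonemiddleware options above go into the [keystone_authtoken] section of heat.conf. A minimal sketch, assuming the usual option names and placeholder credentials::

    [keystone_authtoken]
    # illustrative values only
    auth_uri = http://controller:5000/v2.0
    identity_uri = http://controller:35357
    admin_user = heat
    admin_password = HEAT_PASS
    admin_tenant_name = service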
diff --git a/doc/common/tables/heat-cfn_api.xml b/doc/common/tables/heat-cfn_api.xml
deleted file mode 100644
index 3cbf760ffe..0000000000
--- a/doc/common/tables/heat-cfn_api.xml
+++ /dev/null
@@ -1,83 +0,0 @@
Description of Cloudformation-compatible API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 1(StrOpt) Whether the instance connection to the CFN/CW API validates certs if SSL is used.
= 0(StrOpt) Whether the instance connection to the CFN/CW API is via https.
[heat_api_cfn]
= 4096(IntOpt) Number of backlog requests to configure the socket with.
= 0.0.0.0(StrOpt) Address to bind the server. Useful when selecting a particular network interface.
= 8000(IntOpt) The port on which the server will listen.
= None(StrOpt) Location of the SSL certificate file to use for SSL mode.
= None(StrOpt) Location of the SSL key file to use for enabling SSL mode.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
= 0(IntOpt) Number of workers for Heat service.
-
diff --git a/doc/common/tables/heat-clients.xml b/doc/common/tables/heat-clients.xml
deleted file mode 100644
index 0f5286e31b..0000000000
--- a/doc/common/tables/heat-clients.xml
+++ /dev/null
@@ -1,67 +0,0 @@
Description of clients configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Default region name used to get services endpoints.
[clients]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= publicURL(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= False(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
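The generic [clients] options provide SSL and endpoint defaults for all service clients; the per-service sections that follow (for example [clients_nova]) override them. A minimal sketch with the usual option names and placeholder paths::

    [clients]
    # illustrative values only
    endpoint_type = publicURL
    ca_file = /etc/ssl/certs/ca.pem
    insecure = False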
diff --git a/doc/common/tables/heat-clients_backends.xml b/doc/common/tables/heat-clients_backends.xml
deleted file mode 100644
index a9ae1ad312..0000000000
--- a/doc/common/tables/heat-clients_backends.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of client backends configuration options
Configuration option = Default valueDescription
[DEFAULT]
= heat.engine.clients.OpenStackClients(StrOpt) Fully qualified class name to use as a client backend.
-
diff --git a/doc/common/tables/heat-clients_ceilometer.xml b/doc/common/tables/heat-clients_ceilometer.xml
deleted file mode 100644
index 1922a7ddec..0000000000
--- a/doc/common/tables/heat-clients_ceilometer.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of ceilometer clients configuration options
Configuration option = Default valueDescription
[clients_ceilometer]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_cinder.xml b/doc/common/tables/heat-clients_cinder.xml
deleted file mode 100644
index 6bcb666231..0000000000
--- a/doc/common/tables/heat-clients_cinder.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of cinder clients configuration options
Configuration option = Default valueDescription
[clients_cinder]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= False(BoolOpt) Allow client's debug log output.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_glance.xml b/doc/common/tables/heat-clients_glance.xml
deleted file mode 100644
index b9936d0433..0000000000
--- a/doc/common/tables/heat-clients_glance.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of glance clients configuration options
Configuration option = Default valueDescription
[clients_glance]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_heat.xml b/doc/common/tables/heat-clients_heat.xml
deleted file mode 100644
index 9fe3156fdb..0000000000
--- a/doc/common/tables/heat-clients_heat.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of heat clients configuration options
Configuration option = Default valueDescription
[clients_heat]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
= (StrOpt) Optional heat url in format like http://0.0.0.0:8004/v1/%(tenant_id)s.
-
diff --git a/doc/common/tables/heat-clients_keystone.xml b/doc/common/tables/heat-clients_keystone.xml
deleted file mode 100644
index 11cb63848e..0000000000
--- a/doc/common/tables/heat-clients_keystone.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of keystone clients configuration options
Configuration option = Default valueDescription
[clients_keystone]
= (StrOpt) Unversioned keystone url in format like http://0.0.0.0:5000.
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_neutron.xml b/doc/common/tables/heat-clients_neutron.xml
deleted file mode 100644
index 755da7de92..0000000000
--- a/doc/common/tables/heat-clients_neutron.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of neutron clients configuration options
Configuration option = Default valueDescription
[clients_neutron]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_nova.xml b/doc/common/tables/heat-clients_nova.xml
deleted file mode 100644
index cb6af59c85..0000000000
--- a/doc/common/tables/heat-clients_nova.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of nova clients configuration options
Configuration option = Default valueDescription
[clients_nova]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= False(BoolOpt) Allow client's debug log output.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_sahara.xml b/doc/common/tables/heat-clients_sahara.xml
deleted file mode 100644
index af8f6ab8bb..0000000000
--- a/doc/common/tables/heat-clients_sahara.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of sahara clients configuration options
Configuration option = Default valueDescription
[clients_sahara]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_swift.xml b/doc/common/tables/heat-clients_swift.xml
deleted file mode 100644
index 402cce979f..0000000000
--- a/doc/common/tables/heat-clients_swift.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of swift clients configuration options
Configuration option = Default valueDescription
[clients_swift]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-clients_trove.xml b/doc/common/tables/heat-clients_trove.xml
deleted file mode 100644
index a6ddba0a99..0000000000
--- a/doc/common/tables/heat-clients_trove.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of trove clients configuration options
Configuration option = Default valueDescription
[clients_trove]
= None(StrOpt) Optional CA cert file to use in SSL connections.
= None(StrOpt) Optional PEM-formatted certificate chain file.
= None(StrOpt) Type of endpoint in Identity service catalog to use for communication with the OpenStack service.
= None(BoolOpt) If set, then the server's certificate will not be verified.
= None(StrOpt) Optional PEM-formatted file that contains the private key.
-
diff --git a/doc/common/tables/heat-cloudwatch_api.xml b/doc/common/tables/heat-cloudwatch_api.xml
deleted file mode 100644
index 1249184458..0000000000
--- a/doc/common/tables/heat-cloudwatch_api.xml
+++ /dev/null
@@ -1,83 +0,0 @@
Description of CloudWatch API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Enable the legacy OS::Heat::CWLiteAlarm resource.
= (StrOpt) URL of the Heat CloudWatch server.
[heat_api_cloudwatch]
= 4096(IntOpt) Number of backlog requests to configure the socket with.
= 0.0.0.0(StrOpt) Address to bind the server. Useful when selecting a particular network interface.
= 8003(IntOpt) The port on which the server will listen.
= None(StrOpt) Location of the SSL certificate file to use for SSL mode.
= None(StrOpt) Location of the SSL key file to use for enabling SSL mode.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
= 0(IntOpt) Number of workers for Heat service.
-
diff --git a/doc/common/tables/heat-common.xml b/doc/common/tables/heat-common.xml
deleted file mode 100644
index ba9da6f92e..0000000000
--- a/doc/common/tables/heat-common.xml
+++ /dev/null
@@ -1,177 +0,0 @@
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Enables the engine with the convergence architecture. All stacks with this option will be created using the convergence engine.
= CFN_SIGNAL(StrOpt) Template default for how the server should signal to heat with the deployment output values. CFN_SIGNAL will allow an HTTP POST to a CFN keypair signed URL (requires enabled heat-api-cfn). TEMP_URL_SIGNAL will create a Swift TempURL to be signaled via HTTP PUT (requires object-store endpoint which supports TempURL). HEAT_SIGNAL will allow calls to the Heat API resource-signal using the provided keystone credentials. ZAQAR_SIGNAL will create a dedicated zaqar queue to be signaled using the provided keystone credentials.
= POLL_SERVER_CFN(StrOpt) Template default for how the server should receive the metadata required for software configuration. POLL_SERVER_CFN will allow calls to the cfn API action DescribeStackResource authenticated with the provided keypair (requires enabled heat-api-cfn). POLL_SERVER_HEAT will allow calls to the Heat API resource-show using the provided keystone credentials (requires keystone v3 API, and configured stack_user_* config options). POLL_TEMP_URL will create and populate a Swift TempURL with metadata for polling (requires object-store endpoint which supports TempURL). ZAQAR_MESSAGE will create a dedicated zaqar queue and post the metadata for polling.
= trusts(StrOpt) Select deferred auth method, stored password or trusts.
= /etc/heat/environment.d(StrOpt) The directory to search for environment files.
= 240(IntOpt) Error wait time in seconds for stack action (ie. create or update).
= 10(IntOpt) Controls how many events will be pruned whenever a stack's events exceed max_events_per_stack. Set this lower to keep more events at the expense of more frequent purges.
= 64(IntOpt) Size of executor thread pool.
= localhost(StrOpt) Name of the engine node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.
= heat.common.heat_keystoneclient.KeystoneClientV3(StrOpt) Fully qualified class name to use as a keystone backend.
= None(ListOpt) Memcached servers or None for in process cache.
= (StrOpt) Password for Redis server (optional).
= 60(IntOpt) Seconds between running periodic tasks.
= /usr/lib64/heat, /usr/lib/heat, /usr/local/lib/heat, /usr/local/lib64/heat(ListOpt) List of directories to search for plug-ins.
= 6379(IntOpt) Use this port to connect to redis host.
= False(BoolOpt) (Optional) Uses a logging handler designed to watch the file system. When the log file is moved or removed, this handler will open a new log file with the specified path instantaneously. It makes sense only if the log-file option is specified and a Linux platform is used. This option is ignored if log_config_append is set.
[cache]
= dogpile.cache.null(StrOpt) Dogpile.cache backend module. It is recommended that Memcache with pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.
= [](MultiStrOpt) Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: "<argname>:<value>".
= cache.oslo(StrOpt) Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name.
= False(BoolOpt) Extra debugging from the cache backend (cache keys, get/set/delete/etc calls). This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to false.
= False(BoolOpt) Global toggle for caching.
= 600(IntOpt) Default TTL, in seconds, for any cached item in the dogpile.cache region. This applies to any cached method that doesn't have an explicit cache expiration time defined for it.
= 300(IntOpt) Number of seconds memcached server is considered dead before it is tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
= 10(IntOpt) Number of seconds that an operation will wait to get a memcache client connection.
= 10(IntOpt) Max total number of open connections to every memcached server. (oslo_cache.memcache_pool backend only).
= 60(IntOpt) Number of seconds a connection to memcached is held unused in the pool before it is closed. (oslo_cache.memcache_pool backend only).
= localhost:11211(ListOpt) Memcache servers in the format of "host:port". (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
= 3(IntOpt) Timeout in seconds for every call to a server. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
= (ListOpt) Proxy classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior.
[constraint_validation_cache]
= True(BoolOpt) Toggle to enable/disable caching when the Orchestration Engine validates property constraints of a stack. During property validation with constraints, the Orchestration Engine caches requests to other OpenStack services. Please note that the global toggle for oslo.cache (enabled=True in the [cache] group) must be enabled to use this feature.
= 60(IntOpt) TTL, in seconds, for any cached item in the dogpile.cache region used for caching of validation constraints.
[revision]
= unknown(StrOpt) Heat build revision. If you would prefer to manage your build revision separately, you can move this section to a different file and add it as another config option.
-
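The [cache] options above follow the standard oslo.cache layout. A minimal sketch of enabling memcached-backed caching in heat.conf, assuming the usual oslo.cache option names (the table omits them), could be::

    [cache]
    # Global toggle for caching (assumed name: enabled)
    enabled = true
    # Pooled memcache backend is recommended for production
    backend = oslo_cache.memcache_pool
    memcache_servers = 192.0.2.10:11211
    # Default TTL in seconds for cached items
    expiration_time = 600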
diff --git a/doc/common/tables/heat-cors.xml b/doc/common/tables/heat-cors.xml deleted file mode 100644 index b5929c3461..0000000000 --- a/doc/common/tables/heat-cors.xml +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CORS configuration options
Configuration option = Default valueDescription
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
-
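The same set of CORS options appears twice, once under [cors] and once under [cors.subdomain]. A rough example, using the common oslo.middleware CORS option names as an assumption (they are not shown in the table), might be::

    [cors]
    # Origin allowed to reach the API (assumed option name: allowed_origin)
    allowed_origin = https://dashboard.example.com
    allow_credentials = true
    allow_methods = GET,POST,PUT,DELETE,OPTIONS
    # Cache preflight responses for an hour
    max_age = 3600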
diff --git a/doc/common/tables/heat-crypt.xml b/doc/common/tables/heat-crypt.xml deleted file mode 100644 index 78f1d7b2ef..0000000000 --- a/doc/common/tables/heat-crypt.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of crypt configuration options
Configuration option = Default valueDescription
[DEFAULT]
= notgood but just long enough i t(StrOpt) Key used to encrypt authentication info in the database. Length of this key must be 32 characters.
-
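The single crypt option is the key Heat uses to encrypt authentication data in its database, and it must be exactly 32 characters long. A sketch, assuming the conventional option name::

    [DEFAULT]
    # Must be exactly 32 characters (assumed name: auth_encryption_key)
    auth_encryption_key = 0123456789abcdef0123456789abcdef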
diff --git a/doc/common/tables/heat-database.xml b/doc/common/tables/heat-database.xml deleted file mode 100644 index 57e39d6432..0000000000 --- a/doc/common/tables/heat-database.xml +++ /dev/null @@ -1,120 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of database configuration options
Configuration option = Default valueDescription
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
-
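These are the standard oslo.db options. A minimal sketch pointing Heat at a MySQL database, with option names assumed from oslo.db conventions rather than taken from the table::

    [database]
    # SQLAlchemy connection string (assumed name: connection)
    connection = mysql+pymysql://heat:HEAT_DBPASS@controller/heat
    max_pool_size = 10
    # Recycle idle connections after an hour
    idle_timeout = 3600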
diff --git a/doc/common/tables/heat-loadbalancer.xml b/doc/common/tables/heat-loadbalancer.xml deleted file mode 100644 index b6ef33491e..0000000000 --- a/doc/common/tables/heat-loadbalancer.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of load balancer configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Custom template for the built-in loadbalancer nested stack.
-
diff --git a/doc/common/tables/heat-logging.xml b/doc/common/tables/heat-logging.xml deleted file mode 100644 index 2eae7c13e9..0000000000 --- a/doc/common/tables/heat-logging.xml +++ /dev/null @@ -1,120 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
-
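Most deployments only touch a handful of these oslo.log options. A sketch, with option names assumed from oslo.log conventions (the table does not list them)::

    [DEFAULT]
    # Keep INFO-level logging; set to true for DEBUG output
    debug = false
    # Write per-service log files under this directory
    log_dir = /var/log/heat
    # Or route everything to syslog instead
    use_syslog = false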
diff --git a/doc/common/tables/heat-metadata_api.xml b/doc/common/tables/heat-metadata_api.xml deleted file mode 100644 index cd9b59df4b..0000000000 --- a/doc/common/tables/heat-metadata_api.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of metadata API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) URL of the Heat metadata server.
-
diff --git a/doc/common/tables/heat-notification.xml b/doc/common/tables/heat-notification.xml deleted file mode 100644 index c9868cc4f3..0000000000 --- a/doc/common/tables/heat-notification.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of notification configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Deprecated.
-
diff --git a/doc/common/tables/heat-qpid.xml b/doc/common/tables/heat-qpid.xml deleted file mode 100644 index 1fb00551d2..0000000000 --- a/doc/common/tables/heat-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other end has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/heat-quota.xml b/doc/common/tables/heat-quota.xml deleted file mode 100644 index 98d611c913..0000000000 --- a/doc/common/tables/heat-quota.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of quota configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 1000(IntOpt) Maximum events that will be available per stack. Older events will be deleted when this is reached. Set to 0 for unlimited events per stack.
= 5(IntOpt) Maximum depth allowed when using nested stacks.
= 1000(IntOpt) Maximum resources allowed per top-level stack. -1 stands for unlimited.
= 100(IntOpt) Maximum number of stacks any one tenant may have active at one time.
= 524288(IntOpt) Maximum raw byte size of any template.
-
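These limits guard the engine against oversized or runaway stacks. A sketch with the usual heat.conf option names assumed (they are not shown above)::

    [DEFAULT]
    # Cap stacks per tenant and resources per stack
    max_stacks_per_tenant = 100
    max_resources_per_stack = 1000
    # Nested stack depth and raw template size limits
    max_nested_stack_depth = 5
    max_template_size = 524288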
diff --git a/doc/common/tables/heat-rabbitmq.xml b/doc/common/tables/heat-rabbitmq.xml deleted file mode 100644 index 8274efb0b2..0000000000 --- a/doc/common/tables/heat-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other end has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
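A typical deployment only sets the broker address and credentials. A sketch assuming the standard oslo.messaging RabbitMQ option names (not reproduced in the table)::

    [oslo_messaging_rabbit]
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    # Enable heartbeats so dead connections are detected
    heartbeat_timeout_threshold = 60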
diff --git a/doc/common/tables/heat-redis.xml b/doc/common/tables/heat-redis.xml deleted file mode 100644 index fb78bfbcfe..0000000000 --- a/doc/common/tables/heat-redis.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/heat-rpc.xml b/doc/common/tables/heat-rpc.xml deleted file mode 100644 index 7ac17f8c6a..0000000000 --- a/doc/common/tables/heat-rpc.xml +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 2(IntOpt) RPC timeout for the engine liveness check that is used for stack locking.
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises a timeout exception when the timeout expires.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
-
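The RPC options select the messaging driver and tune call timeouts; the [oslo_concurrency] lock path must be writable by the service user. A sketch with assumed option names::

    [DEFAULT]
    rpc_backend = rabbit
    # Seconds to wait for an RPC call reply
    rpc_response_timeout = 60

    [oslo_concurrency]
    lock_path = /var/lib/heat/tmp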
diff --git a/doc/common/tables/heat-testing.xml b/doc/common/tables/heat-testing.xml deleted file mode 100644 index 13159f7e59..0000000000 --- a/doc/common/tables/heat-testing.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of testing configuration options
Configuration option = Default valueDescription
[profiler]
= False(BoolOpt) If False fully disable profiling feature.
= False(BoolOpt) If False do not trace SQL requests.
-
diff --git a/doc/common/tables/heat-trustee.xml b/doc/common/tables/heat-trustee.xml deleted file mode 100644 index d042432d72..0000000000 --- a/doc/common/tables/heat-trustee.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of trustee configuration options
Configuration option = Default valueDescription
[trustee]
= None(StrOpt) Name of the plugin to load
= None(StrOpt) Config Section from which to load plugin specific options
-
diff --git a/doc/common/tables/heat-waitcondition_api.xml b/doc/common/tables/heat-waitcondition_api.xml deleted file mode 100644 index 321f0e845b..0000000000 --- a/doc/common/tables/heat-waitcondition_api.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of waitcondition API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) URL of the Heat waitcondition server.
-
diff --git a/doc/common/tables/heat-zeromq.xml b/doc/common/tables/heat-zeromq.xml deleted file mode 100644 index e02cbd1e81..0000000000 --- a/doc/common/tables/heat-zeromq.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
diff --git a/doc/common/tables/ironic-agent.xml b/doc/common/tables/ironic-agent.xml deleted file mode 100644 index 9a4e186fa8..0000000000 --- a/doc/common/tables/ironic-agent.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of agent configuration options
Configuration option = Default valueDescription
[agent]
= v1(StrOpt) API version to use for communicating with the ramdisk agent.
= nofb nomodeset vga=normal(StrOpt) DEPRECATED. Additional append parameters for baremetal PXE boot. This option is deprecated and will be removed in Mitaka release. Please use [pxe]pxe_append_params instead.
= $pybasedir/drivers/modules/agent_config.template(StrOpt) DEPRECATED. Template file for PXE configuration. This option is deprecated and will be removed in Mitaka release. Please use [pxe]pxe_config_template instead.
= 300(IntOpt) Maximum interval (in seconds) for agent heartbeats.
= True(BoolOpt) Whether Ironic will manage booting of the agent ramdisk. If set to False, you will need to configure your mechanism to allow booting the agent ramdisk.
= 0(IntOpt) The memory size in MiB consumed by agent when it is booted on a bare metal node. This is used for checking if the image can be downloaded and deployed on the bare metal node after booting agent ramdisk. This may be set according to the memory consumed by the agent ramdisk image.
= 6(IntOpt) Number of times to retry getting power state to check if bare metal node has been powered off after a soft power off.
= 5(IntOpt) Amount of time (in seconds) to wait between polling the power state after triggering soft poweroff.
-
diff --git a/doc/common/tables/ironic-amqp.xml b/doc/common/tables/ironic-amqp.xml deleted file mode 100644 index e0c32024a5..0000000000 --- a/doc/common/tables/ironic-amqp.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
diff --git a/doc/common/tables/ironic-amt.xml b/doc/common/tables/ironic-amt.xml deleted file mode 100644 index 330ff08676..0000000000 --- a/doc/common/tables/ironic-amt.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMT configuration options
Configuration option = Default valueDescription
[amt]
= 10(IntOpt) Amount of time (in seconds) to wait before retrying an AMT operation
= 3(IntOpt) Maximum number of times to attempt an AMT operation before failing
= http(StrOpt) Protocol used for AMT endpoint; supports http/https
-
diff --git a/doc/common/tables/ironic-api.xml b/doc/common/tables/ironic-api.xml deleted file mode 100644 index 4e4709ab41..0000000000 --- a/doc/common/tables/ironic-api.xml +++ /dev/null @@ -1,128 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[api]
= 0.0.0.0(StrOpt) The IP address on which ironic-api listens.
= 1000(IntOpt) The maximum number of items returned in a single response from a collection resource.
= 6385(IntOpt) The TCP port on which ironic-api listens.
= None(StrOpt) Public URL to use when building the links to the API resources (for example, "https://ironic.rocks:6384"). If None the links will be built using the request's host URL. If the API is operating behind a proxy, you will want to change this to represent the proxy's URL. Defaults to None.
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_versionedobjects]
= False(BoolOpt) Make exception message format errors fatal
-
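For ironic-api, the listen address, port, and (behind a proxy) the public endpoint are the options most often changed. A sketch for ironic.conf, with the option names assumed rather than taken from the table::

    [api]
    host_ip = 0.0.0.0
    port = 6385
    # Set when the API sits behind a TLS-terminating proxy
    public_endpoint = https://ironic.example.com:6385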
diff --git a/doc/common/tables/ironic-auth.xml b/doc/common/tables/ironic-auth.xml deleted file mode 100644 index 3ddd24b8d3..0000000000 --- a/doc/common/tables/ironic-auth.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization configuration options
Configuration option = Default valueDescription
[DEFAULT]
= keystone(StrOpt) Authentication strategy used by ironic-api: one of "keystone" or "noauth". "noauth" should not be used in a production environment because all authentication will be disabled.
-
diff --git a/doc/common/tables/ironic-auth_token.xml b/doc/common/tables/ironic-auth_token.xml deleted file mode 100644 index 0081d76d13..0000000000 --- a/doc/common/tables/ironic-auth_token.xml +++ /dev/null @@ -1,192 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times to attempt reconnecting when communicating with the Identity API server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
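These are the standard keystonemiddleware options; the admin_* credentials are the service user Ironic uses to validate tokens. A hedged sketch using the conventional option names (the table above does not reproduce them)::

    [keystone_authtoken]
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_user = ironic
    admin_password = IRONIC_PASS
    admin_tenant_name = service
    # Cache validated tokens in memcached
    memcached_servers = controller:11211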
diff --git a/doc/common/tables/ironic-cisco_ucs.xml b/doc/common/tables/ironic-cisco_ucs.xml deleted file mode 100644 index 288125b621..0000000000 --- a/doc/common/tables/ironic-cisco_ucs.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Cisco UCS configuration options
Configuration option = Default valueDescription
[cimc]
= 10(IntOpt) Amount of time in seconds to wait in between power operations
= 6(IntOpt) Number of times a power operation needs to be retried
[cisco_ucs]
= 5(IntOpt) Amount of time in seconds to wait in between power operations
= 6(IntOpt) Number of times a power operation needs to be retried
-
diff --git a/doc/common/tables/ironic-common.xml b/doc/common/tables/ironic-common.xml deleted file mode 100644 index 6eb436650c..0000000000 --- a/doc/common/tables/ironic-common.xml +++ /dev/null @@ -1,116 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= /usr/local/bin(StrOpt) Directory where ironic binaries are installed.
= pxe_ipmitool(ListOpt) Specify the list of drivers to load during service initialization. Missing drivers, or drivers which fail to initialize, will prevent the conductor service from starting. The option default is a recommended set of production-oriented drivers. A complete list of drivers present on your system may be found by enumerating the "ironic.drivers" entrypoint. An example may be found in the developer documentation online.
= 64(IntOpt) Size of executor thread pool.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= True(BoolOpt) If True, convert backing images to "raw" disk image format.
= $pybasedir/common/grub_conf.template(StrOpt) Template file for grub configuration file.
= 1(IntOpt) [Experimental Feature] Number of hosts to map onto each hash partition. Setting this to more than one will cause additional conductor services to prepare deployment environments and potentially allow the Ironic cluster to recover more quickly if a conductor instance is terminated.
= 5(IntOpt) Exponent to determine number of hash partitions to use when distributing load across conductors. Larger values will result in more even distribution of load and less load when rebalancing the ring, but more memory usage. Number of partitions per conductor is (2^hash_partition_exponent). This determines the granularity of rebalancing: given 10 hosts, and an exponent of 2, there are 40 partitions in the ring. A few thousand partitions should make rebalancing smooth in most cases. The default is suitable for up to a few hundred conductors. Too many partitions have a CPU impact.
= sd-52009.dedibox.fr(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. However, the node name must be valid within an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
= /usr/lib/syslinux/isolinux.bin(StrOpt) Path to isolinux binary file.
= $pybasedir/common/isolinux_config.template(StrOpt) Template file for isolinux configuration file.
= None(ListOpt) Memcached servers or None for in process cache.
= 10.0.0.1(StrOpt) IP address of this host. If unset, will determine the IP programmatically. If unable to do so, will use "127.0.0.1".
= False(BoolOpt) Run image downloads and raw format conversions in parallel.
= 60(IntOpt) Seconds between running periodic tasks.
= /usr/lib/python/site-packages/ironic(StrOpt) Directory where the ironic python module is installed.
= /etc/ironic/rootwrap.conf(StrOpt) Path to the rootwrap configuration file to use for running commands as root.
= $pybasedir(StrOpt) Top-level directory for maintaining ironic's state.
= /tmp(StrOpt) Temporary working directory, default is Python temp dir.
-
diff --git a/doc/common/tables/ironic-conductor.xml b/doc/common/tables/ironic-conductor.xml deleted file mode 100644 index 83bfb458b2..0000000000 --- a/doc/common/tables/ironic-conductor.xml +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of conductor configuration options
Configuration option = Default valueDescription
[conductor]
= None(StrOpt) URL of Ironic API service. If not set ironic can get the current value from the keystone service catalog.
= 60(IntOpt) Interval between checks of provision timeouts, in seconds.
= 1800(IntOpt) Timeout (seconds) to wait for a callback from the ramdisk doing the cleaning. If the timeout is reached the node will be put in the "clean failed" provision state. Set to 0 to disable timeout.
= True(BoolOpt) Cleaning is a configurable set of steps, such as erasing disk drives, that are performed on the node to ensure it is in a baseline state and ready to be deployed to. This is done after instance deletion, and during the transition from a "managed" to "available" state. When enabled, the particular steps performed to clean a node depend on which driver that node is managed by; see the individual driver's documentation for details. NOTE: The introduction of the cleaning operation causes instance deletion to take significantly longer. In an environment where all tenants are trusted (e.g., because there is only one tenant), this option could be safely disabled.
= ironic_configdrive_container(StrOpt) Name of the Swift container to store config drive data. Used when configdrive_use_swift is True.
= False(BoolOpt) Whether to upload the config drive to Swift.
= 1800(IntOpt) Timeout (seconds) to wait for a callback from a deploy ramdisk. Set to 0 to disable timeout.
= True(BoolOpt) During sync_power_state, should the hardware power state be set to the state recorded in the database (True) or should the database be updated based on the hardware state (False).
= 10(IntOpt) Seconds between conductor heart beats.
= 60(IntOpt) Maximum time (in seconds) since the last check-in of a conductor. A conductor is considered inactive when this time has been exceeded.
= 1800(IntOpt) Timeout (seconds) for waiting for node inspection. 0 - unlimited.
= 3(IntOpt) Number of attempts to grab a node lock.
= 1(IntOpt) Seconds to sleep between node lock attempts.
= 8(IntOpt) Maximum number of worker threads that can be started simultaneously by a periodic task. Should be less than RPC thread pool size.
= 3(IntOpt) During sync_power_state failures, limit the number of times Ironic should try syncing the hardware node power state with the node power state in DB
= False(BoolOpt) Enable sending sensor data message via the notification bus
= 600(IntOpt) Seconds between conductor sending sensor data message to ceilometer via the notification bus.
= ALL(ListOpt) List of comma separated meter types which need to be sent to Ceilometer. The default value, "ALL", is a special value meaning send all the sensor data.
= 180(IntOpt) When conductors join or leave the cluster, existing conductors may need to update any persistent local state as nodes are moved around the cluster. This option controls how often, in seconds, each conductor will check for nodes that it should "take over". Set it to a negative value to disable the check entirely.
= 60(IntOpt) Interval between syncing the node power state to the database, in seconds.
= 100(IntOpt) The size of the workers greenthread pool.
-
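The conductor options mostly tune cleaning, deployment, and power-state sync timeouts. A short sketch, with names assumed from the usual ironic.conf layout::

    [conductor]
    # Run automated cleaning between instance deletions
    automated_clean = true
    clean_callback_timeout = 1800
    deploy_callback_timeout = 1800
    # How often to sync node power state to the database
    sync_power_state_interval = 60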
diff --git a/doc/common/tables/ironic-console.xml b/doc/common/tables/ironic-console.xml deleted file mode 100644 index 7719d271d3..0000000000 --- a/doc/common/tables/ironic-console.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of console configuration options
Configuration option = Default valueDescription
[console]
= 1(IntOpt) Time interval (in seconds) for checking the status of console subprocess.
= 10(IntOpt) Time (in seconds) to wait for the console subprocess to start.
= shellinaboxd(StrOpt) Path to serial console terminal program
= None(StrOpt) Directory containing the terminal SSL cert(PEM) for serial console access
= None(StrOpt) Directory for holding terminal pid files. If not specified, the temporary directory will be used.
-
diff --git a/doc/common/tables/ironic-database.xml b/doc/common/tables/ironic-database.xml deleted file mode 100644 index f3ca4db654..0000000000 --- a/doc/common/tables/ironic-database.xml +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of database configuration options
Configuration option = Default valueDescription
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= InnoDB(StrOpt) MySQL engine to use.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
-
diff --git a/doc/common/tables/ironic-debug.xml b/doc/common/tables/ironic-debug.xml deleted file mode 100644 index c97d2e4e8c..0000000000 --- a/doc/common/tables/ironic-debug.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Enable pecan debug mode. WARNING: this is insecure and should not be used in a production environment.
-
diff --git a/doc/common/tables/ironic-deploy.xml b/doc/common/tables/ironic-deploy.xml deleted file mode 100644 index 36f8da5193..0000000000 --- a/doc/common/tables/ironic-deploy.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of deploy configuration options
Configuration option = Default valueDescription
[deploy]
= 1M(StrOpt) Block size to use when writing to the nodes disk.
= 200(IntOpt) Size of EFI system partition in MiB when configuring UEFI systems for local boot.
= 1(IntOpt) Number of iterations to be run for erasing devices.
= None(IntOpt) Priority to run in-band erase devices via the Ironic Python Agent ramdisk. If unset, will use the priority set in the ramdisk (defaults to 10 for the GenericHardwareManager). If set to 0, will not run during cleaning.
= /httpboot(StrOpt) ironic-conductor node's HTTP root path.
= None(StrOpt) ironic-conductor node's HTTP server URL. Example: http://192.1.2.3:8080
= 3(IntOpt) Maximum attempts to verify an iSCSI connection is active, sleeping 1 second between attempts.
-
diff --git a/doc/common/tables/ironic-dhcp.xml b/doc/common/tables/ironic-dhcp.xml deleted file mode 100644 index d3601597c2..0000000000 --- a/doc/common/tables/ironic-dhcp.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of DHCP configuration options
Configuration option = Default valueDescription
[dhcp]
= neutron(StrOpt) DHCP provider to use. "neutron" uses Neutron, and "none" uses a no-op provider.
-
diff --git a/doc/common/tables/ironic-disk_partitioner.xml b/doc/common/tables/ironic-disk_partitioner.xml deleted file mode 100644 index 060193868d..0000000000 --- a/doc/common/tables/ironic-disk_partitioner.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of disk partitioner configuration options
Configuration option = Default valueDescription
[disk_partitioner]
= 1(IntOpt) After Ironic has completed creating the partition table, it continues to check for activity on the attached iSCSI device status at this interval prior to copying the image to the node, in seconds
= 20(IntOpt) The maximum number of times to check that the device is not accessed by another process. If the device is still busy after that, the disk partitioning will be treated as having failed.
-
diff --git a/doc/common/tables/ironic-drac.xml b/doc/common/tables/ironic-drac.xml deleted file mode 100644 index d3c79cf49a..0000000000 --- a/doc/common/tables/ironic-drac.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of DRAC configuration options
Configuration option = Default valueDescription
[drac]
= 5(IntOpt) In case there is a communication failure, the DRAC client is going to resend the request as many times as defined in this setting.
= 5(IntOpt) In case there is a communication failure, the DRAC client is going to wait for as many seconds as defined in this setting before resending the request.
-
diff --git a/doc/common/tables/ironic-glance.xml b/doc/common/tables/ironic-glance.xml deleted file mode 100644 index fd60c364df..0000000000 --- a/doc/common/tables/ironic-glance.xml +++ /dev/null @@ -1,100 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of glance configuration options
Configuration option = Default valueDescription
[glance]
= (ListOpt) A list of URL schemes that can be downloaded directly via the direct_url. Currently supported schemes: [file].
= keystone(StrOpt) Authentication strategy to use when connecting to glance. Only "keystone" and "noauth" are currently supported by ironic.
= False(BoolOpt) Allow to perform insecure SSL (https) requests to glance.
= None(ListOpt) A list of the glance api servers available to ironic. Prefix with https:// for SSL-based glance API servers. Format is [hostname|IP]:port.
= $my_ip(StrOpt) Default glance hostname or IP address.
= 0(IntOpt) Number of retries when downloading an image from glance.
= 9292(IntOpt) Default glance port.
= http(StrOpt) Default protocol to use when connecting to glance. Set to https for SSL.
= None(StrOpt) The account that Glance uses to communicate with Swift. The format is "AUTH_uuid". "uuid" is the UUID for the account configured in the glance-api.conf. Required for temporary URLs. For example: "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary URL format: "endpoint_url/api_version/account/container/object_id"
= v1(StrOpt) The Swift API version to create a temporary URL for. Defaults to "v1". Swift temporary URL format: "endpoint_url/api_version/account/container/object_id"
= glance(StrOpt) The Swift container Glance is configured to store its images in. Defaults to "glance", which is the default in glance-api.conf. Swift temporary URL format: "endpoint_url/api_version/account/container/object_id"
= None(StrOpt) The "endpoint" (scheme, hostname, optional port) for the Swift URL of the form "endpoint_url/api_version/account/container/object_id". Do not include trailing "/". For example, use "https://swift.example.com". Required for temporary URLs.
= 0(IntOpt) This should match a config by the same name in the Glance configuration file. When set to 0, a single-tenant store will only use one container to store all images. When set to an integer value between 1 and 32, a single-tenant store will use multiple containers to store images, and this value will determine how many containers are created.
= 1200(IntOpt) The length of time in seconds that the temporary URL will be valid for. Defaults to 20 minutes. If some deploys get a 401 response code when trying to download from the temporary URL, try raising this duration.
= None(StrOpt) The secret token given to Swift to allow temporary URL downloads. Required for temporary URLs.
-
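The Swift temporary-URL options let the agent ramdisk download images directly from the Glance backend. A sketch with assumed option names and placeholder values::

    [glance]
    glance_api_servers = controller:9292
    # Required for temporary URLs; values below are placeholders
    swift_endpoint_url = https://swift.example.com
    swift_account = AUTH_0123456789abcdef0123456789abcdef
    swift_container = glance
    swift_temp_url_key = SECRET_KEY
    swift_temp_url_duration = 1200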
diff --git a/doc/common/tables/ironic-iboot.xml b/doc/common/tables/ironic-iboot.xml deleted file mode 100644 index 78f3f47609..0000000000 --- a/doc/common/tables/ironic-iboot.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of iBoot Web Power Switch configuration options
Configuration option = Default valueDescription
[iboot]
= 3(IntOpt) Maximum retries for iBoot operations
= 1(IntOpt) Time between retry attempts for iBoot operations
-
diff --git a/doc/common/tables/ironic-ilo.xml b/doc/common/tables/ironic-ilo.xml deleted file mode 100644 index d033d2c364..0000000000 --- a/doc/common/tables/ironic-ilo.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of iLO configuration options
Configuration option = Default valueDescription
[ilo]
= 0(IntOpt) Priority for clear_secure_boot_keys clean step. This step is not enabled by default. It can be enabled to clear all secure boot keys enrolled with iLO.
= None(IntOpt) Priority for erase devices clean step. If unset, it defaults to 10. If set to 0, the step will be disabled and will not run during cleaning.
= 10(IntOpt) Priority for reset_bios_to_default clean step.
= 1(IntOpt) Priority for reset_ilo clean step.
= 30(IntOpt) Priority for reset_ilo_credential clean step. This step requires "ilo_change_password" parameter to be updated in the node's driver_info with the new password.
= 20(IntOpt) Priority for reset_secure_boot_keys clean step. This step will reset the secure boot keys to manufacturing defaults.
= 443(IntOpt) Port to be used for iLO operations
= 60(IntOpt) Timeout (in seconds) for iLO operations
= 6(IntOpt) Number of times a power operation needs to be retried
= 2(IntOpt) Amount of time in seconds to wait in between power operations
= ironic_ilo_container(StrOpt) The Swift iLO container to store data.
= 900(IntOpt) Amount of time in seconds for Swift objects to auto-expire.
= False(BoolOpt) Set this to True to use http web server to host floppy images and generated boot ISO. This requires http_root and http_url to be configured in the [deploy] section of the config file. If this is set to False, then Ironic will use Swift to host the floppy images and generated boot_iso.
-
diff --git a/doc/common/tables/ironic-inspector.xml b/doc/common/tables/ironic-inspector.xml deleted file mode 100644 index bce11040c3..0000000000 --- a/doc/common/tables/ironic-inspector.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of inspector configuration options
Configuration option = Default valueDescription
[inspector]
= False(BoolOpt) Whether to enable inspection using ironic-inspector.
= None(StrOpt) ironic-inspector HTTP endpoint. If this is not set, the ironic-inspector client default (http://127.0.0.1:5050) will be used.
= 60(IntOpt) Period (in seconds) to check the status of nodes on inspection.
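For example, enabling node inspection in ironic.conf might look like the sketch below (option names assumed from the ironic-inspector integration; they are not shown in this table):

    [inspector]
    enabled = True
    # Assumed name; points at the ironic-inspector API endpoint.
    service_url = http://127.0.0.1:5050
    status_check_period = 60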
diff --git a/doc/common/tables/ironic-ipmi.xml b/doc/common/tables/ironic-ipmi.xml
deleted file mode 100644
index c6ab0b7fe8..0000000000
--- a/doc/common/tables/ironic-ipmi.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of IPMI configuration options
Configuration option = Default value | Description
[ipmi]
= 5(IntOpt) Minimum time, in seconds, between IPMI operations sent to a server. There is a risk with some hardware that setting this too low may cause the BMC to crash. Recommended setting is 5 seconds.
= 60(IntOpt) Maximum time in seconds to retry IPMI operations. There is a tradeoff when setting this value. Setting it too low may cause older BMCs to crash and require a hard reset. However, setting it too high can cause the sync power state periodic task to hang when there are slow or unresponsive BMCs.
diff --git a/doc/common/tables/ironic-irmc.xml b/doc/common/tables/ironic-irmc.xml
deleted file mode 100644
index fa4d2410d4..0000000000
--- a/doc/common/tables/ironic-irmc.xml
+++ /dev/null
@@ -1,84 +0,0 @@
Description of iRMC configuration options
Configuration option = Default value | Description
[irmc]
= basic(StrOpt) Authentication method to be used for iRMC operations, either "basic" or "digest"
= 60(IntOpt) Timeout (in seconds) for iRMC operations
= 443(IntOpt) Port to be used for iRMC operations, either 80 or 443
= None(StrOpt) IP of remote image server
= share(StrOpt) share name of remote_image_server
= /remote_image_share_root(StrOpt) Ironic conductor node's "NFS" or "CIFS" root path
= CIFS(StrOpt) Share type of virtual media, either "NFS" or "CIFS"
= (StrOpt) Domain name of remote_image_user_name
= None(StrOpt) User name of remote_image_server
= None(StrOpt) Password of remote_image_user_name
= ipmitool(StrOpt) Sensor data retrieval method, either "ipmitool" or "scci"
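A hedged example of a matching ironic.conf [irmc] section; option names are assumed from the upstream iRMC driver and are not part of this table.

    [irmc]
    auth_method = basic
    client_timeout = 60
    port = 443
    # Remote image share used for virtual media boot (assumed names).
    remote_image_server = 192.0.2.20
    remote_image_share_type = CIFS
    remote_image_share_name = share
    remote_image_user_name = imageuser
    remote_image_user_password = IMAGE_PASS
    sensor_method = ipmitool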
diff --git a/doc/common/tables/ironic-keystone.xml b/doc/common/tables/ironic-keystone.xml
deleted file mode 100644
index e77e8618ff..0000000000
--- a/doc/common/tables/ironic-keystone.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of keystone configuration options
Configuration option = Default value | Description
[keystone]
= None(StrOpt) The region used for getting endpoints of OpenStack services.
diff --git a/doc/common/tables/ironic-logging.xml b/doc/common/tables/ironic-logging.xml
deleted file mode 100644
index 3f4c001027..0000000000
--- a/doc/common/tables/ironic-logging.xml
+++ /dev/null
@@ -1,124 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Used if there is a formatting error when generating an exception message (a programming error). If True, raise an exception; if False, use the unformatted message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
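These are the standard oslo.log options. A minimal sketch (option names assumed from oslo.log, since they are omitted from this table):

    [DEFAULT]
    debug = False
    log_dir = /var/log/ironic
    use_syslog = False
    # Quiet chatty libraries while keeping the service itself at INFO.
    default_log_levels = amqp=WARN,sqlalchemy=WARN,requests.packages.urllib3.connectionpool=WARN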
diff --git a/doc/common/tables/ironic-neutron.xml b/doc/common/tables/ironic-neutron.xml
deleted file mode 100644
index 600023d41f..0000000000
--- a/doc/common/tables/ironic-neutron.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of neutron configuration options
Configuration option = Default value | Description
[neutron]
= keystone(StrOpt) Default authentication strategy to use when connecting to neutron. Can be either "keystone" or "noauth". Running neutron in noauth mode (related to but not affected by this setting) is insecure and should only be used for testing.
= None(StrOpt) UUID of the network to create Neutron ports on when booting to a ramdisk for cleaning/zapping using Neutron DHCP.
= 3(IntOpt) Client retries in the case of a failed request.
= http://$my_ip:9696(StrOpt) URL for connecting to neutron.
= 30(IntOpt) Timeout value for connecting to neutron in seconds.
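An illustrative ironic.conf [neutron] sketch for the options above (option names assumed from the upstream ironic project; not given in this table):

    [neutron]
    url = http://192.0.2.5:9696
    url_timeout = 30
    retries = 3
    auth_strategy = keystone
    # Assumed name for the cleaning network option described above.
    cleaning_network_uuid = 11111111-2222-3333-4444-555555555555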
diff --git a/doc/common/tables/ironic-policy.xml b/doc/common/tables/ironic-policy.xml
deleted file mode 100644
index c9837a6f09..0000000000
--- a/doc/common/tables/ironic-policy.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of policy configuration options
Configuration option = Default value | Description
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
diff --git a/doc/common/tables/ironic-pxe.xml b/doc/common/tables/ironic-pxe.xml
deleted file mode 100644
index 283ca1f286..0000000000
--- a/doc/common/tables/ironic-pxe.xml
+++ /dev/null
@@ -1,104 +0,0 @@
Description of PXE configuration options
Configuration option = Default value | Description
[pxe]
= ext4(StrOpt) Default file system format for ephemeral partition, if one is created.
= cciss/c0d0,sda,hda,vda(StrOpt) The disk devices to scan while doing the deploy.
= 20480(IntOpt) Maximum size (in MiB) of cache for master images, including those in use.
= 10080(IntOpt) Maximum TTL (in minutes) for old master images in cache.
= /var/lib/ironic/images/(StrOpt) On the ironic-conductor node, directory where images are stored on disk.
= /var/lib/ironic/master_images(StrOpt) On the ironic-conductor node, directory where master instance images are stored on disk.
= $pybasedir/drivers/modules/boot.ipxe(StrOpt) On ironic-conductor node, the path to the main iPXE script file.
= False(BoolOpt) Enable iPXE boot.
= nofb nomodeset vga=normal(StrOpt) Additional append parameters for baremetal PXE boot.
= pxelinux.0(StrOpt) Bootfile DHCP parameter.
= $pybasedir/drivers/modules/pxe_config.template(StrOpt) On ironic-conductor node, template file for PXE configuration.
= /tftpboot/master_images(StrOpt) On ironic-conductor node, directory where master TFTP images are stored on disk.
= /tftpboot(StrOpt) ironic-conductor node's TFTP root path.
= $my_ip(StrOpt) IP address of ironic-conductor node's TFTP server.
= elilo.efi(StrOpt) Bootfile DHCP parameter for UEFI boot mode.
= $pybasedir/drivers/modules/elilo_efi_pxe_config.template(StrOpt) On ironic-conductor node, template file for PXE configuration for UEFI boot loader.
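A hedged ironic.conf [pxe] sketch enabling iPXE on the conductor; option names are assumed from the upstream ironic project, as the table omits them.

    [pxe]
    ipxe_enabled = True
    tftp_root = /tftpboot
    tftp_server = 192.0.2.10
    pxe_append_params = nofb nomodeset vga=normal
    image_cache_size = 20480
    image_cache_ttl = 10080
    # Assumed name for the UEFI bootfile DHCP parameter listed above.
    uefi_pxe_bootfile_name = elilo.efi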
diff --git a/doc/common/tables/ironic-qpid.xml b/doc/common/tables/ironic-qpid.xml
deleted file mode 100644
index e70b460bba..0000000000
--- a/doc/common/tables/ironic-qpid.xml
+++ /dev/null
@@ -1,96 +0,0 @@
Description of Qpid configuration options
Configuration option = Default value | Description
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload and a second one to ensure the other side has finished sending the payload. We are going to remove this in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
diff --git a/doc/common/tables/ironic-rabbitmq.xml b/doc/common/tables/ironic-rabbitmq.xml
deleted file mode 100644
index aedd49a874..0000000000
--- a/doc/common/tables/ironic-rabbitmq.xml
+++ /dev/null
@@ -1,136 +0,0 @@
Description of RabbitMQ configuration options
Configuration option = Default value | Description
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How many times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if the heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload and a second one to ensure the other side has finished sending the payload. We are going to remove this in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
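A typical [oslo_messaging_rabbit] sketch covering the options above; option names are assumed from oslo.messaging and are not reproduced in this table.

    [oslo_messaging_rabbit]
    rabbit_host = controller.example.com
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_virtual_host = /
    rabbit_ha_queues = False
    # Heartbeats are EXPERIMENTAL per the description above.
    heartbeat_timeout_threshold = 60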
diff --git a/doc/common/tables/ironic-redis.xml b/doc/common/tables/ironic-redis.xml
deleted file mode 100644
index 14133740b1..0000000000
--- a/doc/common/tables/ironic-redis.xml
+++ /dev/null
@@ -1,63 +0,0 @@
Description of Redis configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
diff --git a/doc/common/tables/ironic-rpc.xml b/doc/common/tables/ironic-rpc.xml
deleted file mode 100644
index d2af77385d..0000000000
--- a/doc/common/tables/ironic-rpc.xml
+++ /dev/null
@@ -1,138 +0,0 @@
Description of RPC configuration options
Configuration option = Default value | Description
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) Address prefix used when broadcasting to all servers.
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) Address prefix used when sending to any server in the group.
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) Address prefix used when sending to a specific server.
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
diff --git a/doc/common/tables/ironic-seamicro.xml b/doc/common/tables/ironic-seamicro.xml
deleted file mode 100644
index 51d922c8c3..0000000000
--- a/doc/common/tables/ironic-seamicro.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of SeaMicro configuration options
Configuration option = Default value | Description
[seamicro]
= 10(IntOpt) Seconds to wait for power action to be completed
= 3(IntOpt) Maximum retries for SeaMicro operations
diff --git a/doc/common/tables/ironic-snmp.xml b/doc/common/tables/ironic-snmp.xml
deleted file mode 100644
index 55caef6891..0000000000
--- a/doc/common/tables/ironic-snmp.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of SNMP configuration options
Configuration option = Default value | Description
[snmp]
= 10(IntOpt) Seconds to wait for power action to be completed
diff --git a/doc/common/tables/ironic-ssh.xml b/doc/common/tables/ironic-ssh.xml
deleted file mode 100644
index 3b7ed09776..0000000000
--- a/doc/common/tables/ironic-ssh.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of SSH configuration options
Configuration option = Default value | Description
[ssh]
= qemu:///system(StrOpt) libvirt URI
diff --git a/doc/common/tables/ironic-swift.xml b/doc/common/tables/ironic-swift.xml
deleted file mode 100644
index 74829e111f..0000000000
--- a/doc/common/tables/ironic-swift.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of swift configuration options
Configuration option = Default value | Description
[swift]
= 2(IntOpt) Maximum number of times to retry a Swift request, before failing.
diff --git a/doc/common/tables/ironic-virtualbox.xml b/doc/common/tables/ironic-virtualbox.xml
deleted file mode 100644
index 1aa897ee9e..0000000000
--- a/doc/common/tables/ironic-virtualbox.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of VirtualBox configuration options
Configuration option = Default value | Description
[virtualbox]
= 18083(IntOpt) Port on which VirtualBox web service is listening.
diff --git a/doc/common/tables/ironic-zeromq.xml b/doc/common/tables/ironic-zeromq.xml
deleted file mode 100644
index 04466dc0de..0000000000
--- a/doc/common/tables/ironic-zeromq.xml
+++ /dev/null
@@ -1,76 +0,0 @@
Description of ZeroMQ configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses a broker or not.
diff --git a/doc/common/tables/keystone-amqp.xml b/doc/common/tables/keystone-amqp.xml
deleted file mode 100644
index 1e30568f13..0000000000
--- a/doc/common/tables/keystone-amqp.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of AMQP configuration options
Configuration option = Default value | Description
[DEFAULT]
= keystone(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= None(StrOpt) Default publisher_id for outgoing notifications
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= basic(StrOpt) Define the notification format for Identity Service events. A "basic" notification has information about the resource being operated on. A "cadf" notification has the same information, as well as information about the initiator of the event.
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
diff --git a/doc/common/tables/keystone-api.xml b/doc/common/tables/keystone-api.xml
deleted file mode 100644
index 95af265494..0000000000
--- a/doc/common/tables/keystone-api.xml
+++ /dev/null
@@ -1,190 +0,0 @@
Description of API configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) The base admin endpoint URL for Keystone that is advertised to clients (NOTE: this does NOT affect how Keystone listens for connections). Defaults to the base host URL of the request. E.g. a request to http://server:35357/v3/users will default to http://server:35357. You should only need to set this value if the base URL contains a path (e.g. /prefix/v3) or the endpoint should be found on a different server.
= ADMIN(StrOpt) A "shared secret" that can be used to bootstrap Keystone. This "token" does not represent a user, and carries no explicit authorization. To disable in production (highly recommended), remove AdminTokenAuthMiddleware from your paste application pipelines (for example, in keystone-paste.ini).
= True(BoolOpt) Set this to false if you want to enable the ability for user, group and project entities to be moved between domains by updating their domain_id. Allowing such movement is not recommended if the scope of a domain admin is being restricted by use of an appropriate policy file (see policy.v3cloudsample as an example).
= None(IntOpt) The maximum number of entities that will be returned in a collection, with no limit set by default. This global limit may be then overridden for a specific driver, by specifying a list_limit in the appropriate section (e.g. [assignment]).
= 64(IntOpt) Limit the sizes of user & project ID/names.
= 5(IntOpt) Maximum depth of the project hierarchy. WARNING: setting it to a large value may adversely impact performance.
= 8192(IntOpt) Similar to max_param_size, but provides an exception for token values.
= 9fe2ff9ee4384b1894a90878d3e92bab(StrOpt) Similar to the member_role_name option, this represents the default role ID used to associate users with their default projects in the v2 API. This will be used as the explicit role where one is not specified by the v2 API.
= _member_(StrOpt) This is the role name used in combination with the member_role_id option; see that option for more detail.
= None(StrOpt) The base public endpoint URL for Keystone that is advertised to clients (NOTE: this does NOT affect how Keystone listens for connections). Defaults to the base host URL of the request. E.g. a request to http://server:5000/v3/users will default to http://server:5000. You should only need to set this value if the base URL contains a path (e.g. /prefix/v3) or the endpoint should be found on a different server.
= None(StrOpt) The HTTP header used to determine the scheme for the original request, even if it was removed by an SSL terminating proxy. Typical value is "HTTP_X_FORWARDED_PROTO".
= False(BoolOpt) If set to true, strict password length checking is performed for password manipulation. If a password exceeds the maximum length, the operation will fail with an HTTP 403 Forbidden error. If set to false, passwords are automatically truncated to the maximum length.
[endpoint_filter]
= sql(StrOpt) Entrypoint for the endpoint filter backend driver in the keystone.endpoint_filter namespace.
= True(BoolOpt) Toggle to return all active endpoints if no filter exists.
[endpoint_policy]
= sql(StrOpt) Entrypoint for the endpoint policy backend driver in the keystone.endpoint_policy namespace.
= True(BoolOpt) Enable endpoint_policy functionality.
[eventlet_server]
= 0.0.0.0(StrOpt) The IP address of the network interface for the admin service to listen on.
= 35357(IntOpt) The port number which the admin service listens on.
= None(IntOpt) The number of worker processes to serve the admin eventlet application. Defaults to number of CPUs (minimum of 2).
= 900(IntOpt) Timeout for socket operations on a client connection. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= 0.0.0.0(StrOpt) The IP address of the network interface for the public service to listen on.
= 5000(IntOpt) The port number which the public service listens on.
= None(IntOpt) The number of worker processes to serve the public eventlet application. Defaults to number of CPUs (minimum of 2).
= False(BoolOpt) Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e. sockets used by the Keystone wsgi server for client connections.
= 600(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only applies if tcp_keepalive is true.
= True(BoolOpt) If set to false, disables keepalives on the server; all connections will be closed after serving one request.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[paste_deploy]
= keystone-paste.ini(StrOpt) Name of the paste configuration file that defines the available pipelines.
[resource]
= None(IntOpt) TTL (in seconds) to cache resource data. This has no effect unless global caching is enabled.
= True(BoolOpt) Toggle for resource caching. This has no effect unless global caching is enabled.
= None(StrOpt) Entrypoint for the resource backend driver in the keystone.resource namespace. Supplied drivers are ldap and sql. If a resource driver is not specified, the assignment driver will choose the resource driver.
= None(IntOpt) Maximum number of entities that will be returned in a resource collection.
diff --git a/doc/common/tables/keystone-assignment.xml b/doc/common/tables/keystone-assignment.xml
deleted file mode 100644
index 90fd0eca3a..0000000000
--- a/doc/common/tables/keystone-assignment.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of assignment configuration options
Configuration option = Default value | Description
[assignment]
= None(StrOpt) Entrypoint for the assignment backend driver in the keystone.assignment namespace. Supplied drivers are ldap and sql. If an assignment driver is not specified, the identity driver will choose the assignment driver.
diff --git a/doc/common/tables/keystone-auth.xml b/doc/common/tables/keystone-auth.xml
deleted file mode 100644
index 6c6bad99ee..0000000000
--- a/doc/common/tables/keystone-auth.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of authorization configuration options
Configuration option = Default value | Description
[auth]
= None(StrOpt) Entrypoint for the external (REMOTE_USER) auth plugin module in the keystone.auth.external namespace. Supplied drivers are DefaultDomain and Domain. The default driver is DefaultDomain.
= external, password, token, oauth1(ListOpt) Allowed authentication methods.
= None(StrOpt) Entrypoint for the oAuth1.0 auth plugin module in the keystone.auth.oauth1 namespace.
= None(StrOpt) Entrypoint for the password auth plugin module in the keystone.auth.password namespace.
= None(StrOpt) Entrypoint for the token auth plugin module in the keystone.auth.token namespace.
diff --git a/doc/common/tables/keystone-auth_token.xml b/doc/common/tables/keystone-auth_token.xml
deleted file mode 100644
index 815d80a243..0000000000
--- a/doc/common/tables/keystone-auth_token.xml
+++ /dev/null
@@ -1,188 +0,0 @@
Description of authorization token configuration options
Configuration option = Default value | Description
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPS connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times to retry reconnecting when communicating with the Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
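A common [keystone_authtoken] sketch for a service using this middleware; a hedged example only, with option names assumed from keystonemiddleware rather than taken from this table.

    [keystone_authtoken]
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_user = service_user
    admin_password = SERVICE_PASS
    admin_tenant_name = admin
    # Cache validated tokens for five minutes; -1 disables caching.
    token_cache_time = 300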
diff --git a/doc/common/tables/keystone-ca.xml b/doc/common/tables/keystone-ca.xml
deleted file mode 100644
index b91eaccc1b..0000000000
--- a/doc/common/tables/keystone-ca.xml
+++ /dev/null
@@ -1,110 +0,0 @@
Description of CA and SSL configuration options
Configuration option = Default value | Description
[eventlet_server_ssl]
= /etc/keystone/ssl/certs/ca.pem(StrOpt) Path of the CA cert file for SSL.
= False(BoolOpt) Require client certificate.
= /etc/keystone/ssl/certs/keystone.pem(StrOpt) Path of the certfile for SSL. For non-production environments, you may be interested in using `keystone-manage ssl_setup` to generate self-signed certificates.
= False(BoolOpt) Toggle for SSL support on the Keystone eventlet servers.
= /etc/keystone/ssl/private/keystonekey.pem(StrOpt) Path of the keyfile for SSL.
[signing]
= /etc/keystone/ssl/certs/ca.pem(StrOpt) Path of the CA for token signing.
= /etc/keystone/ssl/private/cakey.pem(StrOpt) Path of the CA key for token signing.
= /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com(StrOpt) Certificate subject (auto generated certificate) for token signing.
= /etc/keystone/ssl/certs/signing_cert.pem(StrOpt) Path of the certfile for token signing. For non-production environments, you may be interested in using `keystone-manage pki_setup` to generate self-signed certificates.
= 2048(IntOpt) Key size (in bits) for token signing cert (auto generated certificate).
= /etc/keystone/ssl/private/signing_key.pem(StrOpt) Path of the keyfile for token signing.
= 3650(IntOpt) Days the token signing cert is valid for (auto generated certificate).
[ssl]
= /etc/keystone/ssl/private/cakey.pem(StrOpt) Path of the CA key file for SSL.
= /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost(StrOpt) SSL certificate subject (auto generated certificate).
= 1024(IntOpt) SSL key length (in bits) (auto generated certificate).
= 3650(IntOpt) Days the certificate is valid for once signed (auto generated certificate).
diff --git a/doc/common/tables/keystone-cache.xml b/doc/common/tables/keystone-cache.xml
deleted file mode 100644
index 24ef0ddd34..0000000000
--- a/doc/common/tables/keystone-cache.xml
+++ /dev/null
@@ -1,111 +0,0 @@
Description of cache configuration options
Configuration option = Default value | Description
[cache]
= keystone.common.cache.noop(StrOpt) Dogpile.cache backend module. It is recommended that Memcache with pooling (keystone.cache.memcache_pool) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.
= [](MultiStrOpt) Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: "<argname>:<value>".
= cache.keystone(StrOpt) Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name.
= False(BoolOpt) Extra debugging from the cache backend (cache keys, get/set/delete/etc calls). This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to false.
= False(BoolOpt) Global toggle for all caching using the should_cache_fn mechanism.
= 600(IntOpt) Default TTL, in seconds, for any cached item in the dogpile.cache region. This applies to any cached method that doesn't have an explicit cache expiration time defined for it.
= 300(IntOpt) Number of seconds memcached server is considered dead before it is tried again. (dogpile.cache.memcache and keystone.cache.memcache_pool backends only).
= 10(IntOpt) Number of seconds that an operation will wait to get a memcache client connection.
= 10(IntOpt) Max total number of open connections to every memcached server. (keystone.cache.memcache_pool backend only).
= 60(IntOpt) Number of seconds a connection to memcached is held unused in the pool before it is closed. (keystone.cache.memcache_pool backend only).
= localhost:11211(ListOpt) Memcache servers in the format of "host:port". (dogpile.cache.memcache and keystone.cache.memcache_pool backends only).
= 3(IntOpt) Timeout in seconds for every call to a server. (dogpile.cache.memcache and keystone.cache.memcache_pool backends only).
= (ListOpt) Proxy classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior.
[memcache]
= 300(IntOpt) Number of seconds memcached server is considered dead before it is tried again. This is used by the key value store system (e.g. token pooled memcached persistence backend).
= 10(IntOpt) Number of seconds that an operation will wait to get a memcache client connection. This is used by the key value store system (e.g. token pooled memcached persistence backend).
= 10(IntOpt) Max total number of open connections to every memcached server. This is used by the key value store system (e.g. token pooled memcached persistence backend).
= 60(IntOpt) Number of seconds a connection to memcached is held unused in the pool before it is closed. This is used by the key value store system (e.g. token pooled memcached persistence backend).
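An illustrative keystone.conf [cache] sketch based on the options above; the option names are assumptions drawn from keystone's caching layer, as this table omits them.

    [cache]
    enabled = True
    # Pooled memcache backend, recommended for production per the table above.
    backend = keystone.cache.memcache_pool
    memcache_servers = 127.0.0.1:11211
    expiration_time = 600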
diff --git a/doc/common/tables/keystone-catalog.xml b/doc/common/tables/keystone-catalog.xml
deleted file mode 100644
index 405663f734..0000000000
--- a/doc/common/tables/keystone-catalog.xml
+++ /dev/null
@@ -1,60 +0,0 @@
Description of catalog configuration options
Configuration option = Default value | Description
[catalog]
= None(IntOpt) Time to cache catalog data (in seconds). This has no effect unless global and catalog caching are enabled.
= True(BoolOpt) Toggle for catalog caching. This has no effect unless global caching is enabled.
= sql(StrOpt) Entrypoint for the catalog backend driver in the keystone.catalog namespace. Supplied drivers are kvs, sql, templated, and endpoint_filter.sql
= None(IntOpt) Maximum number of entities that will be returned in a catalog collection.
= default_catalog.templates(StrOpt) Catalog template file name for use with the template catalog backend.
diff --git a/doc/common/tables/keystone-common.xml b/doc/common/tables/keystone-common.xml
deleted file mode 100644
index 9ba900943a..0000000000
--- a/doc/common/tables/keystone-common.xml
+++ /dev/null
@@ -1,66 +0,0 @@
Description of common configuration options
Configuration option = Default value | Description
[DEFAULT]
= 64(IntOpt) Size of executor thread pool.
= None(ListOpt) Memcached servers or None for in process cache.
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
diff --git a/doc/common/tables/keystone-cors.xml b/doc/common/tables/keystone-cors.xml
deleted file mode 100644
index f303d35d46..0000000000
--- a/doc/common/tables/keystone-cors.xml
+++ /dev/null
@@ -1,91 +0,0 @@
Description of CORS configuration options
Configuration option = Default value | Description
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
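A minimal [cors] sketch allowing a dashboard origin; a hedged example with option names assumed from oslo.middleware, not listed in this table.

    [cors]
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    allow_methods = GET,POST,PUT,DELETE,OPTIONS
    max_age = 3600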
diff --git a/doc/common/tables/keystone-credential.xml b/doc/common/tables/keystone-credential.xml
deleted file mode 100644
index 5dafcc1e06..0000000000
--- a/doc/common/tables/keystone-credential.xml
+++ /dev/null
@@ -1,44 +0,0 @@
Description of credential configuration options
Configuration option = Default value | Description
[credential]
= sql(StrOpt) Entrypoint for the credential backend driver in the keystone.credential namespace.
diff --git a/doc/common/tables/keystone-database.xml b/doc/common/tables/keystone-database.xml
deleted file mode 100644
index e7c56be5fe..0000000000
--- a/doc/common/tables/keystone-database.xml
+++ /dev/null
@@ -1,120 +0,0 @@
Description of database configuration options
Configuration option = Default value | Description
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
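A hedged keystone.conf [database] sketch for the options above (oslo.db option names assumed; the connection URL is an example value only):

    [database]
    connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
    idle_timeout = 3600
    max_pool_size = 10
    max_retries = 10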
diff --git a/doc/common/tables/keystone-debug.xml b/doc/common/tables/keystone-debug.xml
deleted file mode 100644
index 8ab04c896b..0000000000
--- a/doc/common/tables/keystone-debug.xml
+++ /dev/null
@@ -1,59 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Host to connect to for remote debugger.
= None(IntOpt) Port to connect to for remote debugger.
= False(BoolOpt) Do not monkey-patch threading system modules.
[audit]
= openstack(StrOpt) Namespace prefix for generated IDs.
diff --git a/doc/common/tables/keystone-domain.xml b/doc/common/tables/keystone-domain.xml
deleted file mode 100644
index 08ca17ea20..0000000000
--- a/doc/common/tables/keystone-domain.xml
+++ /dev/null
@@ -1,52 +0,0 @@
Description of domain configuration options
Configuration option = Default value | Description
[domain_config]
= 300(IntOpt) TTL (in seconds) to cache domain config data. This has no effect unless domain config caching is enabled.
= True(BoolOpt) Toggle for domain config caching. This has no effect unless global caching is enabled.
= sql(StrOpt) Entrypoint for the domain config backend driver in the keystone.resource.domain_config namespace.
diff --git a/doc/common/tables/keystone-federation.xml b/doc/common/tables/keystone-federation.xml
deleted file mode 100644
index c8daa13abf..0000000000
--- a/doc/common/tables/keystone-federation.xml
+++ /dev/null
@@ -1,64 +0,0 @@
Description of federation configuration options
Configuration option = Default value | Description
[federation]
= (StrOpt) Value to be used when filtering assertion parameters from the environment.
= sql(StrOpt) Entrypoint for the federation backend driver in the keystone.federation namespace.
= Federated(StrOpt) A domain name that is reserved to allow federated ephemeral users to have a domain concept. Note that an admin will not be able to create a domain with this name or update an existing domain to this name. You are not advised to change this value unless you really have to.
= None(StrOpt) Value to be used to obtain the entity ID of the Identity Provider from the environment (e.g. if using the mod_shib plugin this value is `Shib-Identity-Provider`).
= /etc/keystone/sso_callback_template.html(StrOpt) Location of Single Sign-On callback handler, will return a token to a trusted dashboard host.
= [](MultiStrOpt) A list of trusted dashboard hosts. Before accepting a Single Sign-On request to return a token, the origin host must be a member of the trusted_dashboard list. This configuration option may be repeated for multiple values. For example: trusted_dashboard=http://acme.com/auth/websso trusted_dashboard=http://beta.com/auth/websso
diff --git a/doc/common/tables/keystone-fernet_tokens.xml b/doc/common/tables/keystone-fernet_tokens.xml
deleted file mode 100644
index 1484fddb60..0000000000
--- a/doc/common/tables/keystone-fernet_tokens.xml
+++ /dev/null
@@ -1,48 +0,0 @@
Description of Fernet tokens configuration options
Configuration option = Default value | Description
[fernet_tokens]
= /etc/keystone/fernet-keys/(StrOpt) Directory containing Fernet token keys.
= 3(IntOpt) This controls how many keys are held in rotation by keystone-manage fernet_rotate before they are discarded. The default value of 3 means that keystone will maintain one staged key, one primary key, and one secondary key. Increasing this value means that additional secondary keys will be kept in the rotation.
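A minimal [fernet_tokens] sketch matching the options above (option names assumed from keystone); the key repository is populated and rotated with keystone-manage fernet_rotate, as described in the table.

    [fernet_tokens]
    key_repository = /etc/keystone/fernet-keys/
    # One staged, one primary, and one secondary key kept in rotation.
    max_active_keys = 3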
diff --git a/doc/common/tables/keystone-identity.xml b/doc/common/tables/keystone-identity.xml
deleted file mode 100644
index 3a7d6cb395..0000000000
--- a/doc/common/tables/keystone-identity.xml
+++ /dev/null
@@ -1,76 +0,0 @@
Description of identity configuration options
Configuration option = Default value | Description
[identity]
= 600(IntOpt) Time to cache identity data (in seconds). This has no effect unless global and identity caching are enabled.
= True(BoolOpt) Toggle for identity caching. This has no effect unless global caching is enabled.
= default(StrOpt) This references the domain to use for all Identity API v2 requests (which are not aware of domains). A domain with this ID will be created for you by keystone-manage db_sync in migration 008. The domain referenced by this ID cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. There is nothing special about this domain, other than the fact that it must exist in order to maintain support for your v2 clients.
= /etc/keystone/domains(StrOpt) Path for Keystone to locate the domain specific identity configuration files if domain_specific_drivers_enabled is set to true.
= False(BoolOpt) Extract the domain specific configuration options from the resource backend where they have been stored with the domain data. This feature is disabled by default (in which case the domain specific options will be loaded from files in the domain configuration directory); set to true to enable.
= False(BoolOpt) A subset (or all) of domains can have their own identity driver, each with their own partial configuration options, stored in either the resource backend or in a file in a domain configuration directory (depending on the setting of domain_configurations_from_database). Only values specific to the domain need to be specified in this manner. This feature is disabled by default; set to true to enable.
= sql(StrOpt) Entrypoint for the identity backend driver in the keystone.identity namespace. Supplied drivers are ldap and sql.
= None(IntOpt) Maximum number of entities that will be returned in an identity collection.
= 4096(IntOpt) Maximum supported length for user passwords; decrease to improve performance.
diff --git a/doc/common/tables/keystone-kvs.xml b/doc/common/tables/keystone-kvs.xml
deleted file mode 100644
index 2452c404ab..0000000000
--- a/doc/common/tables/keystone-kvs.xml
+++ /dev/null
@@ -1,56 +0,0 @@
Description of KVS configuration options
Configuration option = Default value | Description
[kvs]
= (ListOpt) Extra dogpile.cache backend modules to register with the dogpile.cache library.
= keystone.kvs(StrOpt) Prefix for building the configuration dictionary for the KVS region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name.
= 5(IntOpt) Default lock timeout (in seconds) for distributed locking.
= True(BoolOpt) Toggle to disable using a key-mangling function to ensure fixed-length keys. This can be toggled for debugging purposes; it is highly recommended to always leave this set to true.
diff --git a/doc/common/tables/keystone-ldap.xml b/doc/common/tables/keystone-ldap.xml
deleted file mode 100644
index bc570f72f4..0000000000
--- a/doc/common/tables/keystone-ldap.xml
+++ /dev/null
@@ -1,372 +0,0 @@
Description of LDAP configuration options
Configuration option = Default value | Description
[ldap]
= default(StrOpt) The LDAP dereferencing option for queries. The "default" option falls back to using default dereferencing configured by your ldap.conf.
= False(BoolOpt) Delete subtrees using the subtree delete control. Only enable this option if your LDAP server supports subtree deletion.
= 60(IntOpt) End user auth connection lifetime in seconds.
= 100(IntOpt) End user auth connection pool size.
= None(BoolOpt) Override the system's default referral chasing behavior for queries.
= None(IntOpt) Sets the LDAP debugging level for LDAP calls. A value of 0 means that debugging is not enabled. This value is a bitmask, consult your LDAP documentation for possible values.
= cn=dumb,dc=nonexistent(StrOpt) DN of the "dummy member" to use when "use_dumb_member" is enabled.
= (ListOpt) Additional attribute mappings for groups. Attribute mapping format is <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry and user_attr is the Identity API attribute.
= True(BoolOpt) Allow group creation in LDAP backend.
= True(BoolOpt) Allow group deletion in LDAP backend.
= True(BoolOpt) Allow group update in LDAP backend.
= (ListOpt) List of attributes stripped off the group on update.
= description(StrOpt) LDAP attribute mapped to group description.
= None(StrOpt) LDAP search filter for groups.
= cn(StrOpt) LDAP attribute mapped to group id.
= member(StrOpt) LDAP attribute mapped to show group membership.
= ou(StrOpt) LDAP attribute mapped to group name.
= groupOfNames(StrOpt) LDAP objectclass for groups.
= None(StrOpt) Search base for groups. Defaults to the suffix value.
= 0(IntOpt) Maximum results per page; a value of zero ("0") disables paging.
= None(StrOpt) Password for the BindDN to query the LDAP server.
= 600(IntOpt) Connection lifetime in seconds.
= -1(IntOpt) Connector timeout in seconds. Value -1 indicates indefinite wait for response.
= 0.1(FloatOpt) Time span in seconds to wait between two reconnect trials.
= 3(IntOpt) Maximum count of reconnect trials.
= 10(IntOpt) Connection pool size.
= (ListOpt) Additional attribute mappings for projects. Attribute mapping format is <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry and user_attr is the Identity API attribute.
= True(BoolOpt) Allow project creation in LDAP backend.
= True(BoolOpt) Allow project deletion in LDAP backend.
= True(BoolOpt) Allow project update in LDAP backend.
= (ListOpt) List of attributes stripped off the project on update.
= description(StrOpt) LDAP attribute mapped to project description.
= businessCategory(StrOpt) LDAP attribute mapped to project domain_id.
= enabled(StrOpt) LDAP attribute mapped to project enabled.
= False(BoolOpt) If true, Keystone uses an alternative method to determine if a project is enabled or not by checking if they are a member of the "project_enabled_emulation_dn" group.
= None(StrOpt) DN of the group entry to hold enabled projects when using enabled emulation.
= None(StrOpt) LDAP search filter for projects.
= cn(StrOpt) LDAP attribute mapped to project id.
= member(StrOpt) LDAP attribute mapped to project membership for user.
= ou(StrOpt) LDAP attribute mapped to project name.
= groupOfNames(StrOpt) LDAP objectclass for projects.
= None(StrOpt) Search base for projects. Defaults to the suffix value.
= one(StrOpt) The LDAP scope for queries, "one" represents oneLevel/singleLevel and "sub" represents subtree/wholeSubtree options.
= (ListOpt) Additional attribute mappings for roles. Attribute mapping format is <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry and user_attr is the Identity API attribute.
= True(BoolOpt) Allow role creation in LDAP backend.
= True(BoolOpt) Allow role deletion in LDAP backend.
= True(BoolOpt) Allow role update in LDAP backend.
= (ListOpt) List of attributes stripped off the role on update.
= None(StrOpt) LDAP search filter for roles.
= cn(StrOpt) LDAP attribute mapped to role id.
= roleOccupant(StrOpt) LDAP attribute mapped to role membership.
= ou(StrOpt) LDAP attribute mapped to role name.
= organizationalRole(StrOpt) LDAP objectclass for roles.
= None(StrOpt) Search base for roles. Defaults to the suffix value.
= cn=example,cn=com(StrOpt) LDAP server suffix
= None(StrOpt) CA certificate directory path for communicating with LDAP servers.
= None(StrOpt) CA certificate file path for communicating with LDAP servers.
= demand(StrOpt) Specifies what checks to perform on client certificates in an incoming TLS session.
= ldap://localhost(StrOpt) URL for connecting to the LDAP server.
= False(BoolOpt) Enable LDAP connection pooling for end user authentication. If use_pool is disabled, then this setting is meaningless and is not used at all.
= False(BoolOpt) If true, will add a dummy member to groups. This is required if the objectclass for groups requires the "member" attribute.
= False(BoolOpt) Enable LDAP connection pooling.
= False(BoolOpt) Enable TLS for communicating with LDAP servers.
= None(StrOpt) User BindDN to query the LDAP server.
= (ListOpt) Additional attribute mappings for users. Attribute mapping format is <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry and user_attr is the Identity API attribute.
= True(BoolOpt) Allow user creation in LDAP backend.
= True(BoolOpt) Allow user deletion in LDAP backend.
= True(BoolOpt) Allow user updates in LDAP backend.
= default_project_id(ListOpt) List of attributes stripped off the user on update.
= None(StrOpt) LDAP attribute mapped to default_project_id for users.
= enabled(StrOpt) LDAP attribute mapped to user enabled flag.
= True(StrOpt) Default value to enable users. This should match an appropriate int value if the LDAP server uses non-boolean (bitmask) values to indicate if a user is enabled or disabled. If this is not set to "True" the typical value is "512". This is typically used when "user_enabled_attribute = userAccountControl".
= False(BoolOpt) If true, Keystone uses an alternative method to determine if a user is enabled or not by checking if they are a member of the "user_enabled_emulation_dn" group.
= None(StrOpt) DN of the group entry to hold enabled users when using enabled emulation.
= False(BoolOpt) Invert the meaning of the boolean enabled values. Some LDAP servers use a boolean lock attribute where "true" means an account is disabled. Setting "user_enabled_invert = true" will allow these lock attributes to be used. This setting will have no effect if "user_enabled_mask" or "user_enabled_emulation" settings are in use.
= 0(IntOpt) Bitmask integer to indicate the bit that the enabled value is stored in if the LDAP server represents "enabled" as a bit on an integer rather than a boolean. A value of "0" indicates the mask is not used. If this is not set to "0" the typical value is "2". This is typically used when "user_enabled_attribute = userAccountControl".
= None(StrOpt) LDAP search filter for users.
= cn(StrOpt) LDAP attribute mapped to user id. WARNING: must not be a multivalued attribute.
= mail(StrOpt) LDAP attribute mapped to user email.
= sn(StrOpt) LDAP attribute mapped to user name.
= inetOrgPerson(StrOpt) LDAP objectclass for users.
= userPassword(StrOpt) LDAP attribute mapped to password.
= None(StrOpt) Search base for users. Defaults to the suffix value.
-
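For readers mapping the LDAP table above onto a real deployment, the following keystone.conf fragment is an illustrative sketch only; the option names (url, user, password, suffix, user_tree_dn, group_tree_dn, use_tls, tls_cacertfile) are assumed from the keystone [ldap] defaults of this era, and the DNs and paths are placeholders.

    [ldap]
    url = ldap://ldap.example.com
    # Read-only BindDN used for queries.
    user = cn=keystone,ou=service,dc=example,dc=com
    password = LDAP_SERVICE_PASSWORD
    suffix = dc=example,dc=com
    # Search bases for users and groups; both default to the suffix.
    user_tree_dn = ou=Users,dc=example,dc=com
    group_tree_dn = ou=Groups,dc=example,dc=com
    # Protect the connection with TLS and verify the server certificate.
    use_tls = true
    tls_cacertfile = /etc/ssl/certs/ca.pem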
diff --git a/doc/common/tables/keystone-logging.xml b/doc/common/tables/keystone-logging.xml deleted file mode 100644 index 9d902b579c..0000000000 --- a/doc/common/tables/keystone-logging.xml +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
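A hedged example of the logging options above as they commonly appear in keystone.conf; the option names (debug, log_dir, log_file, use_syslog, syslog_log_facility) follow the oslo.log defaults and are assumptions rather than values from the table.

    [DEFAULT]
    # Keep INFO-level logging; switch to true only while troubleshooting.
    debug = false
    log_dir = /var/log/keystone
    log_file = keystone.log
    # Alternatively, send log lines to syslog.
    use_syslog = false
    syslog_log_facility = LOG_USER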
diff --git a/doc/common/tables/keystone-mapping.xml b/doc/common/tables/keystone-mapping.xml deleted file mode 100644 index 3897b1843a..0000000000 --- a/doc/common/tables/keystone-mapping.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of mapping configuration options
Configuration option = Default valueDescription
[identity_mapping]
= True(BoolOpt) The format of user and group IDs changed in Juno for backends that do not generate UUIDs (e.g. LDAP), with keystone providing a hash mapping to the underlying attribute in LDAP. By default this mapping is disabled, which ensures that existing IDs will not change. Even when the mapping is enabled by using domain specific drivers, any users and groups from the default domain being handled by LDAP will still not be mapped to ensure their IDs remain backward compatible. Setting this value to False will enable the mapping for even the default LDAP driver. It is only safe to do this if you do not already have assignments for users and groups from the default LDAP domain, and it is acceptable for Keystone to provide the different IDs to clients than it did previously. Typically this means that the only time you can set this value to False is when configuring a fresh installation.
= sql(StrOpt) Entrypoint for the identity mapping backend driver in the keystone.identity.id_mapping namespace.
= sha256(StrOpt) Entrypoint for the public ID generator for user and group entities in the keystone.identity.id_generator namespace. The Keystone identity mapper only supports generators that produce no more than 64 characters.
-
diff --git a/doc/common/tables/keystone-memcache.xml b/doc/common/tables/keystone-memcache.xml deleted file mode 100644 index c6bb38a8ae..0000000000 --- a/doc/common/tables/keystone-memcache.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of memcache configuration options
Configuration option = Default valueDescription
[memcache]
= localhost:11211(ListOpt) Memcache servers in the format of "host:port".
= 3(IntOpt) Timeout in seconds for every call to a server. This is used by the key value store system (e.g. token pooled memcached persistence backend).
-
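As a short, assumed example of the memcache options above (option names servers and socket_timeout taken from the keystone [memcache] section of this release):

    [memcache]
    # One or more memcached endpoints used by the memcache-backed KVS drivers.
    servers = 192.0.2.10:11211,192.0.2.11:11211
    socket_timeout = 3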
diff --git a/doc/common/tables/keystone-oauth.xml b/doc/common/tables/keystone-oauth.xml deleted file mode 100644 index 42e54a67e2..0000000000 --- a/doc/common/tables/keystone-oauth.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of OAuth configuration options
Configuration option = Default valueDescription
[oauth1]
= 86400(IntOpt) Duration (in seconds) for the OAuth Access Token.
= sql(StrOpt) Entrypoint for the OAuth backend driver in the keystone.oauth1 namespace.
= 28800(IntOpt) Duration (in seconds) for the OAuth Request Token.
-
diff --git a/doc/common/tables/keystone-os_inherit.xml b/doc/common/tables/keystone-os_inherit.xml deleted file mode 100644 index e189c7c3d7..0000000000 --- a/doc/common/tables/keystone-os_inherit.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of os_inherit configuration options
Configuration option = Default valueDescription
[os_inherit]
= False(BoolOpt) Role-assignment inheritance to projects from owning domain or from projects higher in the hierarchy can be optionally enabled.
-
diff --git a/doc/common/tables/keystone-policy.xml b/doc/common/tables/keystone-policy.xml deleted file mode 100644 index 4cb8769d04..0000000000 --- a/doc/common/tables/keystone-policy.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of policy configuration options
Configuration option = Default valueDescription
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
[policy]
= sql(StrOpt) Entrypoint for the policy backend driver in the keystone.policy namespace. Supplied drivers are rules and sql.
= None(IntOpt) Maximum number of entities that will be returned in a policy collection.
-
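An illustrative sketch of the policy options above in keystone.conf; the option names (policy_file, policy_dirs, and policy_default_rule under [oslo_policy], and driver under [policy]) are assumed from the oslo.policy and keystone defaults.

    [oslo_policy]
    policy_file = policy.json
    policy_dirs = policy.d
    policy_default_rule = default

    [policy]
    # Store policy data in SQL; "rules" is the other supplied driver.
    driver = sql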
diff --git a/doc/common/tables/keystone-qpid.xml b/doc/common/tables/keystone-qpid.xml deleted file mode 100644 index 8d34c97346..0000000000 --- a/doc/common/tables/keystone-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload and a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/keystone-rabbitmq.xml b/doc/common/tables/keystone-rabbitmq.xml deleted file mode 100644 index 83bff2530f..0000000000 --- a/doc/common/tables/keystone-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How many times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload and a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
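To show how the RabbitMQ options above are usually combined, here is a minimal, illustrative configuration fragment; the option names (rpc_backend, rabbit_host, rabbit_port, rabbit_userid, rabbit_password, rabbit_ha_queues) are assumed from the oslo.messaging defaults of this release, and the host and password are placeholders.

    [DEFAULT]
    rpc_backend = rabbit

    [oslo_messaging_rabbit]
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    # Mirror queues across the cluster; wipe the RabbitMQ database when changing this.
    rabbit_ha_queues = true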
diff --git a/doc/common/tables/keystone-redis.xml b/doc/common/tables/keystone-redis.xml deleted file mode 100644 index fa0a4fc4ae..0000000000 --- a/doc/common/tables/keystone-redis.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/keystone-revoke.xml b/doc/common/tables/keystone-revoke.xml deleted file mode 100644 index a43247d81b..0000000000 --- a/doc/common/tables/keystone-revoke.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of revoke configuration options
Configuration option = Default valueDescription
[revoke]
= 3600(IntOpt) Time to cache the revocation list and the revocation events (in seconds). This has no effect unless global and token caching are enabled.
= True(BoolOpt) Toggle for revocation event caching. This has no effect unless global caching is enabled.
= sql(StrOpt) Entrypoint for an implementation of the backend for persisting revocation events in the keystone.revoke namespace. Supplied drivers are kvs and sql.
= 1800(IntOpt) This value (calculated in seconds) is added to token expiration before a revocation event may be removed from the backend.
-
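A brief, assumed example of the revoke options above (option names driver, expiration_buffer, and caching taken from the keystone [revoke] section of this release):

    [revoke]
    driver = sql
    # Keep revocation events this many seconds past token expiration.
    expiration_buffer = 1800
    caching = true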
diff --git a/doc/common/tables/keystone-role.xml b/doc/common/tables/keystone-role.xml deleted file mode 100644 index 9e2833c1fb..0000000000 --- a/doc/common/tables/keystone-role.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of role configuration options
Configuration option = Default valueDescription
[role]
= None(IntOpt) TTL (in seconds) to cache role data. This has no effect unless global caching is enabled.
= True(BoolOpt) Toggle for role caching. This has no effect unless global caching is enabled.
= None(StrOpt) Entrypoint for the role backend driver in the keystone.role namespace. Supplied drivers are ldap and sql.
= None(IntOpt) Maximum number of entities that will be returned in a role collection.
-
diff --git a/doc/common/tables/keystone-rpc.xml b/doc/common/tables/keystone-rpc.xml deleted file mode 100644 index a909dce57e..0000000000 --- a/doc/common/tables/keystone-rpc.xml +++ /dev/null @@ -1,127 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP.
= broadcast(StrOpt) Address prefix used when broadcasting to all servers.
= None(StrOpt) Name for the AMQP container.
= unicast(StrOpt) Address prefix when sending to any server in group.
= 0(IntOpt) Timeout for inactive connections (in seconds).
= (StrOpt) Password for message broker authentication.
= (StrOpt) Path to directory that contains the SASL configuration.
= (StrOpt) Name of configuration file (without .conf suffix).
= (StrOpt) Space separated list of acceptable SASL mechanisms.
= exclusive(StrOpt) Address prefix used when sending to a specific server.
= (StrOpt) CA certificate PEM file to verify server certificate.
= (StrOpt) Identifying certificate PEM file to present to clients.
= (StrOpt) Private key PEM file used to sign cert_file certificate.
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted).
= False(BoolOpt) Debug: dump AMQP frames to stdout.
= (StrOpt) User name for message broker authentication.
-
diff --git a/doc/common/tables/keystone-saml.xml b/doc/common/tables/keystone-saml.xml deleted file mode 100644 index 5626458010..0000000000 --- a/doc/common/tables/keystone-saml.xml +++ /dev/null @@ -1,112 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of SAML configuration options
Configuration option = Default valueDescription
[saml]
= 3600(IntOpt) Default TTL, in seconds, for any generated SAML assertion created by Keystone.
= /etc/keystone/ssl/certs/signing_cert.pem(StrOpt) Path of the certfile for SAML signing. For non-production environments, you may be interested in using `keystone-manage pki_setup` to generate self-signed certificates. Note, the path cannot contain a comma.
= None(StrOpt) Company of contact person.
= None(StrOpt) Email address of contact person.
= None(StrOpt) Given name of contact person
= None(StrOpt) Surname of contact person.
= None(StrOpt) Telephone number of contact person.
= other(StrOpt) The contact type describing the main point of contact for the identity provider.
= None(StrOpt) Entity ID value for unique Identity Provider identification. Usually FQDN is set with a suffix. A value is required to generate IDP Metadata. For example: https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
= en(StrOpt) Language used by the organization.
= /etc/keystone/saml2_idp_metadata.xml(StrOpt) Path to the Identity Provider Metadata file. This file should be generated with the keystone-manage saml_idp_metadata command.
= None(StrOpt) Organization name to be displayed.
= None(StrOpt) Organization name the installation belongs to.
= None(StrOpt) URL of the organization.
= None(StrOpt) Identity Provider Single-Sign-On service value, required in the Identity Provider's metadata. A value is required to generate IDP Metadata. For example: https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
= /etc/keystone/ssl/private/signing_key.pem(StrOpt) Path of the keyfile for SAML signing. Note, the path cannot contain a comma.
= ss:mem:(StrOpt) The prefix to use for the RelayState SAML attribute, used when generating ECP wrapped assertions.
= xmlsec1(StrOpt) Binary to be called for XML signing. Install the appropriate package, specify absolute path or adjust your PATH environment variable if the binary cannot be found.
-
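For orientation, the SAML options above typically end up in keystone.conf roughly as follows. This is a sketch only; the option names (certfile, keyfile, idp_entity_id, idp_sso_endpoint, idp_metadata_path) are assumed from the keystone [saml] section and the URLs are placeholders.

    [saml]
    certfile = /etc/keystone/ssl/certs/signing_cert.pem
    keyfile = /etc/keystone/ssl/private/signing_key.pem
    idp_entity_id = https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
    idp_sso_endpoint = https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
    # Generate this file with: keystone-manage saml_idp_metadata
    idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml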
diff --git a/doc/common/tables/keystone-security.xml b/doc/common/tables/keystone-security.xml deleted file mode 100644 index 0e894d90ea..0000000000 --- a/doc/common/tables/keystone-security.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of security configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 10000(IntOpt) The value passed as the keyword "rounds" to passlib's encrypt method.
-
diff --git a/doc/common/tables/keystone-token.xml b/doc/common/tables/keystone-token.xml deleted file mode 100644 index ca94dbd281..0000000000 --- a/doc/common/tables/keystone-token.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of token configuration options
Configuration option = Default valueDescription
[token]
= True(BoolOpt) Allow rescoping of scoped token. Setting allow_rescoped_scoped_token to false prevents a user from exchanging a scoped token for any other token.
= (ListOpt) External auth mechanisms that should add bind information to token, e.g., kerberos,x509.
= None(IntOpt) Time to cache tokens (in seconds). This has no effect unless global and token caching are enabled.
= True(BoolOpt) Toggle for token system caching. This has no effect unless global caching is enabled.
= sql(StrOpt) Entrypoint for the token persistence backend driver in the keystone.token.persistence namespace. Supplied drivers are kvs, memcache, memcache_pool, and sql.
= permissive(StrOpt) Enforcement policy on tokens presented to Keystone with bind information. One of disabled, permissive, strict, required or a specifically required bind mode, e.g., kerberos or x509 to require binding to that authentication.
= 3600(IntOpt) Amount of time a token should remain valid (in seconds).
= md5(StrOpt) The hash algorithm to use for PKI tokens. This can be set to any algorithm that hashlib supports. WARNING: Before changing this value, the auth_token middleware must be configured with the hash_algorithms, otherwise token revocation will not be processed correctly.
= uuid(StrOpt) Controls the token construction, validation, and revocation operations. Entrypoint in the keystone.token.provider namespace. Core providers are [fernet|pkiz|pki|uuid].
= True(BoolOpt) Revoke token by token identifier. Setting revoke_by_id to true enables various forms of enumerating tokens, e.g. `list tokens for user`. These enumerations are processed to determine the list of tokens to revoke. Only disable if you are switching to using the Revoke extension with a backend other than KVS, which stores events in memory.
-
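As an assumed illustration of the token options above (option names provider, expiration, caching, revoke_by_id, and driver from the keystone [token] section of this release):

    [token]
    # uuid is the default; fernet, pkiz, and pki are the other core providers.
    provider = uuid
    expiration = 3600
    caching = true
    revoke_by_id = true
    driver = sql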
diff --git a/doc/common/tables/keystone-tokenless.xml b/doc/common/tables/keystone-tokenless.xml deleted file mode 100644 index dab6135ccf..0000000000 --- a/doc/common/tables/keystone-tokenless.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Tokenless Authorization configuration options
Configuration option = Default valueDescription
[tokenless_auth]
= SSL_CLIENT_I_DN(StrOpt) The issuer attribute that is served as an IdP ID for the X.509 tokenless authorization along with the protocol to look up its corresponding mapping. It is the environment variable in the WSGI environment that refers to the issuer of the client certificate.
= x509(StrOpt) The protocol name for X.509 tokenless authorization; together with the issuer_attribute option, it is used to look up the corresponding mapping.
= [](MultiStrOpt) The list of trusted issuers to further filter the certificates that are allowed to participate in the X.509 tokenless authorization. If the option is absent then no certificates will be allowed. The naming format for the attributes of a Distinguished Name (DN) must be separated by a comma and contain no spaces. This configuration option may be repeated for multiple values. For example: trusted_issuer=CN=john,OU=keystone,O=openstack trusted_issuer=CN=mary,OU=eng,O=abc
-
diff --git a/doc/common/tables/keystone-trust.xml b/doc/common/tables/keystone-trust.xml deleted file mode 100644 index 0d661a44fa..0000000000 --- a/doc/common/tables/keystone-trust.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of trust configuration options
Configuration option = Default valueDescription
[trust]
= False(BoolOpt) Enable redelegation feature.
= sql(StrOpt) Entrypoint for the trust backend driver in the keystone.trust namespace.
= True(BoolOpt) Delegation and impersonation features can be optionally disabled.
= 3(IntOpt) Maximum depth of trust redelegation.
-
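A minimal, assumed sketch of the trust options above (option names enabled, allow_redelegation, max_redelegation_count, and driver from the keystone [trust] section):

    [trust]
    enabled = true
    allow_redelegation = false
    max_redelegation_count = 3
    driver = sql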
diff --git a/doc/common/tables/keystone-zeromq.xml b/doc/common/tables/keystone-zeromq.xml deleted file mode 100644 index 010ba1d048..0000000000 --- a/doc/common/tables/keystone-zeromq.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
diff --git a/doc/common/tables/manila-amqp.xml b/doc/common/tables/manila-amqp.xml deleted file mode 100644 index 5eb1aa29e6..0000000000 --- a/doc/common/tables/manila-amqp.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The driver(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use for notifications. If not set, we fall back to the same configuration used for RPC.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
diff --git a/doc/common/tables/manila-api.xml b/doc/common/tables/manila-api.xml deleted file mode 100644 index 753b04ec35..0000000000 --- a/doc/common/tables/manila-api.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= api-paste.ini(StrOpt) File name for the paste.deploy config for manila-api.
= True(BoolOpt) Whether to rate limit the API.
= False(BoolOpt) Deploy v1 of the Manila API. This option is deprecated, is not used, and will be removed in a future release.
= False(BoolOpt) Deploy v2 of the Manila API. This option is deprecated, is not used, and will be removed in a future release.
= 16384(IntOpt) Maximum line size of message headers to be accepted. Option max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= 1000(IntOpt) The maximum number of items returned in a single response from a collection resource.
= None(StrOpt) Base URL to be presented to users in links to the Share API
= (ListOpt) Specify list of extensions to load when using osapi_share_extension option with manila.api.contrib.select_extensions.
= ['manila.api.contrib.standard_extensions'](MultiStrOpt) The osapi share extension to load.
= ::(StrOpt) IP address for OpenStack Share API to listen on.
= 8786(PortOpt) Port for OpenStack Share API to listen on.
= 1(IntOpt) Number of workers for OpenStack Share API service.
= manila.share.api.API(StrOpt) The full class name of the share API class to use.
= manila.volume.cinder.API(StrOpt) The full class name of the Volume API class to use.
= manila-share-%s(StrOpt) Volume name template.
= manila-snapshot-%s(StrOpt) Volume snapshot name template.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
-
diff --git a/doc/common/tables/manila-auth.xml b/doc/common/tables/manila-auth.xml deleted file mode 100644 index 93e62771b1..0000000000 --- a/doc/common/tables/manila-auth.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of Authorization configuration options
Configuration option = Default valueDescription
[DEFAULT]
= keystone(StrOpt) The strategy to use for auth. Supports noauth, keystone, and deprecated.
-
diff --git a/doc/common/tables/manila-auth_token.xml b/doc/common/tables/manila-auth_token.xml deleted file mode 100644 index 4169eeb93d..0000000000 --- a/doc/common/tables/manila-auth_token.xml +++ /dev/null @@ -1,192 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Authorization Token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(Opt) Config Section from which to load plugin specific options
= None(Opt) Authentication type to load
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
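To illustrate how the auth_token options above are commonly set for manila, here is a hedged manila.conf fragment; the option names (auth_uri, identity_uri, admin_user, admin_password, admin_tenant_name, memcached_servers) are assumed from the keystonemiddleware options of this era, and the host names and password are placeholders.

    [keystone_authtoken]
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_user = manila
    admin_password = MANILA_PASS
    admin_tenant_name = service
    # Cache validated tokens in memcached instead of in-process.
    memcached_servers = controller:11211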
diff --git a/doc/common/tables/manila-ca.xml b/doc/common/tables/manila-ca.xml deleted file mode 100644 index 7633487e40..0000000000 --- a/doc/common/tables/manila-ca.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Certificate Authority configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) CA certificate file to use to verify connecting clients.
= None(StrOpt) Certificate file to use when starting the server securely.
= None(StrOpt) Private key file to use when starting the server securely.
-
diff --git a/doc/common/tables/manila-common.xml b/doc/common/tables/manila-common.xml deleted file mode 100644 index 40fbe3fc6b..0000000000 --- a/doc/common/tables/manila-common.xml +++ /dev/null @@ -1,172 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 900(IntOpt) Timeout for client connections socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= manila.compute.nova.API(StrOpt) The full class name of the Compute API class to use.
= True(BoolOpt) Services to be added to the available pool on create.
= localhost(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.
= 20.0(FloatOpt) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is invalid.
= None(ListOpt) Memcached servers or None for in process cache.
= False(BoolOpt) Whether to log monkey patching.
= (ListOpt) List of modules or decorators to monkey patch.
= 10.0.0.1(StrOpt) IP address of this host.
= 3(IntOpt) Number of times to attempt to run flakey shell commands.
= 60(IntOpt) Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0)
= 300.0(FloatOpt) Interval in seconds between execution of periodic hooks. Used when option 'enable_periodic_hooks' is set to True. Default is 300.
= 60(IntOpt) Seconds between running periodic tasks.
= 10(IntOpt) Seconds between nodes reporting state to datastore.
= 0(IntOpt) The percentage of backend capacity reserved.
= None(StrOpt) Path to the rootwrap configuration file to use for running commands as root.
= 60(IntOpt) Maximum time since last check-in for up service.
= $state_path/smb.conf(StrOpt) Path to smb config.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= 10(IntOpt) Maximum database connection retries during startup. (setting -1 implies an infinite retry count).
= 10(IntOpt) Interval between retries of opening a SQL connection.
= clean.sqlite(StrOpt) File name of clean sqlite database.
= manila.sqlite(StrOpt) The filename to use with sqlite.
= True(BoolOpt) If passed, use synchronous mode for sqlite.
= /var/lib/manila(StrOpt) Top-level directory for maintaining manila's state.
= nova(StrOpt) Availability zone of this node.
= True(BoolOpt) Sets the value of TCP_KEEPALIVE (True/False) for each server socket.
= None(IntOpt) Sets the value of TCP_KEEPCNT for each server socket. Not supported on OS X.
= None(IntOpt) Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not supported on OS X.
= 600(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.
= 0(IntOpt) Count of reservations until usage is refreshed.
= False(BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
= True(BoolOpt) If False, closes the client socket connection explicitly. Set it to True to maintain backward compatibility. The recommended setting is False.
-
diff --git a/doc/common/tables/manila-compute.xml b/doc/common/tables/manila-compute.xml deleted file mode 100644 index c4e6f4efcf..0000000000 --- a/doc/common/tables/manila-compute.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Compute configuration options
Configuration option = Default valueDescription
[DEFAULT]
= http://localhost:5000/v2.0(StrOpt) Identity service URL.
= None(StrOpt) Nova admin password.
= service(StrOpt) Nova admin tenant name.
= nova(StrOpt) Nova admin username.
= False(BoolOpt) Allow insecure SSL requests to nova.
= 2.10(StrOpt) Version of Nova API to be used.
= None(StrOpt) Location of CA certificates file to use for nova client requests.
= compute:nova:adminURL(StrOpt) Same as nova_catalog_info, but for admin endpoint.
= compute:nova:publicURL(StrOpt) Info to match when looking for nova in the service catalog. Format is separated values of the form: <service_type>:<service_name>:<endpoint_type>
= None(StrOpt) Region name of this node.
-
diff --git a/doc/common/tables/manila-cors.xml b/doc/common/tables/manila-cors.xml deleted file mode 100644 index b2567d2a22..0000000000 --- a/doc/common/tables/manila-cors.xml +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CORS configuration options
Configuration option = Default valueDescription
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(ListOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(ListOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
-
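An illustrative example of the CORS options above; the option names (allowed_origin, allow_credentials, allow_methods, max_age) are assumed from oslo.middleware and the origin is a placeholder.

    [cors]
    allowed_origin = https://dashboard.example.com
    allow_credentials = true
    allow_methods = GET,POST,PUT,DELETE,OPTIONS
    max_age = 3600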
diff --git a/doc/common/tables/manila-database.xml b/doc/common/tables/manila-database.xml deleted file mode 100644 index 7c3a629b6c..0000000000 --- a/doc/common/tables/manila-database.xml +++ /dev/null @@ -1,131 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Database configuration options
Configuration option = Default valueDescription
[DEFAULT]
= sqlalchemy(StrOpt) The backend to use for database.
= manila.db(StrOpt) Driver to use for database access.
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
-
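A hedged example of the database options above for manila.conf; the option names (connection, max_pool_size, max_retries, idle_timeout) are assumed from oslo.db and the connection string is a placeholder.

    [database]
    connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila
    max_pool_size = 30
    max_retries = 10
    idle_timeout = 3600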
diff --git a/doc/common/tables/manila-emc.xml b/doc/common/tables/manila-emc.xml deleted file mode 100644 index da9144ea82..0000000000 --- a/doc/common/tables/manila-emc.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of EMC Share Drivers configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) User name for the EMC server.
= None(StrOpt) Password for the EMC server.
= None(StrOpt) EMC pool name.
= None(StrOpt) The root directory where shares will be located.
= None(StrOpt) EMC server hostname or IP address.
= server_2(StrOpt) Container of share servers.
= 8080(PortOpt) Port number for the EMC server.
= True(BoolOpt) Use secure connection to server.
= None(StrOpt) Share backend.
-
diff --git a/doc/common/tables/manila-ganesha.xml b/doc/common/tables/manila-ganesha.xml deleted file mode 100644 index 9e7a2bfa8e..0000000000 --- a/doc/common/tables/manila-ganesha.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Ganesha configuration options
Configuration option = Default valueDescription
[DEFAULT]
= /etc/ganesha(StrOpt) Directory where Ganesha config files are stored.
= $ganesha_config_dir/ganesha.conf(StrOpt) Path to main Ganesha config file.
= $state_path/manila-ganesha.db(StrOpt) Location of Ganesha database file. (Ganesha module only.)
= $ganesha_config_dir/export.d(StrOpt) Path to directory containing Ganesha export configuration. (Ganesha module only.)
= /etc/manila/ganesha-export-templ.d(StrOpt) Path to directory containing Ganesha export block templates. (Ganesha module only.)
= maxread = 65536, prefread = 65536(StrOpt) Options to use when exporting a share using ganesha NFS server. Note that these defaults can be overridden when a share is created by passing metadata with key name export_options. Also note the complete set of default ganesha export options is specified in ganesha_utils. (GPFS only.)
= ganesha.nfsd(StrOpt) Name of the ganesha nfs service.
-
diff --git a/doc/common/tables/manila-generic.xml b/doc/common/tables/manila-generic.xml deleted file mode 100644 index 7d25cf9d25..0000000000 --- a/doc/common/tables/manila-generic.xml +++ /dev/null @@ -1,200 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Generic Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= http://localhost:5000/v2.0(StrOpt) Identity service URL.
= None(StrOpt) Cinder admin password.
= service(StrOpt) Cinder admin tenant name.
= cinder(StrOpt) Cinder admin username.
= False(BoolOpt) Allow insecure SSL requests to cinder.
= None(StrOpt) Location of CA certificates file to use for cinder client requests.
= volume:cinder:publicURL(StrOpt) Info to match when looking for cinder in the service catalog. Format is separated values of the form: <service_type>:<service_name>:<endpoint_type>
= True(BoolOpt) Allow attaching between instances and volumes in different availability zones.
= 3(IntOpt) Number of cinderclient retries on failed HTTP calls.
= None(StrOpt) Name or id of cinder volume type which will be used for all volumes created by driver.
= False(BoolOpt) Attach share server directly to share network. Used only with Neutron.
= None(BoolOpt) There are two possible approaches for share drivers in Manila: a driver either handles share servers itself or it does not, and a driver may support one or both approaches. Set this option to True if the share driver is able to handle share servers and that is the desired mode; otherwise set it to False. It is set to None by default to make this choice intentional.
= manila.network.linux.interface.OVSInterfaceDriver(StrOpt) Vif driver. Used only with Neutron.
= manila-service(StrOpt) Keypair name that will be created and used for service instances.
= 120(IntOpt) Maximum time to wait for attaching cinder volume.
= 300(IntOpt) Maximum time in seconds to wait for creating service instance.
= 180(IntOpt) Maximum time to wait for creating cinder volume.
= 180(IntOpt) Maximum time to wait for extending cinder volume.
= br-int(StrOpt) Name of Open vSwitch bridge to use.
= ~/.ssh/id_rsa(StrOpt) Path to host's private key.
= ~/.ssh/id_rsa.pub(StrOpt) Path to hosts public key.
= manila-service-image(StrOpt) Name of image in Glance, that will be used for service instance creation.
= 100(IntOpt) ID of flavor, that will be used for service instance creation.
= None(StrOpt) Name or ID of service instance in Nova to use for share exports. Used only when share servers handling is disabled.
= manila_service_instance_%s(StrOpt) Name of service instance.
= neutron(StrOpt) Allowed values are ['nova', 'neutron'].
= None(StrOpt) Password for service instance user.
= manila-service(StrOpt) Security group name, that will be used for service instance creation.
= $share_mount_path/smb.conf(StrOpt) Path to SMB config in service instance.
= None(StrOpt) User in service instance that will be used for authentication.
= None(StrOpt) Either the name of the network that the service instance uses within Nova to get an IP address, or the IP address itself, used for managing shares there. Used only when share server handling is disabled.
= 10.254.0.0/16(StrOpt) CIDR of manila service network. Used only with Neutron.
= 28(IntOpt) This mask is used for dividing the service network into subnets. The IP capacity of a subnet with this mask directly defines the possible number of service VMs created per tenant's subnet. Used only with Neutron.
= manila_service_network(StrOpt) Name of manila service network. Used only with Neutron.
= CIFS=manila.share.drivers.generic.CIFSHelper, NFS=manila.share.drivers.generic.NFSHelper(ListOpt) Specify list of share export helpers.
= /shares(StrOpt) Parent path in service instance where shares will be mounted.
= ext4(StrOpt) Filesystem type of the share volume.
= None(StrOpt) Either the name of the network that the service instance uses within Nova to get an IP address, or the IP address itself, used for exporting shares. Used only when share server handling is disabled.
= manila-share-%s(StrOpt) Volume name template.
= manila-snapshot-%s(StrOpt) Volume snapshot name template.
-
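For orientation, the generic driver options above are normally set in a per-backend section of manila.conf. The following is only a sketch; the option names shown (share_driver, driver_handles_share_servers, service_image_name, service_instance_user, service_instance_password) are assumed from the generic driver and are not spelled out in the table itself::

    [generic_backend]
    # Assumed option names for the generic driver; verify against the
    # rendered configuration reference before relying on them.
    share_driver = manila.share.drivers.generic.GenericShareDriver
    driver_handles_share_servers = True
    service_image_name = manila-service-image
    service_instance_user = manila
    service_instance_password = manila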
diff --git a/doc/common/tables/manila-glusterfs.xml b/doc/common/tables/manila-glusterfs.xml deleted file mode 100644 index 80d628423d..0000000000 --- a/doc/common/tables/manila-glusterfs.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of GlusterFS Share Drivers configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Remote Ganesha server node's IP address.
= None(StrOpt) Remote Ganesha server node's login password. This is not required if 'glusterfs_path_to_private_key' is configured.
= root(StrOpt) Remote Ganesha server node's username.
= $state_path/mnt(StrOpt) Base directory containing mount points for Gluster volumes.
= Gluster(StrOpt) Type of NFS server that mediates access to the Gluster volumes (Gluster or Ganesha).
= None(StrOpt) Path of Manila host's private SSH key file.
= None(StrOpt) Remote GlusterFS server node's login password. This is not required if 'glusterfs_path_to_private_key' is configured.
= (ListOpt) List of GlusterFS servers that can be used to create shares. Each GlusterFS server should be of the form [remoteuser@]<volserver>, and they are assumed to belong to distinct Gluster clusters.
= None(StrOpt) Specifies GlusterFS share layout, that is, the method of associating backing GlusterFS resources to shares.
= None(StrOpt) Specifies the GlusterFS volume to be mounted on the Manila host. It is of the form [remoteuser@]<volserver>:<volid>.
= None(StrOpt) Regular expression template used to filter GlusterFS volumes for share creation. The regex template can optionally (i.e., with support of the GlusterFS backend) contain the #{size} parameter, which matches an integer (a sequence of digits); in that case the value shall be interpreted as the size of the volume in GB. Examples: "manila-share-volume-\d+$", "manila-share-volume-#{size}G-\d+$"; with matching volume names, respectively: "manila-share-volume-12", "manila-share-volume-3G-13". In the latter example, the number that matches "#{size}", that is, 3, indicates that the size of the volume is 3G.
-
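A hedged example of how the GlusterFS options above might be combined in a backend section; the option names (glusterfs_target, glusterfs_mount_point_base, glusterfs_nfs_server_type) and the driver class are assumptions based on the GlusterFS driver, since the table no longer carries the option names::

    [glusterfs_backend]
    # Assumed option names; check the rendered table for exact spelling.
    share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver
    glusterfs_target = remoteuser@volserver:volid
    glusterfs_mount_point_base = $state_path/mnt
    glusterfs_nfs_server_type = Gluster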
diff --git a/doc/common/tables/manila-gpfs.xml b/doc/common/tables/manila-gpfs.xml deleted file mode 100644 index 32dbec1423..0000000000 --- a/doc/common/tables/manila-gpfs.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of IBM GPFS Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= $state_path/mnt(StrOpt) Base folder where exported shares are located.
= None(ListOpt) A list of the fully qualified NFS server names that make up the OpenStack Manila configuration.
= KNFS(StrOpt) NFS Server type. Valid choices are "KNFS" (kernel NFS) or "GNFS" (Ganesha NFS).
= None(StrOpt) IP to be added to GPFS export string.
= KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper, GNFS=manila.share.drivers.ibm.gpfs.GNFSHelper(ListOpt) Specify list of share export helpers.
= None(StrOpt) GPFS server SSH login name.
= None(StrOpt) GPFS server SSH login password. The password is not needed if 'gpfs_ssh_private_key' is configured.
= 22(PortOpt) GPFS server SSH port.
= None(StrOpt) Path to GPFS server SSH private key for login.
= rw,sync,no_root_squash,insecure,no_wdelay,no_subtree_check(StrOpt) Options to use when exporting a share using kernel NFS server. Note that these defaults can be overridden when a share is created by passing metadata with key name export_options.
-
diff --git a/doc/common/tables/manila-hdfs.xml b/doc/common/tables/manila-hdfs.xml deleted file mode 100644 index 5d19e0cf30..0000000000 --- a/doc/common/tables/manila-hdfs.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of HDFS Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The IP of the HDFS namenode.
= 9000(PortOpt) The port of HDFS namenode service.
= None(StrOpt) HDFS namenode ssh login name.
= 22(PortOpt) HDFS namenode SSH port.
= None(StrOpt) Path to HDFS namenode SSH private key for login.
= None(StrOpt) HDFS namenode SSH login password. This parameter is not necessary if 'hdfs_ssh_private_key' is configured.
-
diff --git a/doc/common/tables/manila-hds_hnas.xml b/doc/common/tables/manila-hds_hnas.xml deleted file mode 100644 index d189b42854..0000000000 --- a/doc/common/tables/manila-hds_hnas.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of HDS NAS Share Driver configuration options
Configuration option = Default valueDescription
[hnas1]
= None(StrOpt) The IP of the cluster's admin node. Only set in HNAS multinode clusters.
= None(StrOpt) Specify which EVS this backend is assigned to.
= None(StrOpt) Specify IP for mounting shares.
= None(StrOpt) Specify file-system name for creating shares.
= None(StrOpt) HNAS management interface IP for communication between Manila controller and HNAS.
= None(StrOpt) HNAS user password. Required only if private key is not provided.
= None(StrOpt) RSA/DSA private key value used to connect into HNAS. Required only if password is not provided.
= 30(IntOpt) The time (in seconds) to wait for stalled HNAS jobs before aborting.
= None(StrOpt) HNAS username (Base64-encoded string) used to perform tasks such as creating file systems and network interfaces.
= None(StrOpt) The backend name for a given driver implementation.
= manila.share.drivers.generic.GenericShareDriver(StrOpt) Driver to use for share creation.
-
diff --git a/doc/common/tables/manila-hpe3par.xml b/doc/common/tables/manila-hpe3par.xml deleted file mode 100644 index ef081faa5d..0000000000 --- a/doc/common/tables/manila-hpe3par.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of HPE 3PAR Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) 3PAR WSAPI server URL, for example https://<3par ip>:8080/api/v1
= False(BoolOpt) Enable HTTP debugging to 3PAR
= OpenStack(StrOpt) The File Provisioning Group (FPG) to use
= False(BoolOpt) Use one filestore per share
= (StrOpt) 3PAR password for the user specified in hpe3par_username
= (StrOpt) IP address of SAN controller
= (StrOpt) Username for SAN controller
= (StrOpt) Password for SAN controller
= 22(PortOpt) SSH port to use with SAN
= (StrOpt) The IP address for shares not using a share server
= (StrOpt) 3PAR username with the 'edit' role
-
diff --git a/doc/common/tables/manila-huawei.xml b/doc/common/tables/manila-huawei.xml deleted file mode 100644 index ba1697f225..0000000000 --- a/doc/common/tables/manila-huawei.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of Huawei Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= /etc/manila/manila_huawei_conf.xml(StrOpt) The configuration file for the Manila Huawei driver.
-
diff --git a/doc/common/tables/manila-logging.xml b/doc/common/tables/manila-logging.xml deleted file mode 100644 index 9cc2094b2f..0000000000 --- a/doc/common/tables/manila-logging.xml +++ /dev/null @@ -1,132 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= False(BoolOpt) Whether to make exception message format errors fatal.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s(StrOpt) Format string for user_identity field of the logging_context_format_string
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
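The logging options above are the standard oslo.log options and sit in the [DEFAULT] section. A minimal sketch, assuming the usual oslo.log option names (debug, log_dir, use_syslog), which the table itself does not list::

    [DEFAULT]
    # Standard oslo.log option names (assumed, not shown in the table).
    debug = False
    log_dir = /var/log/manila
    use_syslog = False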
diff --git a/doc/common/tables/manila-netapp.xml b/doc/common/tables/manila-netapp.xml deleted file mode 100644 index 79c1438b3f..0000000000 --- a/doc/common/tables/manila-netapp.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of NetApp Share Drivers configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (.*)(StrOpt) Pattern for searching available aggregates for provisioning.
= os_%(net_allocation_id)s(StrOpt) Logical interface (LIF) name template
= None(StrOpt) Administrative user account name used to access the storage system.
= None(StrOpt) Password for the administrative user account specified in the netapp_login option.
= (.*)(StrOpt) Pattern for overriding the selection of network ports on which to create Vserver LIFs.
= root(StrOpt) Root volume name.
= None(StrOpt) Name of aggregate to create Vserver root volumes on. This option only applies when the option driver_handles_share_servers is set to True.
= None(StrOpt) The hostname (or IP address) for the storage system.
= None(PortOpt) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS.
= ontap_cluster(StrOpt) The storage family type used on the storage system; valid values include ontap_cluster for using clustered Data ONTAP.
= None(StrOpt) Comma-separated list of options that control which trace info is written to the debug logs. Values include method and api.
= http(StrOpt) The transport protocol used when communicating with the storage system or proxy server. Valid values are http or https.
= share_%(share_id)s(StrOpt) NetApp volume name template.
= os_%s(StrOpt) Name template to use for new Vserver.
-
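A sketch of a NetApp clustered Data ONTAP backend section built from the options described above. The option names (netapp_storage_family, netapp_server_hostname, netapp_transport_type, netapp_login, netapp_password) and the driver class are assumed from the NetApp driver rather than taken from the table::

    [netapp_backend]
    # Assumed option names; confirm against the rendered reference.
    share_driver = manila.share.drivers.netapp.common.NetAppDriver
    netapp_storage_family = ontap_cluster
    netapp_server_hostname = controller.example.com
    netapp_transport_type = https
    netapp_login = admin
    netapp_password = ADMIN_PASS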
diff --git a/doc/common/tables/manila-qpid.xml b/doc/common/tables/manila-qpid.xml deleted file mode 100644 index e6d99890a1..0000000000 --- a/doc/common/tables/manila-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of QPID configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies: the first one with the payload and a second one to ensure the other side has finished sending the payload. We are going to remove this in the N release, but we must stay backward compatible in the meantime. This option provides such compatibility; it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/manila-quobyte.xml b/doc/common/tables/manila-quobyte.xml deleted file mode 100644 index 3bba63fcd7..0000000000 --- a/doc/common/tables/manila-quobyte.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Quobyte Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The X.509 CA file to verify the server cert.
= quobyte(StrOpt) Password for Quobyte API server
= None(StrOpt) URL of the Quobyte API server (http or https)
= admin(StrOpt) Username for Quobyte API server.
= root(StrOpt) Default owning group for new volumes.
= root(StrOpt) Default owning user for new volumes.
= False(BoolOpt) Actually deletes shares (vs. unexport)
= BASE(StrOpt) Name of volume configuration used for new shares.
-
diff --git a/doc/common/tables/manila-quota.xml b/doc/common/tables/manila-quota.xml deleted file mode 100644 index 1e1a7dcf39..0000000000 --- a/doc/common/tables/manila-quota.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Quota configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 0(IntOpt) Number of seconds between subsequent usage refreshes.
= 10000(IntOpt) Maximum number of volume gigabytes to allow per host.
= manila.quota.DbQuotaDriver(StrOpt) Default driver to use for quota checks.
= 1000(IntOpt) Number of share gigabytes allowed per project.
= 10(IntOpt) Number of share-networks allowed per project.
= 50(IntOpt) Number of shares allowed per project.
= 1000(IntOpt) Number of snapshot gigabytes allowed per project.
= 50(IntOpt) Number of share snapshots allowed per project.
= 86400(IntOpt) Number of seconds until a reservation expires.
-
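The quota options live under [DEFAULT] in manila.conf. A minimal sketch; the option names (quota_shares, quota_snapshots, quota_gigabytes, quota_share_networks) are assumed from the Manila quota engine, since only defaults and descriptions appear in the table above::

    [DEFAULT]
    # Assumed option names; the values repeat the defaults from the table.
    quota_shares = 50
    quota_snapshots = 50
    quota_gigabytes = 1000
    quota_share_networks = 10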
diff --git a/doc/common/tables/manila-rabbitmq.xml b/doc/common/tables/manila-rabbitmq.xml deleted file mode 100644 index c1920e185d..0000000000 --- a/doc/common/tables/manila-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How many times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies: the first one with the payload and a second one to ensure the other side has finished sending the payload. We are going to remove this in the N release, but we must stay backward compatible in the meantime. This option provides such compatibility; it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
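A typical [oslo_messaging_rabbit] section assembled from the options described above; the option names (rabbit_host, rabbit_port, rabbit_userid, rabbit_password, rabbit_use_ssl) follow the usual oslo.messaging spelling and are an assumption here, since the table lists only defaults and descriptions::

    [oslo_messaging_rabbit]
    # Assumed option names; RABBIT_PASS is a placeholder.
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_use_ssl = False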
diff --git a/doc/common/tables/manila-redis.xml b/doc/common/tables/manila-redis.xml deleted file mode 100644 index 3a5e9b47c6..0000000000 --- a/doc/common/tables/manila-redis.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/manila-rpc.xml b/doc/common/tables/manila-rpc.xml deleted file mode 100644 index 69452a8d32..0000000000 --- a/doc/common/tables/manila-rpc.xml +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
= manila-share(StrOpt) The topic share nodes listen on.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
-
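A brief sketch tying the RPC and concurrency options together in manila.conf; rpc_backend and lock_path are the usual oslo option names and are assumed here because the table omits them::

    [DEFAULT]
    # Assumed option name for selecting the messaging driver.
    rpc_backend = rabbit

    [oslo_concurrency]
    # Assumed option name; the directory is a placeholder.
    lock_path = /var/lib/manila/tmp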
diff --git a/doc/common/tables/manila-san.xml b/doc/common/tables/manila-san.xml deleted file mode 100644 index 61e4424430..0000000000 --- a/doc/common/tables/manila-san.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of SAN configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 60(IntOpt) Backend server SSH connection timeout.
= 10(IntOpt) Maximum number of connections in the SSH pool.
= 1(IntOpt) Minimum number of connections in the SSH pool.
-
diff --git a/doc/common/tables/manila-scheduler.xml b/doc/common/tables/manila-scheduler.xml deleted file mode 100644 index f1180f881e..0000000000 --- a/doc/common/tables/manila-scheduler.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Scheduler configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 1.0(FloatOpt) Multiplier used for weighing share capacity. Negative numbers mean to stack vs spread.
= 1.0(FloatOpt) Multiplier used for weighing pools which have existing share servers. Negative numbers mean to spread vs stack.
= AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter, ConsistencyGroupFilter(ListOpt) Which filter class names to use for filtering hosts when not specified in the request.
= CapacityWeigher(ListOpt) Which weigher class names to use for weighing hosts.
= manila.scheduler.drivers.filter.FilterScheduler(StrOpt) Default scheduler driver to use.
= manila.scheduler.host_manager.HostManager(StrOpt) The scheduler host manager class to use.
= (StrOpt) Absolute path to scheduler configuration JSON file.
= manila.scheduler.manager.SchedulerManager(StrOpt) Full class name for the scheduler manager.
= 3(IntOpt) Maximum number of attempts to schedule a share.
= manila-scheduler(StrOpt) The topic scheduler nodes listen on.
-
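A sketch of the scheduler options in [DEFAULT]; the option names (scheduler_driver, scheduler_default_filters, scheduler_default_weighers) are assumptions based on the Manila scheduler and are not shown in the table itself::

    [DEFAULT]
    # Assumed option names; the values repeat the defaults from the table.
    scheduler_driver = manila.scheduler.drivers.filter.FilterScheduler
    scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,ConsistencyGroupFilter
    scheduler_default_weighers = CapacityWeigher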
diff --git a/doc/common/tables/manila-share.xml b/doc/common/tables/manila-share.xml deleted file mode 100644 index 9fbb6fe1d0..0000000000 --- a/doc/common/tables/manila-share.xml +++ /dev/null @@ -1,164 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Share configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) If set to True, Manila will delete all share servers that were unused for more than the specified time. If set to False, automatic deletion of share servers will be disabled.
= 4096(IntOpt) Number of backlog requests to configure the socket with.
= None(StrOpt) Default share type to use.
= False(BoolOpt) Whether share servers will be deleted on deletion of the last share.
= None(BoolOpt) There are two possible approaches for share drivers in Manila: a driver either handles share servers itself or it does not, and a driver may support one or both approaches. Set this option to True if the share driver is able to handle share servers and that is the desired mode; otherwise set it to False. It is set to None by default to make this choice intentional.
= False(BoolOpt) Whether to enable periodic hooks or not.
= False(BoolOpt) Whether to enable post hooks or not.
= False(BoolOpt) Whether to enable pre hooks or not.
= None(ListOpt) A list of share backend names to use. These backend names should be backed by a unique [CONFIG] group with its options.
= NFS, CIFS(ListOpt) Specify the list of protocols allowed for share creation. Available values are 'NFS', 'CIFS', 'GLUSTERFS', and 'HDFS'.
= 64(IntOpt) Size of executor thread pool.
= (ListOpt) Driver(s) to perform some additional actions before and after share driver actions and on a periodic basis. Default is [].
= 300(IntOpt) Timeout for creating and deleting share instances when performing share migration (seconds).
= None(StrOpt) The IP of the node responsible for copying data during migration, such as the data copy service node, reachable by the backend.
= lost+found(ListOpt) List of files and folders to be ignored when migrating shares. Items should be names (not including any path).
= None(StrOpt) Backend IP in admin network to use for mounting shares during migration.
= None(StrOpt) The command for mounting shares for this backend. Must specify the executable and all necessary parameters for the protocol supported. It is advisable to separate protocols per backend.
= True(BoolOpt) Specify whether read-only access mode is supported in this backend.
= /tmp/(StrOpt) Temporary path to create and mount shares during migration.
= 90(IntOpt) Time to wait for access rules to be allowed/denied on backends when migrating shares using generic approach (seconds).
= None(StrOpt) Name of the configuration group in the Manila conf file to look for network config options. If not set, the share backend's config group will be used. If an option is not found within the provided group, the 'DEFAULT' group will be searched.
= sudo(StrOpt) Deprecated: command to use for running commands as root.
= manila.share.manager.ShareManager(StrOpt) Full class name for the share manager.
= share-%s(StrOpt) Template string to be used to generate share names.
= share-snapshot-%s(StrOpt) Template string to be used to generate share snapshot names.
= month(StrOpt) Time period to generate share usages for. Time period must be hour, day, month or year.
= False(BoolOpt) Whether to suppress post hook errors (allow driver's results to pass through) or not.
= False(BoolOpt) Whether to suppress pre hook errors (allow driver perform actions) or not.
= False(BoolOpt) If set to True, Manila will deny access and remove all access rules on share unmanage. If set to False, nothing will be changed.
= 10(IntOpt) Unallocated share servers reclamation time interval (minutes). Minimum value is 10 minutes, maximum is 60 minutes. The reclamation function is run every 10 minutes and deletes share servers that were unused for longer than the unused_share_server_cleanup_interval option defines. This value reflects the shortest time Manila will wait for a share server to go unutilized before deleting it.
= False(BoolOpt) If set to False, then share creation from snapshot will be performed on the same host. If set to True, then scheduling step will be used.
-
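A minimal sketch of the share-service options in manila.conf; the option names (enabled_share_backends, enabled_share_protocols, default_share_type) are assumed from the Manila share manager and should be confirmed against the rendered table::

    [DEFAULT]
    # Assumed option names; generic_backend refers to a per-backend
    # section such as the one sketched earlier.
    enabled_share_backends = generic_backend
    enabled_share_protocols = NFS,CIFS
    default_share_type = default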
diff --git a/doc/common/tables/manila-winrm.xml b/doc/common/tables/manila-winrm.xml deleted file mode 100644 index fab10810c1..0000000000 --- a/doc/common/tables/manila-winrm.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of WinRM configuration options
Configuration option = Default valueDescription
[DEFAULT]
= ~/.ssl/key.pem(StrOpt) Path to the x509 certificate key.
= ~/.ssl/cert.pem(StrOpt) Path to the x509 certificate used for accessing the service instance.
= 60(IntOpt) WinRM connection timeout.
= 60(IntOpt) WinRM operation timeout.
= 3(IntOpt) WinRM retry count.
= 5(IntOpt) WinRM retry interval in seconds
= False(BoolOpt) Use x509 certificates in order to authenticate to the service instance.
-
diff --git a/doc/common/tables/manila-zeromq.xml b/doc/common/tables/manila-zeromq.xml deleted file mode 100644 index 1f3c0eedcc..0000000000 --- a/doc/common/tables/manila-zeromq.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= 100(IntOpt) Number of retries to find free port number before fail with ZMQBindError.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= 65536(IntOpt) Maximal port number for random ports range.
= 49152(IntOpt) Minimal port number for random ports range.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
diff --git a/doc/common/tables/manila-zfssa.xml b/doc/common/tables/manila-zfssa.xml deleted file mode 100644 index de94891b92..0000000000 --- a/doc/common/tables/manila-zfssa.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZFSSA Share Driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) ZFSSA management authorized user password.
= None(StrOpt) ZFSSA management authorized username.
= None(StrOpt) IP address for data.
= None(StrOpt) ZFSSA management IP address.
= fletcher4(StrOpt) Controls checksum used for data blocks.
= off(StrOpt) Data compression: off, lzjb, gzip-2, gzip, gzip-9.
= latency(StrOpt) Controls behavior when servicing synchronous writes.
= (StrOpt) Location of project in ZFS/SA.
= true(StrOpt) Controls whether a share quota includes snapshot.
= true(StrOpt) Controls whether file ownership can be changed.
= false(StrOpt) Controls whether the share is scanned for viruses.
= None(StrOpt) ZFSSA storage pool name.
= None(StrOpt) ZFSSA project name.
= None(StrOpt) REST connection timeout (in seconds).
-
diff --git a/doc/common/tables/manual/cinder-netapp_cdot_extraspecs.xml b/doc/common/tables/manual/cinder-netapp_cdot_extraspecs.xml deleted file mode 100644 index 40fc43d341..0000000000 --- a/doc/common/tables/manual/cinder-netapp_cdot_extraspecs.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of extra specs options for NetApp Unified Driver with Clustered Data ONTAP
Extra specTypeDescription
netapp_raid_typeStringLimit the candidate volume list based on one of the following raid types: raid4, raid_dp.
netapp_disk_typeStringLimit the candidate volume list based on one of the following disk types: ATA, BSAS, EATA, FCAL, FSAS, LUN, MSATA, SAS, SATA, SCSI, XATA, XSAS, or SSD.
netapp:qos_policy_groupPlease note that this extra spec has a colon (:) in its name because it is used by the driver to assign the QoS policy group to the OpenStack Block Storage volume after it has been provisioned.StringSpecify the name of a QoS policy group, which defines measurable Service Level Objectives, that should be applied to the OpenStack Block Storage volume at the time of volume creation. Ensure that the QoS policy group object within Data ONTAP should be defined before an OpenStack Block Storage volume is created, and that the QoS policy group is not associated with the destination FlexVol volume.
netapp_mirroredBooleanLimit the candidate volume list to only the ones that are mirrored on the storage controller.
netapp_unmirroredIn the Juno release, these negative-assertion extra specs are formally deprecated by the NetApp unified driver. Instead of using the deprecated negative-assertion extra specs (for example, ) with a value of true, use the corresponding positive-assertion extra spec (for example, ) with a value of false.BooleanLimit the candidate volume list to only the ones that are not mirrored on the storage controller.
netapp_dedupBooleanLimit the candidate volume list to only the ones that have deduplication enabled on the storage controller.
netapp_nodedupBooleanLimit the candidate volume list to only the ones that have deduplication disabled on the storage controller.
netapp_compressionBooleanLimit the candidate volume list to only the ones that have compression enabled on the storage controller.
netapp_nocompressionBooleanLimit the candidate volume list to only the ones that have compression disabled on the storage controller.
netapp_thin_provisionedBooleanLimit the candidate volume list to only the ones that support thin provisioning on the storage controller.
netapp_thick_provisionedBooleanLimit the candidate volume list to only the ones that support thick provisioning on the storage controller.
-
diff --git a/doc/common/tables/manual/keystone-saml-module.xml b/doc/common/tables/manual/keystone-saml-module.xml deleted file mode 100644 index c4022808bb..0000000000 --- a/doc/common/tables/manual/keystone-saml-module.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization configuration options
Configuration option = Default valueDescription
[auth]
= keystone.auth.plugins.mapped.Mapped(StrOpt) The saml2 auth plugin module.
-
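The single option in this table maps the saml2 authentication method to its plugin in keystone.conf. A sketch, assuming the option is named after the method (saml2) and that the method list option is called methods, neither of which is spelled out in the table::

    [auth]
    # Assumed option names in keystone.conf.
    methods = external,password,token,saml2
    saml2 = keystone.auth.plugins.mapped.Mapped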
diff --git a/doc/common/tables/neutron-agent.xml b/doc/common/tables/neutron-agent.xml deleted file mode 100644 index f275b58d6b..0000000000 --- a/doc/common/tables/neutron-agent.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= $state_path/external/pids(StrOpt) Location to store child pid files
= None(IntOpt) MTU setting for device.
= dibbler(StrOpt) Driver used for ipv6 prefix delegation. This needs to be an entry point defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for entry points included with the neutron source.
= False(BoolOpt) Start and use a daemon that can run the commands that need to be run with root privileges. This option is usually enabled on nodes that run nova compute processes
[AGENT]
= Open vSwitch agent(StrOpt) Selects the Agent Type reported
= nova(LengthStrOpt) Availability zone of this node
= (ListOpt) List of <physical_network>:<physical_interface>
-
diff --git a/doc/common/tables/neutron-amqp.xml b/doc/common/tables/neutron-amqp.xml deleted file mode 100644 index f0e34e2e8a..0000000000 --- a/doc/common/tables/neutron-amqp.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= neutron(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
diff --git a/doc/common/tables/neutron-api.xml b/doc/common/tables/neutron-api.xml deleted file mode 100644 index 6edd7f99e5..0000000000 --- a/doc/common/tables/neutron-api.xml +++ /dev/null @@ -1,107 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Allow the usage of the bulk API
= False(BoolOpt) Allow the usage of the pagination
= False(BoolOpt) Allow the usage of the sorting
= (StrOpt) The path for API extensions
= api-paste.ini(StrOpt) File name for the paste.deploy config for api service
= 4096(IntOpt) Number of backlog requests to configure the socket with
= 900(IntOpt) Timeout for client connections' socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= -1(StrOpt) The maximum number of items returned in a single response. A value of 'infinite' or a negative integer means no limit.
= 30(IntOpt) Number of seconds to keep retrying to listen
= (ListOpt) The service plugins Neutron will use
= 600(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.
= True(BoolOpt) If False, closes the client socket connection explicitly.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
[service_providers]
= [](MultiStrOpt) Defines providers for advanced services using the format: <service_type>:<name>:<driver>[:default]
-
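A sketch of how the API options above might appear in neutron.conf; the option names (allow_bulk, allow_pagination, allow_sorting, pagination_max_limit, service_plugins, policy_file) are assumptions based on the Neutron API layer and oslo.policy, not part of the table::

    [DEFAULT]
    # Assumed option names; 'router' is an example service plugin.
    allow_bulk = True
    allow_pagination = True
    allow_sorting = True
    pagination_max_limit = 1000
    service_plugins = router

    [oslo_policy]
    policy_file = policy.json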
diff --git a/doc/common/tables/neutron-auth_token.xml b/doc/common/tables/neutron-auth_token.xml deleted file mode 100644 index 10313eba50..0000000000 --- a/doc/common/tables/neutron-auth_token.xml +++ /dev/null @@ -1,170 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
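A typical [keystone_authtoken] block built from the options described above; auth_uri, identity_uri, and the admin_* option names follow common keystonemiddleware usage in this release and are assumed here, as the table lists only defaults and descriptions::

    [keystone_authtoken]
    # Assumed option names and placeholder values.
    auth_uri = http://controller:5000
    identity_uri = http://controller:35357
    admin_tenant_name = service
    admin_user = neutron
    admin_password = NEUTRON_PASS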
diff --git a/doc/common/tables/neutron-bagpipe.xml b/doc/common/tables/neutron-bagpipe.xml deleted file mode 100644 index 79e76067b5..0000000000 --- a/doc/common/tables/neutron-bagpipe.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of BaGpipe BGP configuration options
Configuration option = Default valueDescription
[BAGPIPE]
= 127.0.0.1(StrOpt) BGP component REST service IP address.
= 8082(IntOpt) BGP component REST service IP port.
= br-mpls(StrOpt) OVS MPLS bridge to use
= patch-from-tun(StrOpt) OVS Peer patch port in MPLS bridge to tunnel bridge (traffic from tunnel bridge)
= patch-to-tun(StrOpt) OVS Peer patch port in MPLS bridge to tunnel bridge (traffic to tunnel bridge)
= 10(IntOpt) The number of seconds the BGP component client will wait between polling for restart detection.
= patch-from-mpls(StrOpt) OVS Peer patch port in tunnel bridge to MPLS bridge (traffic from MPLS bridge)
= patch-to-mpls(StrOpt) OVS Peer patch port in tunnel bridge to MPLS bridge (traffic to MPLS bridge)
-
diff --git a/doc/common/tables/neutron-bigswitch.xml b/doc/common/tables/neutron-bigswitch.xml deleted file mode 100644 index 20d77faa82..0000000000 --- a/doc/common/tables/neutron-bigswitch.xml +++ /dev/null @@ -1,183 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of BigSwitch configuration options
Configuration option = Default valueDescription
[NOVA]
= (ListOpt) Nova compute nodes to manually set VIF type to 802.1qbg
= (ListOpt) Nova compute nodes to manually set VIF type to 802.1qbh
= (ListOpt) Nova compute nodes to manually set VIF type to binding_failed
= (ListOpt) Nova compute nodes to manually set VIF type to bridge
= (ListOpt) Nova compute nodes to manually set VIF type to distributed
= (ListOpt) Nova compute nodes to manually set VIF type to dvs
= (ListOpt) Nova compute nodes to manually set VIF type to hw_web
= (ListOpt) Nova compute nodes to manually set VIF type to hyperv
= (ListOpt) Nova compute nodes to manually set VIF type to ib_hostdev
= (ListOpt) Nova compute nodes to manually set VIF type to iovisor
= (ListOpt) Nova compute nodes to manually set VIF type to ivs
= (ListOpt) Nova compute nodes to manually set VIF type to midonet
= (ListOpt) Nova compute nodes to manually set VIF type to other
= (ListOpt) Nova compute nodes to manually set VIF type to ovs
= (ListOpt) Nova compute nodes to manually set VIF type to unbound
= (ListOpt) Nova compute nodes to manually set VIF type to vhostuser
= (ListOpt) Nova compute nodes to manually set VIF type to vrouter
= ivs(StrOpt) Virtual interface type to configure on Nova compute nodes
= unbound, binding_failed, distributed, ovs, bridge, other, ivs, iovisor, vhostuser, dvs, 802.1qbg, 802.1qbh, hyperv, midonet, ib_hostdev, hw_web, vrouter(ListOpt) List of allowed vif_type values.
[RESTPROXY]
= True(BoolOpt) Flag to decide if a route to the metadata server should be injected into the VM
= True(BoolOpt) If neutron fails to create a resource because the backend controller doesn't know of a dependency, the plugin automatically triggers a full data synchronization to the controller.
= True(BoolOpt) Re-use HTTP/HTTPS connections to the controller.
= 60(IntOpt) Time between verifications that the backend controller database is consistent with Neutron. (0 to disable)
= neutron-ubuntu1404-master(StrOpt) User defined identifier for this Neutron deployment
= False(BoolOpt) Disables SSL certificate validation for controllers
= None(StrOpt) The username and password for authenticating against the Big Switch or Floodlight controller.
= True(BoolOpt) If True, Use SSL when connecting to the Big Switch or Floodlight controller.
= 10(IntOpt) Maximum number of seconds to wait for proxy request to connect and complete.
= localhost:8800(ListOpt) A comma-separated list of Big Switch or Floodlight servers and port numbers. The plugin proxies the requests to the Big Switch/Floodlight server, which performs the networking configuration. Only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
= /etc/neutron/plugins/bigswitch/ssl(StrOpt) Directory containing ca_certs and host_certs certificate directories.
= True(BoolOpt) Trust and store the first certificate received for each controller address and use it to validate future connections to that address.
= False(BoolOpt) Sync data on connect
= 4(IntOpt) Maximum number of threads to spawn to handle large volumes of port creations.
[RESTPROXYAGENT]
= br-int(StrOpt) Name of integration bridge on compute nodes used for security group insertion.
= 5(IntOpt) Seconds between agent checks for port changes
= ivs(StrOpt) Virtual switch type.
[ROUTER]
= 200(IntOpt) Maximum number of router rules
= ['*:any:any:permit'](MultiStrOpt) The default router rules installed in new tenant routers. Repeat the config option for each rule. Format is <tenant>:<source>:<destination>:<action> Use an * to specify default for all tenants.
-
diff --git a/doc/common/tables/neutron-brocade.xml b/doc/common/tables/neutron-brocade.xml deleted file mode 100644 index 75fc8e901d..0000000000 --- a/doc/common/tables/neutron-brocade.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Brocade configuration options
Configuration option = Default valueDescription
[PHYSICAL_INTERFACE]
= eth0(StrOpt) The network interface to use when creating a port
[SWITCH]
= (StrOpt) The address of the host to SSH to
= NOS(StrOpt) Currently unused
= (StrOpt) The SSH password to use
= (StrOpt) The SSH username to use
-
diff --git a/doc/common/tables/neutron-brocade_vyatta_l3.xml b/doc/common/tables/neutron-brocade_vyatta_l3.xml deleted file mode 100644 index 54b53c325d..0000000000 --- a/doc/common/tables/neutron-brocade_vyatta_l3.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Brocade Vyatta L3 plug-in configuration options
Configuration option = Default valueDescription
[VROUTER]
= 2(StrOpt) Nova VM flavor for instances of Vyatta vRouter.
= None(StrOpt) Nova image id for instances of Vyatta vRouter.
= None(StrOpt) Keystone URL.
= None(StrOpt) Vyatta vRouter management network id.
= 5(IntOpt) Number of seconds between consecutive Nova queries when waiting for router instance status change.
= 300(IntOpt) Number of seconds to wait for Nova to activate instance before setting resource to error state.
= None(StrOpt) Name of tenant admin user.
= None(StrOpt) Tenant admin password.
= None(StrOpt) UUID of tenant that holds Vyatta vRouter instances.
= 300(IntOpt) Number of seconds to wait for Vyatta vRouter to boot before setting resource to error state.
= vyatta:vyatta(StrOpt) Vyatta vRouter login credentials
= 5(IntOpt) Number of seconds between consecutive Vyatta vRouter queries when waiting for router instance boot.
-
diff --git a/doc/common/tables/neutron-ca.xml b/doc/common/tables/neutron-ca.xml deleted file mode 100644 index 7e53e6abb4..0000000000 --- a/doc/common/tables/neutron-ca.xml +++ /dev/null @@ -1,34 +0,0 @@
Description of CA and SSL configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) CA certificate file to use to verify connecting clients
= None(StrOpt) Certificate file to use when starting the server securely
= None(StrOpt) Private key file to use when starting the server securely
-
diff --git a/doc/common/tables/neutron-cisco.xml b/doc/common/tables/neutron-cisco.xml deleted file mode 100644 index 392564c322..0000000000 --- a/doc/common/tables/neutron-cisco.xml +++ /dev/null @@ -1,153 +0,0 @@
Description of Cisco configuration options
Configuration option = Default valueDescription
[cfg_agent]
= 30(IntOpt) Time in seconds for connecting to a hosting device
= neutron_fwaas.services.firewall.drivers.cisco.csr_firewall_svc_helper.CsrFirewallServiceHelper(StrOpt) Path of the firewall service helper class.
= 300(IntOpt) The time in seconds until a backlogged hosting device is presumed dead. This value should be set up high enough to recover from a period of connectivity loss or high load when the device may not be responding.
= networking_cisco.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper(StrOpt) Path of the routing service helper class.
= 10(IntOpt) Interval when the process_services() loop executes in seconds. This is when the config agent lets each service helper to process its neutron resources.
[cisco_csr_ipsec]
= 60(IntOpt) Status check interval for Cisco CSR IPSec connections
[general]
= 10(IntOpt) Time in seconds between renewed scheduling attempts of non-scheduled routers.
= 60(IntOpt) Seconds of no status update until a cfg agent is considered down.
= mgmt_sec_grp(StrOpt) Default security group applied on management port. Default value is mgmt_sec_grp.
= True(BoolOpt) Ensure that Nova is running before attempting to create any VM.
= L3AdminTenant(StrOpt) Name of the L3 admin tenant.
= osn_mgmt_nw(StrOpt) Name of management network for device configuration. Default value is osn_mgmt_nw
= /opt/stack/data/neutron/cisco/config_drive(StrOpt) Path to config drive files for service VM instances.
= /opt/stack/data/neutron/cisco/templates(StrOpt) Path to templates for hosting devices.
[hosting_devices]
= 420(IntOpt) Booting time in seconds before a CSR1kv becomes operational.
= networking_cisco.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver(StrOpt) Config agent driver for CSR1kv.
= csr1kv_cfg_template(StrOpt) CSR1kv configdrive template file.
= networking_cisco.plugins.cisco.l3.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver(StrOpt) Hosting device driver for CSR1kv.
= 621(StrOpt) UUID of Nova flavor for CSR1kv.
= csr1kv_openstack_img(StrOpt) Name of Glance image for CSR1kv.
= cisco(StrOpt) Password to use for CSR1kv configurations.
= networking_cisco.plugins.cisco.l3.plugging_drivers.n1kv_trunking_driver.N1kvTrunkingPlugDriver(StrOpt) Plugging driver for CSR1kv.
= stack(StrOpt) Username to use for CSR1kv configurations.
[ml2_cisco_n1kv]
= 2(IntOpt) Maximum number of retry attempts for VSM REST API.
[n1kv]
= osn_mgmt_pp(StrOpt) Name of N1kv port profile for management ports.
= osn_t1_np(StrOpt) Name of N1kv network profile for T1 networks (i.e., trunk networks for VXLAN segmented traffic).
= osn_t1_pp(StrOpt) Name of N1kv port profile for T1 ports (i.e., ports carrying traffic from VXLAN segmented networks).
= osn_t2_np(StrOpt) Name of N1kv network profile for T2 networks (i.e., trunk networks for VLAN segmented traffic).
= osn_t2_pp(StrOpt) Name of N1kv port profile for T2 ports (i.e., ports carrying traffic from VLAN segmented networks).
-
diff --git a/doc/common/tables/neutron-common.xml b/doc/common/tables/neutron-common.xml deleted file mode 100644 index 7be3cf5ce8..0000000000 --- a/doc/common/tables/neutron-common.xml +++ /dev/null @@ -1,273 +0,0 @@
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Admin password
= None(StrOpt) Admin tenant name
= None(StrOpt) Admin user
= 75(IntOpt) Seconds after which the agent is regarded as down; should be at least twice report_interval, to be sure the agent is down for good.
= None(IntOpt) Number of separate API worker processes for service. If not specified, the default is equal to the number of CPUs available for best performance.
= None(StrOpt) Certificate Authority public key (CA cert) file for ssl
= False(BoolOpt) Turn off verification of the certificate for ssl
= None(StrOpt) Authentication region
= keystone(StrOpt) The type of authentication to use
= None(StrOpt) Authentication URL
= fa:16:3e:00:00:00(StrOpt) The base MAC address Neutron will use for VIFs
= 0.0.0.0(StrOpt) The host IP to bind to
= 9696(IntOpt) The port to bind to
= None(StrOpt) The core plugin Neutron will use
= None(StrOpt) Default IPv4 subnet-pool to be used for automatic subnet CIDR allocation
= None(StrOpt) Default IPv6 subnet-pool to be used for automatic subnet CIDR allocation
= ['neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver'](MultiStrOpt) Drivers used to manage loadbalancing devices
= True(BoolOpt) Allow sending resource operation notification to DHCP agent
= 1(IntOpt) Number of DHCP agents scheduled to host a tenant network. If this number is greater than 1, the scheduler automatically assigns multiple DHCP agents for a given tenant network, providing high availability for DHCP service.
= False(BoolOpt) Use broadcast in DHCP replies
= $state_path/dhcp(StrOpt) Location to store DHCP server config files
= True(BoolOpt) Delete the namespace after removing a DHCP server. This option is deprecated and will be removed in a future release.
= openstacklocal(StrOpt) Domain to use for building the hostnames. This option is deprecated. It has been moved to neutron.conf as dns_domain. It will be removed from here in a future release.
= 86400(IntOpt) DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite lease times.
= networks(StrOpt) Representing the resource type whose load is being reported by the agent. This can be "networks", "subnets" or "ports". When specified (default is networks), the server will extract the particular load sent as part of its agent configuration object from the agent report state, which is the number of resources being consumed, at every report_interval. dhcp_load_type can be used in combination with network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler. When the network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured to represent the choice for the resource being balanced. Example: dhcp_load_type=networks
= openstacklocal(StrOpt) Domain to use for building the hostnames
= True(BoolOpt) Agent starts with admin_state_up=False when enable_new_agents=False. In that case, the user's resources will not be scheduled automatically to the agent until the admin changes admin_state_up to True.
= False(BoolOpt) Enable services on an agent with admin_state_up False. If this option is False, when admin_state_up of an agent is turned False, services on it will be disabled. Agents with admin_state_up False are not selected for automatic scheduling regardless of this option. But manual scheduling to such agents is available if this option is True.
= adminURL(StrOpt) Network service endpoint type to pull from the keystone catalog
= None(StrOpt) Neutron endpoint URL, if not set will use endpoint from the keystone catalog along with endpoint_type
= 64(IntOpt) Size of executor thread pool.
= True(BoolOpt) Ensure that configured gateway is on subnet. For IPv6, validate only if gateway is not a link local address. Deprecated, to be removed during the K release, at which point the check will be mandatory.
= localhost(StrOpt) Hostname to be used by the neutron server, agents and services running on this machine. All the agents and services running on this machine must use the same host value.
= None(StrOpt) The driver used to manage the virtual interface.
= False(BoolOpt) Force ip_lib calls to use the root helper
= None(StrOpt) IPAM driver to use.
= 16(IntOpt) How many times Neutron will retry MAC generation
= 10(IntOpt) Maximum number of allowed address pairs
= 5(IntOpt) Maximum number of DNS nameservers
= 5(IntOpt) Maximum number of fixed ips per port
= 20(IntOpt) Maximum number of host routes per subnet
= None(ListOpt) Memcached servers or None for in process cache.
= 5(IntOpt) Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0)
= 40(IntOpt) Seconds between running periodic tasks
= 5(IntOpt) Periodic interval at which the plugin checks for the monitoring L2 gateway agent
= 300(IntOpt) Interval between two metering reports
= /etc/nova/rootwrap.conf(StrOpt) Path to the rootwrap configuration file to use for running commands as root
= /var/lib/neutron(StrOpt) Where to store Neutron state files. This directory must be writable by the agent.
= False(BoolOpt) If True, then allow plugins that support it to create VLAN transparent networks.
[AGENT]
= respawn(StrOpt) Action to be executed when a child process dies
= 60(IntOpt) Interval between checks of child process liveness (seconds), use 0 to disable
= False(BoolOpt) Log agent heartbeats
= sudo(StrOpt) Root helper application.
= None(StrOpt) Root helper daemon application to use when possible.
[certificates]
= barbican(StrOpt) Certificate Manager plugin. Defaults to barbican.
= /var/lib/neutron-lbaas/certificates/(StrOpt) Absolute path to the certificate storage directory. Defaults to env[OS_LBAAS_TLS_STORAGE].
[heleos]
= None(StrOpt) ESM admin password.
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
[qos]
= message_queue(ListOpt) Drivers list to use to send the update notification
-
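For reference, a minimal neutron.conf-style sketch of a few of the common options listed above. The option names (base_mac, bind_host, bind_port, dhcp_lease_duration, max_dns_nameservers) are not preserved in the table rows and are assumed here from the settings the rows appear to describe; the values are illustrative.

    [DEFAULT]
    # MAC prefix Neutron uses when generating VIF addresses
    base_mac = fa:16:3e:00:00:00
    # Address and port the neutron-server API listens on
    bind_host = 0.0.0.0
    bind_port = 9696
    # DHCP lease duration in seconds (-1 means infinite leases)
    dhcp_lease_duration = 86400
    # Cap on DNS nameservers per subnet
    max_dns_nameservers = 5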
diff --git a/doc/common/tables/neutron-compute.xml b/doc/common/tables/neutron-compute.xml deleted file mode 100644 index 96ef9420c0..0000000000 --- a/doc/common/tables/neutron-compute.xml +++ /dev/null @@ -1,66 +0,0 @@
Description of Compute configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Send notification to nova when port data (fixed_ips/floatingip) changes so nova can update its cache.
= True(BoolOpt) Send notification to nova when port status changes
= http://localhost:5000/v2.0(StrOpt) Authorization URL for connecting to nova in admin context. Deprecated in favour of an auth plugin in [nova].
= None(StrOpt) Password for connection to nova in admin context. Deprecated in favour of an auth plugin in [nova].
= None(StrOpt) The uuid of the admin nova tenant. Deprecated in favour of an auth plugin in [nova].
= None(StrOpt) The name of the admin nova tenant. Deprecated in favour of an auth plugin in [nova].
= None(StrOpt) Username for connecting to nova in admin context. Deprecated in favour of an auth plugin in [nova].
= (StrOpt) Client certificate for nova metadata api server.
= (StrOpt) Private key of client certificate.
= http://127.0.0.1:8774/v2(StrOpt) URL for connection to nova. Deprecated in favour of an auth plugin in [nova].
= 2(IntOpt) Number of seconds between sending events to nova if there are any events to send.
-
diff --git a/doc/common/tables/neutron-cors.xml b/doc/common/tables/neutron-cors.xml deleted file mode 100644 index 1ef467750c..0000000000 --- a/doc/common/tables/neutron-cors.xml +++ /dev/null @@ -1,73 +0,0 @@
Description of CORS configuration options
Configuration option = Default valueDescription
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
-
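A hedged sketch of how the CORS options above are typically set; the option names (allowed_origin, allow_credentials, allow_methods, max_age) are assumed to be the standard oslo.middleware names, and the origin URL is made up.

    [cors]
    # Origin allowed to reach the API cross-domain (illustrative URL)
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    allow_methods = GET, POST, PUT, DELETE, OPTIONS
    # Seconds a browser may cache the preflight response
    max_age = 3600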
diff --git a/doc/common/tables/neutron-database.xml b/doc/common/tables/neutron-database.xml deleted file mode 100644 index c73f0c53ca..0000000000 --- a/doc/common/tables/neutron-database.xml +++ /dev/null @@ -1,102 +0,0 @@
Description of database configuration options
Configuration option = Default valueDescription
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
-
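A hedged example of the database section; the option names (connection, slave_connection, max_retries, idle_timeout) are assumed to be the standard oslo.db names, and the connection URLs and credentials are illustrative only.

    [database]
    # SQLAlchemy URL for the main database
    connection = mysql+pymysql://neutron:secret@db.example.com/neutron
    # Optional read-only replica
    slave_connection = mysql+pymysql://neutron:secret@db-replica.example.com/neutron
    # Retry the initial connection up to 10 times at startup
    max_retries = 10
    # Recycle idle connections after one hour
    idle_timeout = 3600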
diff --git a/doc/common/tables/neutron-dhcp_agent.xml b/doc/common/tables/neutron-dhcp_agent.xml deleted file mode 100644 index 4ae8a9a2d0..0000000000 --- a/doc/common/tables/neutron-dhcp_agent.xml +++ /dev/null @@ -1,70 +0,0 @@
Description of DHCP agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) If True, effort is made to advertise MTU settings to VMs via network methods (DHCP and RA MTU options) when the network's preferred MTU is known.
= neutron.agent.linux.dhcp.Dnsmasq(StrOpt) The driver used to manage the DHCP server.
= None(StrOpt) Base log dir for dnsmasq logging. The log contains DHCP and DNS log information and is useful for debugging issues with either DHCP or DNS. If this section is null, disable dnsmasq log.
= (StrOpt) Override the default dnsmasq settings with this file
= None(ListOpt) Comma-separated list of the DNS servers which will be used as forwarders.
= 16777216(IntOpt) Limit number of leases to prevent a denial-of-service.
= False(BoolOpt) Support Metadata requests on isolated networks.
= False(BoolOpt) Allows for serving metadata requests from a dedicated network. Requires enable_isolated_metadata = True
= False(BoolOpt) Force to use DHCP to get Metadata on all networks.
= 4(IntOpt) Number of threads to use during sync process.
= 5(IntOpt) Interval to resync.
= True(BoolOpt) Allow overlapping IP. This option is deprecated and will be removed in a future release.
-
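A hedged dhcp_agent.ini-style sketch of a few of the options above; the option names (dhcp_driver, dnsmasq_config_file, dnsmasq_dns_servers, enable_isolated_metadata) are assumed, and the file path and resolver addresses are illustrative.

    [DEFAULT]
    # Back end used to run DHCP servers
    dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
    # Optional dnsmasq overrides and upstream resolvers
    dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
    dnsmasq_dns_servers = 8.8.8.8, 8.8.4.4
    # Serve metadata on networks that have no router
    enable_isolated_metadata = True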
diff --git a/doc/common/tables/neutron-dvr.xml b/doc/common/tables/neutron-dvr.xml deleted file mode 100644 index 9440ed5932..0000000000 --- a/doc/common/tables/neutron-dvr.xml +++ /dev/null @@ -1,30 +0,0 @@
Description of DVR configuration options
Configuration option = Default valueDescription
[DEFAULT]
= fa:16:3f:00:00:00(StrOpt) The base MAC address used for unique DVR instances by Neutron. The first 3 octets will remain unchanged. If the 4th octet is not 00, it will also be used. The others will be randomly generated. The 'dvr_base_mac' *must* be different from 'base_mac' to avoid mixing them up with MACs allocated for tenant ports. A 4-octet example would be dvr_base_mac = fa:16:3f:4f:00:00. The default is 3 octets.
= False(BoolOpt) System-wide flag to determine the type of router that tenants can create. Only admin can override.
-
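A hedged sketch of a DVR-enabled neutron.conf fragment; the option names (dvr_base_mac, router_distributed) are assumed, and the MAC prefix is the example given in the description above.

    [DEFAULT]
    # Separate MAC prefix for DVR ports; must differ from base_mac
    dvr_base_mac = fa:16:3f:4f:00:00
    # Create tenant routers as distributed by default
    router_distributed = True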
diff --git a/doc/common/tables/neutron-dvs.xml b/doc/common/tables/neutron-dvs.xml deleted file mode 100644 index c942102604..0000000000 --- a/doc/common/tables/neutron-dvs.xml +++ /dev/null @@ -1,58 +0,0 @@
Description of VMware DVS configuration options
Configuration option = Default valueDescription
[dvs]
= 10(IntOpt) The number of times we retry on failures, e.g., socket error, etc.
= None(StrOpt) Specify a CA bundle file to use in verifying the vCenter server certificate.
= None(StrOpt) The name of the preconfigured DVS.
= None(StrOpt) Hostname or IP address for connection to VMware vCenter host.
= None(StrOpt) Password for connection to VMware vCenter host.
= 443(IntOpt) Port for connection to VMware vCenter host.
= None(StrOpt) Username for connection to VMware vCenter host.
= False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
= 0.5(FloatOpt) The interval used for polling of remote tasks.
-
diff --git a/doc/common/tables/neutron-embrane.xml b/doc/common/tables/neutron-embrane.xml deleted file mode 100644 index 2d372add75..0000000000 --- a/doc/common/tables/neutron-embrane.xml +++ /dev/null @@ -1,58 +0,0 @@
Description of Embrane configuration options
Configuration option = Default valueDescription
[heleos]
= admin(StrOpt) ESM admin username.
= True(BoolOpt) Define if the requests have run asynchronously or not
= None(StrOpt) Dummy user traffic Security Zone id
= None(StrOpt) ESM management root address
= None(StrOpt) In band Security Zone id
= None(StrOpt) Management Security Zone id
= None(StrOpt) Out of band Security Zone id
= default(StrOpt) Shared resource pool id
= None(StrOpt) Router image id (Embrane FW/VPN)
-
diff --git a/doc/common/tables/neutron-fujitsu_cfab.xml b/doc/common/tables/neutron-fujitsu_cfab.xml deleted file mode 100644 index cf69ca05b2..0000000000 --- a/doc/common/tables/neutron-fujitsu_cfab.xml +++ /dev/null @@ -1,50 +0,0 @@
Description of FUJITSU Converged Fabric Switch configuration options
Configuration option = Default valueDescription
[fujitsu_cfab]
= (StrOpt) The address of the C-Fabric to telnet to.
= admin(StrOpt) The C-Fabric password to use.
= (ListOpt) List of <physical_network>:<vfab_id> tuples specifying physical_network names and corresponding vfab ids.
= (StrOpt) The prefix string for pprofile name.
= True(BoolOpt) Whether to save configuration.
= False(BoolOpt) Whether to share a C-Fabric pprofile among Neutron ports using the same VLAN ID.
= admin(StrOpt) The C-Fabric username to use.
-
diff --git a/doc/common/tables/neutron-fujitsu_ism.xml b/doc/common/tables/neutron-fujitsu_ism.xml deleted file mode 100644 index 0d5b9b6b99..0000000000 --- a/doc/common/tables/neutron-fujitsu_ism.xml +++ /dev/null @@ -1,46 +0,0 @@
Description of FUJITSU Software ServerView Infrastructure Manager configuration options
Configuration option = Default valueDescription
[fujitsu_ism]
= furukawa-ism(StrOpt) The IP address or hostname of the ISM.
= /etc/neutron/plugins/ml2/fujitsu/server.crt(StrOpt) The certification authority for ISM.
= admin(StrOpt) The ISM password to use.
= 25566(StrOpt) The port number of the ISM.
= 30(StrOpt) The API timeout value for ISM.
= admin(StrOpt) The ISM username to use.
-
diff --git a/doc/common/tables/neutron-fwaas.xml b/doc/common/tables/neutron-fwaas.xml deleted file mode 100644 index 94b7f58c6b..0000000000 --- a/doc/common/tables/neutron-fwaas.xml +++ /dev/null @@ -1,26 +0,0 @@
Description of Firewall-as-a-Service configuration options
Configuration option = Default valueDescription
[fwaas]
= False(BoolOpt) Enable FWaaS
-
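A hedged sketch of enabling the service in neutron.conf; the [fwaas] option name (enabled) is assumed.

    [fwaas]
    # Turn on the Firewall-as-a-Service extension
    enabled = True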
diff --git a/doc/common/tables/neutron-fwaas_ngfw.xml b/doc/common/tables/neutron-fwaas_ngfw.xml deleted file mode 100644 index d64cb173ce..0000000000 --- a/doc/common/tables/neutron-fwaas_ngfw.xml +++ /dev/null @@ -1,34 +0,0 @@
Description of FWaaS NGFW plug-in configuration options
Configuration option = Default valueDescription
[ngfw]
= (StrOpt) Authentication key to SMC API
= (StrOpt) Version of SMC API
= (StrOpt) URL to contact SMC server
-
diff --git a/doc/common/tables/neutron-fwaas_varmour.xml b/doc/common/tables/neutron-fwaas_varmour.xml deleted file mode 100644 index a9ca7a1914..0000000000 --- a/doc/common/tables/neutron-fwaas_varmour.xml +++ /dev/null @@ -1,38 +0,0 @@
Description of FWaaS vArmour plug-in configuration options
Configuration option = Default valueDescription
[vArmour]
= localhost(StrOpt) vArmour director ip
= 443(StrOpt) vArmour director port
= varmour(StrOpt) vArmour director password
= varmour(StrOpt) vArmour director username
-
diff --git a/doc/common/tables/neutron-hyperv_agent.xml b/doc/common/tables/neutron-hyperv_agent.xml deleted file mode 100644 index f1c29d4b44..0000000000 --- a/doc/common/tables/neutron-hyperv_agent.xml +++ /dev/null @@ -1,99 +0,0 @@
Description of HyperV agent configuration options
Configuration option = Default valueDescription
[AGENT]
= False(BoolOpt) Enables metrics collections for switch ports by using Hyper-V's metric APIs. Collected data can by retrieved by other apps and services, e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above
= private(StrOpt) Private vswitch name used for local networks
= 100(IntOpt) Specifies the maximum number of retries to enable Hyper-V's port metrics collection. The agent will try to enable the feature once every polling_interval period for at most metrics_max_retries or until it succeeds.
= 169.254.169.254(StrOpt) Specifies the address which will serve the metadata for the instance.
= (ListOpt) List of <physical_network>:<vswitch> where the physical networks can be expressed with wildcards, e.g.: "*:external"
= 2(IntOpt) The number of seconds the agent will wait between polling for local device changes.
[NVGRE]
= False(BoolOpt) Enables Hyper-V NVGRE. Requires Windows Server 2012 or above.
= None(StrOpt) Specifies the tunnel IP which will be used and reported by this host for NVGRE networks.
= 0(IntOpt) Specifies the VLAN ID of the physical network, required for setting the NVGRE Provider Address.
[hyperv]
= False(BoolOpt) Force V1 WMI utility classes
[neutron]
= http://localhost:5000/v2.0(StrOpt) auth url for connecting to neutron in admin context
= None(StrOpt) password for connecting to neutron in admin context
= None(StrOpt) tenant name for connecting to neutron in admin context
= None(StrOpt) username for connecting to neutron in admin context
= keystone(StrOpt) auth strategy for connecting to neutron in admin context
= http://127.0.0.1:9696(StrOpt) URL for connecting to neutron
= 30(IntOpt) timeout value for connecting to neutron in seconds
-
diff --git a/doc/common/tables/neutron-ipv6_ra.xml b/doc/common/tables/neutron-ipv6_ra.xml deleted file mode 100644 index eb7cecf23b..0000000000 --- a/doc/common/tables/neutron-ipv6_ra.xml +++ /dev/null @@ -1,26 +0,0 @@
Description of IPv6 router advertisement configuration options
Configuration option = Default valueDescription
[DEFAULT]
= $state_path/ra(StrOpt) Location to store IPv6 RA config files
-
diff --git a/doc/common/tables/neutron-l2_agent.xml b/doc/common/tables/neutron-l2_agent.xml deleted file mode 100644 index ba0acdd227..0000000000 --- a/doc/common/tables/neutron-l2_agent.xml +++ /dev/null @@ -1,26 +0,0 @@
Description of L2 agent extension configuration options
Configuration option = Default valueDescription
[agent]
= (ListOpt) Extensions list to use
-
diff --git a/doc/common/tables/neutron-l2gw.xml b/doc/common/tables/neutron-l2gw.xml deleted file mode 100644 index 2003e9f941..0000000000 --- a/doc/common/tables/neutron-l2gw.xml +++ /dev/null @@ -1,73 +0,0 @@
Description of Layer 2 Gateway configuration options
Configuration option = Default valueDescription
[DEFAULT]
= Switch1(StrOpt) default_device_name of the l2 gateway
= None(StrOpt) Unique identifier of the NSX L2 Gateway service which will be used by default for network gateways
= None(StrOpt) Unique identifier of the NSX L3 Gateway service which will be used for implementing routers and floating IPs
= networking_l2gw.services.l2gateway.ovsdb.data.L2GatewayOVSDBCallbacks(StrOpt) L2 gateway plugin callback class where the RPCs from the agent are going to get invoked
= 5(IntOpt) Number of l2 gateways allowed per tenant, -1 for unlimited
[ovsdb]
= False(BoolOpt) Set to True if ovsdb Manager manages the client
= None(StrOpt) Trusted issuer CA cert
= None(StrOpt) L2 gateway agent public certificate
= None(StrOpt) L2 gateway agent private key
= 10(IntOpt) Maximum number of retries to open a socket with the OVSDB server
= host1:127.0.0.1:6632(StrOpt) OVSDB server name:host/IP:port
= 20(IntOpt) Seconds between periodic task runs
-
diff --git a/doc/common/tables/neutron-l3_agent.xml b/doc/common/tables/neutron-l3_agent.xml deleted file mode 100644 index b2a4c97522..0000000000 --- a/doc/common/tables/neutron-l3_agent.xml +++ /dev/null @@ -1,121 +0,0 @@
Description of L3 agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= legacy(StrOpt) The working mode for the agent. Allowed modes are: 'legacy' - this preserves the existing behavior where the L3 agent is deployed on a centralized networking node to provide L3 services like DNAT, and SNAT. Use this mode if you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - this enables centralized SNAT support in conjunction with DVR. This mode must be used for an L3 agent running on a centralized node (or in single-host deployments, e.g. devstack)
= True(BoolOpt) Automatically remove networks from offline DHCP agents.
= False(BoolOpt) Automatically reschedule routers from offline L3 agents to online L3 agents.
= True(BoolOpt) Allow running metadata proxy.
= True(BoolOpt) Define the default value of enable_snat if not provided in external_gateway_info.
= 0x2(StrOpt) Iptables mangle mark used to mark ingress from external network. This mark will be masked with 0xffff so that only the lower 16 bits will be used.
= br-ex(StrOpt) Name of bridge used for external network traffic.
= (StrOpt) UUID of external network for routers implemented by the agents.
= $state_path/ha_confs(StrOpt) Location to store keepalived/conntrackd config files
= 2(IntOpt) The advertisement interval in seconds
= None(StrOpt) VRRP authentication password
= PASS(StrOpt) VRRP authentication type
= True(BoolOpt) Agent should implement routers with no gateway
= (StrOpt) With IPv6, the network used for the external gateway does not need to have an associated subnet, since the automatically assigned link-local address (LLA) can be used. However, an IPv6 gateway address is needed for use as the next-hop for the default route. If no IPv6 gateway address is configured here, (and only then) the neutron router will be configured to get its default route from router advertisements (RAs) from the upstream router; in which case the upstream router must also be configured to send these RAs. The ipv6_gateway, when configured, should be the LLA of the interface on the upstream router. If a next-hop using a global unique address (GUA) is desired, it needs to be done via a subnet allocated to the network and not through this parameter.
= False(BoolOpt) Enable HA mode for virtual routers.
= 169.254.192.0/18(StrOpt) Subnet used for the l3 HA admin network.
= (StrOpt) The physical network name with which the HA network can be created.
= (StrOpt) The network type to use when creating the HA network for an HA router. By default or if empty, the first 'tenant_network_types' is used. This is helpful when the VRRP traffic should use a specific network which is not the default one.
= 3(IntOpt) Maximum number of agents on which a router will be scheduled.
= 2(IntOpt) Minimum number of agents on which a router will be scheduled.
= (StrOpt) If namespaces is disabled, the l3 agent can only configure a router that has the matching router ID.
= 3(IntOpt) Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the feature is disabled
[AGENT]
= True(BoolOpt) Add comments to iptables rules.
= True(BoolOpt) Use the root helper to read the namespaces from the operating system.
-
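A hedged l3_agent.ini-style sketch combining a few of the options above; the option names (agent_mode, external_network_bridge, l3_ha, max_l3_agents_per_router, min_l3_agents_per_router) are assumed, and the values are illustrative.

    [DEFAULT]
    # Run this agent as the centralized SNAT node of a DVR deployment
    agent_mode = dvr_snat
    # Bridge carrying external network traffic
    external_network_bridge = br-ex
    # Create new routers as HA (VRRP) routers
    l3_ha = True
    max_l3_agents_per_router = 3
    min_l3_agents_per_router = 2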
diff --git a/doc/common/tables/neutron-l3_arista.xml b/doc/common/tables/neutron-l3_arista.xml deleted file mode 100644 index 6cdd231679..0000000000 --- a/doc/common/tables/neutron-l3_arista.xml +++ /dev/null @@ -1,50 +0,0 @@
Description of Arista layer-3 service plug-in configuration options
Configuration option = Default valueDescription
[l3_arista]
= 180(IntOpt) Sync interval in seconds between L3 Service plugin and EOS. This interval defines how often the synchronization is performed. This is an optional field. If not set, a value of 180 seconds is assumed
= False(BoolOpt) This flag is used to indicate if Arista switches are configured in MLAG mode. If yes, all L3 config is pushed to both switches automatically. If this flag is set to True, ensure that the IP addresses of both switches are specified. This is optional. If not set, a value of "False" is assumed.
= (StrOpt) Arista EOS IP address. This is a required field. If not set, all communications to Arista EOS will fail
= (StrOpt) Password for Arista EOS. This is a required field. If not set, all communications to Arista EOS will fail
= (StrOpt) Username for Arista EOS. This is a required field. If not set, all communications to Arista EOS will fail
= (StrOpt) Arista EOS IP address for the second switch MLAGed with the first one. This is an optional field; however, if the mlag_config flag is set, then it is required. If not set, all communications to Arista EOS will fail
= False(BoolOpt) A "True" value for this flag indicates to create a router in VRF. If not set, all routers are created in default VRF. This is optional. If not set, a value of "False" is assumed.
-
diff --git a/doc/common/tables/neutron-l3_brocade_mlx.xml b/doc/common/tables/neutron-l3_brocade_mlx.xml deleted file mode 100644 index dc70a5e8fd..0000000000 --- a/doc/common/tables/neutron-l3_brocade_mlx.xml +++ /dev/null @@ -1,49 +0,0 @@
Description of Brocade MLX L3 plug-in configuration options
Configuration option = Default valueDescription
[L3_BROCADE_MLX_EXAMPLE]
= (StrOpt) The IP address of the MLX switch
= password(StrOpt) The SSH password of the switch
= (StrOpt) Allowed physical networks where VLAN can be configured on this switch
= (StrOpt) Ports to be tagged in the VLAN being configured on the switch
= admin(StrOpt) The SSH username of the switch
[l3_brocade_mlx]
= (StrOpt) Switches connected to the compute nodes
-
diff --git a/doc/common/tables/neutron-lbaas.xml b/doc/common/tables/neutron-lbaas.xml deleted file mode 100644 index 01ce5ac51d..0000000000 --- a/doc/common/tables/neutron-lbaas.xml +++ /dev/null @@ -1,142 +0,0 @@
Description of Load-Balancer-as-a-Service configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The driver used to manage the virtual interface.
= neutron_lbaas.services.loadbalancer.agent_scheduler.ChanceScheduler(StrOpt) Driver to use for scheduling pool to a default loadbalancer agent
= neutron_lbaas.agent_scheduler.ChanceScheduler(StrOpt) Driver to use for scheduling to a default loadbalancer agent
= 40(IntOpt) Seconds between running periodic tasks
= neutron_lbaas.agent_scheduler.ChanceScheduler(StrOpt) Driver to use for scheduling to a default loadbalancer agent
= neutron_lbaas.services.loadbalancer.agent_scheduler.ChanceScheduler(StrOpt) Driver to use for scheduling pool to a default loadbalancer agent
[service_auth]
= http://127.0.0.1:5000/v2.0(StrOpt) Authentication endpoint
= admin(StrOpt) The service admin tenant name
= admin(StrOpt) The service admin user name
= password(StrOpt) The service admin password
= admin(StrOpt) The admin user domain name
= admin(StrOpt) The admin project domain name
= RegionOne(StrOpt) The deployment region
= lbaas(StrOpt) The name of the service
= 2(StrOpt) The auth version used to authenticate
[service_providers]
= [](MultiStrOpt) Defines providers for advanced services using the format: <service_type>:<name>:<driver>[:default]
[certificates]
= barbican(StrOpt) Certificate Manager plugin. Defaults to barbican.
= /var/lib/neutron-lbaas/certificates/(StrOpt) Absolute path to the certificate storage directory. Defaults to env[OS_LBAAS_TLS_STORAGE].
[service_auth]
= password(StrOpt) The service admin password
= admin(StrOpt) The admin project domain name
= admin(StrOpt) The service admin tenant name
= admin(StrOpt) The service admin user name
= admin(StrOpt) The admin user domain name
= http://127.0.0.1:5000/v2.0(StrOpt) Authentication endpoint
= 2(StrOpt) The auth version used to authenticate
= RegionOne(StrOpt) The deployment region
= lbaas(StrOpt) The name of the service
-
diff --git a/doc/common/tables/neutron-lbaas_agent.xml b/doc/common/tables/neutron-lbaas_agent.xml deleted file mode 100644 index fa2b4c010d..0000000000 --- a/doc/common/tables/neutron-lbaas_agent.xml +++ /dev/null @@ -1,57 +0,0 @@
Description of LBaaS agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 40(IntOpt) Seconds between running periodic tasks
= None(StrOpt) The driver used to manage the virtual interface.
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= False(BoolOpt) Uses veth for an interface or not
= ['neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver'](MultiStrOpt) Drivers used to manage loadbalancing devices
[haproxy]
= $state_path/lbaas(StrOpt) Location to store config and state files
= 3(IntOpt) When delete and re-add the same vip, send this many gratuitous ARPs to flush the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
= nogroup(StrOpt) The user group
-
diff --git a/doc/common/tables/neutron-lbaas_octavia.xml b/doc/common/tables/neutron-lbaas_octavia.xml deleted file mode 100644 index 213ca28b2e..0000000000 --- a/doc/common/tables/neutron-lbaas_octavia.xml +++ /dev/null @@ -1,34 +0,0 @@
Description of LBaaS Octavia plugin configuration options
Configuration option = Default valueDescription
[octavia]
= http://127.0.0.1:9876(StrOpt) URL of Octavia controller root
= 3(IntOpt) Interval in seconds to poll octavia when an entity is created, updated, or deleted.
= 100(IntOpt) Time to stop polling octavia when a status of an entity does not change.
-
diff --git a/doc/common/tables/neutron-lbaas_services.xml b/doc/common/tables/neutron-lbaas_services.xml deleted file mode 100644 index d9dc18a13e..0000000000 --- a/doc/common/tables/neutron-lbaas_services.xml +++ /dev/null @@ -1,327 +0,0 @@
Description of LBaaS Embrane, Radware, NetScaler, HAproxy, Octavia plugin configuration options
Configuration option = Default valueDescription
[haproxy]
= 3(IntOpt) When delete and re-add the same vip, send this many gratuitous ARPs to flush the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
= nogroup(StrOpt) The user group
= $state_path/lbaas(StrOpt) Location to store config and state files
= 10(IntOpt) Seconds between periodic task runs
= None(StrOpt) The driver used to manage the virtual interface.
= /usr/lib/python/site-packages/neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy.loadbalancer.j2(StrOpt) Jinja template file for haproxy configuration
[octavia]
= http://127.0.0.1:9876(StrOpt) URL of Octavia controller root
[heleoslb]
= None(StrOpt) ESM admin password.
= None(StrOpt) ESM admin username.
= None(BoolOpt) Define if the requests have run asynchronously or not
= None(StrOpt) Dummy user traffic Security Zone id for LBs
= None(StrOpt) ESM management root address
= None(StrOpt) In band Security Zone id for LBs
= small(StrOpt) choose LB image flavor to use, accepted values: small, medium
= None(StrOpt) Load Balancer image id (Embrane LB)
= None(StrOpt) Management Security Zone id for LBs
= None(StrOpt) Out of band Security Zone id for LBs
= None(StrOpt) Shared resource pool id
= 60(IntOpt) resource synchronization interval in seconds
[netscaler_driver]
= True(StrOpt) Setting for option to enable synchronous operations with the NetScaler Control Center Server.
= None(StrOpt) Setting to enable/disable cleanup mode for NetScaler Control Center Server
= None(StrOpt) Password to login to the NetScaler Control Center Server.
= None(StrOpt) The URL to reach the NetScaler Control Center Server.
= None(StrOpt) Username to login to the NetScaler Control Center Server.
= True,300(StrOpt) Setting for member status collection from the NetScaler Control Center Server.
= 2(StrOpt) Setting for periodic task collection interval from the NetScaler Control Center Server.
[octavia]
= http://127.0.0.1:9876(StrOpt) URL of Octavia controller root
= 3(IntOpt) Interval in seconds to poll octavia when an entity is created, updated, or deleted.
= 100(IntOpt) Time to stop polling octavia when a status of an entity does not change.
[radware]
= setup_l2_l3(ListOpt) List of actions that are not pushed to the completion queue.
= None(StrOpt) IP address of secondary vDirect server.
= {'ha_network_name': 'HA-Network', 'service': '_REPLACE_', 'ha_ip_pool_name': 'default', 'twoleg_enabled': '_REPLACE_', 'allocate_ha_ips': True, 'allocate_ha_vrrp': True}(DictOpt) Parameter for l2_l3 workflow constructor.
= {'data_ip_address': '192.168.200.99', 'data_port': 1, 'gateway': '192.168.200.1', 'ha_port': 2, 'data_ip_mask': '255.255.255.0'}(DictOpt) Parameter for l2_l3 workflow setup.
= openstack_l2_l3(StrOpt) Name of l2_l3 workflow. Default: openstack_l2_l3.
= BaseCreate(StrOpt) Name of the l4 workflow action. Default: BaseCreate.
= openstack_l4(StrOpt) Name of l4 workflow. Default: openstack_l4.
= VA(StrOpt) Service ADC type. Default: VA.
= (StrOpt) Service ADC version.
= 20(IntOpt) Size of service cache. Default: 20.
= 100(IntOpt) Service compression throughput. Default: 100.
= False(BoolOpt) Enables or disables the Service HA pair. Default: False.
= -1(IntOpt) A required VLAN for the interswitch link to use.
= (ListOpt) Resource pool IDs.
= False(BoolOpt) Enable or disable Alteon interswitch link for stateful session failover. Default: False.
= 100(IntOpt) Service SSL throughput. Default: 100.
= 1000(IntOpt) Service throughput. Default: 1000.
= None(StrOpt) IP address of vDirect server.
= radware(StrOpt) vDirect user password.
= vDirect(StrOpt) vDirect user name.
[radwarev2]
= manage_l3(ListOpt) Name of child workflow templates used. Default: manage_l3
= None(StrOpt) IP address of secondary vDirect server.
= VA(StrOpt) Service ADC type. Default: VA.
= (StrOpt) Service ADC version.
= 20(IntOpt) Size of service cache. Default: 20.
= 100(IntOpt) Service compression throughput. Default: 100.
= False(BoolOpt) Enables or disables the Service HA pair. Default: False.
= -1(IntOpt) A required VLAN for the interswitch link to use.
= (ListOpt) Resource pool IDs.
= False(BoolOpt) Enable or disable Alteon interswitch link for stateful session failover. Default: False.
= 100(IntOpt) Service SSL throughput. Default: 100.
= 1000(IntOpt) Service throughput. Default: 1000.
= stats(StrOpt) Name of the workflow action for statistics. Default: stats.
= None(StrOpt) IP address of vDirect server.
= radware(StrOpt) vDirect user password.
= vDirect(StrOpt) vDirect user name.
= apply(StrOpt) Name of the workflow action. Default: apply.
= {'data_ip_address': '192.168.200.99', 'ha_network_name': 'HA-Network', 'ha_port': 2, 'allocate_ha_ips': True, 'ha_ip_pool_name': 'default', 'allocate_ha_vrrp': True, 'data_port': 1, 'gateway': '192.168.200.1', 'twoleg_enabled': '_REPLACE_', 'data_ip_mask': '255.255.255.0'}(DictOpt) Parameter for l2_l3 workflow constructor.
= os_lb_v2(StrOpt) Name of the workflow template. Default: os_lb_v2.
[radwarev2_debug]
= True(BoolOpt) Configure ADC with L3 parameters?
= True(BoolOpt) Configure ADC with L4 parameters?
= True(BoolOpt) Provision ADC service?
-
diff --git a/doc/common/tables/neutron-linuxbridge_agent.xml b/doc/common/tables/neutron-linuxbridge_agent.xml deleted file mode 100644 index 0fe41fa6bb..0000000000 --- a/doc/common/tables/neutron-linuxbridge_agent.xml +++ /dev/null @@ -1,57 +0,0 @@
Description of Linux Bridge agent configuration options
Configuration option = Default valueDescription
[LINUX_BRIDGE]
= (ListOpt) List of <physical_network>:<physical_bridge>
= (ListOpt) List of <physical_network>:<physical_interface>
[VXLAN]
= True(BoolOpt) Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 plugin using linuxbridge mechanism driver
= False(BoolOpt) Extension to use alongside ml2 plugin's l2population mechanism driver. It enables the plugin to populate VXLAN forwarding table.
= None(IPOpt) Local IP address of the VXLAN endpoints.
= None(IntOpt) TOS for vxlan interface protocol packets.
= None(IntOpt) TTL for vxlan interface protocol packets.
= 224.0.0.1(StrOpt) Multicast group(s) for vxlan interface. A range of group addresses may be specified by using CIDR notation. To reserve a unique group for each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on all the agents.
-
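A hedged linuxbridge_agent.ini-style sketch; the option names (physical_interface_mappings, enable_vxlan, local_ip, l2_population, vxlan_group) are assumed, and the interface name and endpoint address are illustrative.

    [LINUX_BRIDGE]
    # physical_network:interface pairs
    physical_interface_mappings = physnet1:eth1

    [VXLAN]
    enable_vxlan = True
    # VXLAN endpoint address of this host
    local_ip = 192.0.2.10
    l2_population = True
    vxlan_group = 224.0.0.1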
diff --git a/doc/common/tables/neutron-logging.xml b/doc/common/tables/neutron-logging.xml deleted file mode 100644 index 0a44ee74de..0000000000 --- a/doc/common/tables/neutron-logging.xml +++ /dev/null @@ -1,117 +0,0 @@
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= False(BoolOpt) Make exception message format errors fatal
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation.
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s .
= None(StrOpt) (Optional) The base directory used for relative --log-file paths.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines.
= False(BoolOpt) Enable SSL on the API server
= True(BoolOpt) Log output to standard error.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
[oslo_versionedobjects]
= False(BoolOpt) Make exception message format errors fatal
-
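A hedged sketch of common logging settings; the option names (debug, log_dir, use_syslog) are assumed to be the standard oslo.log names, and the directory path is illustrative.

    [DEFAULT]
    # Raise the log level from INFO to DEBUG
    debug = True
    # Write log files under this directory
    log_dir = /var/log/neutron
    # Keep logging to files rather than syslog
    use_syslog = False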
diff --git a/doc/common/tables/neutron-metadata.xml b/doc/common/tables/neutron-metadata.xml deleted file mode 100644 index 25f8540e95..0000000000 --- a/doc/common/tables/neutron-metadata.xml +++ /dev/null @@ -1,78 +0,0 @@
Description of metadata configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 0x1(StrOpt) Iptables mangle mark used to mark metadata valid requests. This mark will be masked with 0xffff so that only the lower 16 bits will be used.
= 4096(IntOpt) Number of backlog requests to configure the metadata server socket with
= 9697(IntOpt) TCP Port used by Neutron metadata namespace proxy.
= (StrOpt) Group (gid or name) running metadata proxy after its initialization (if empty: agent effective group).
= (StrOpt) Shared secret to sign instance-id request
= $state_path/metadata_proxy(StrOpt) Location for Metadata Proxy UNIX domain socket.
= deduce(StrOpt) Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce mode from metadata_proxy_user/group values, 'user': set metadata proxy socket mode to 0o644, to use when metadata_proxy_user is agent effective user or root, 'group': set metadata proxy socket mode to 0o664, to use when metadata_proxy_group is agent effective group or root, 'all': set metadata proxy socket mode to 0o666, to use otherwise.
= (StrOpt) User (uid or name) running metadata proxy after its initialization (if empty: agent effective user).
= None(BoolOpt) Enable/Disable log watch by metadata proxy. It should be disabled when metadata_proxy_user/group is not allowed to read/write its log file and copytruncate logrotate option must be used if logrotate is enabled on metadata proxy log files. Option default value is deduced from metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent effective user id/name.
= 1(IntOpt) Number of separate worker processes for metadata server (defaults to half of the number of CPUs)
= False(BoolOpt) Allow to perform insecure SSL (https) requests to nova metadata
= 127.0.0.1(StrOpt) IP address used by Nova metadata server.
= 8775(IntOpt) TCP Port used by Nova metadata server.
= http(StrOpt) Protocol to access nova metadata, http or https
-
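A hedged metadata_agent.ini-style sketch; the option names (nova_metadata_ip, nova_metadata_port, nova_metadata_protocol, metadata_proxy_shared_secret) are assumed, and the secret is a placeholder.

    [DEFAULT]
    # Where the agent forwards instance metadata requests
    nova_metadata_ip = 127.0.0.1
    nova_metadata_port = 8775
    nova_metadata_protocol = http
    # Must match the secret configured on the Nova side
    metadata_proxy_shared_secret = s3cr3t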
diff --git a/doc/common/tables/neutron-metering_agent.xml b/doc/common/tables/neutron-metering_agent.xml deleted file mode 100644 index f052658d9a..0000000000 --- a/doc/common/tables/neutron-metering_agent.xml +++ /dev/null @@ -1,37 +0,0 @@
Description of metering agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver(StrOpt) Metering driver
= 30(IntOpt) Interval between two metering measures
[AGENT]
= 30(FloatOpt) Seconds between nodes reporting state to server; should be less than agent_down_time, best if it is half or less than agent_down_time.
-
diff --git a/doc/common/tables/neutron-midonet.xml b/doc/common/tables/neutron-midonet.xml deleted file mode 100644 index 286e88ccf4..0000000000 --- a/doc/common/tables/neutron-midonet.xml +++ /dev/null @@ -1,54 +0,0 @@
Description of Midonet configuration options
Configuration option = Default valueDescription
[MIDONET]
= midonet.neutron.client.api.MidonetApiClient(StrOpt) MidoNet client used to access MidoNet data storage.
= localhost(StrOpt) IP that the cluster service can be reached on
= 8088(StrOpt) Port that the cluster service can be reached on
= http://localhost:8080/midonet-api(StrOpt) MidoNet API server URI.
= passw0rd(StrOpt) MidoNet admin password.
= 77777777-7777-7777-7777-777777777777(StrOpt) ID of the project that MidoNet admin user belongs to.
= vxlan(StrOpt) Tunnel protocol used by Midonet
= admin(StrOpt) MidoNet admin username.
-
diff --git a/doc/common/tables/neutron-ml2.xml b/doc/common/tables/neutron-ml2.xml deleted file mode 100644 index 6c5d4c98c3..0000000000 --- a/doc/common/tables/neutron-ml2.xml +++ /dev/null @@ -1,54 +0,0 @@
Description of ML2 configuration options
Configuration option = Default valueDescription
[ml2]
= (ListOpt) An ordered list of extension driver entrypoints to be loaded from the neutron.ml2.extension_drivers namespace.
= None(StrOpt) Default network type for external networks when no provider attributes are specified. By default it is None, which means that if provider attributes are not specified while creating external networks then they will have the same type as tenant networks. Allowed values for external_network_type config option depend on the network type values configured in type_drivers config option.
= (ListOpt) An ordered list of networking mechanism driver entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace.
= 0(IntOpt) The maximum permissible size of an unfragmented packet travelling from and to addresses where encapsulated Neutron traffic is sent. If <= 0, the path MTU is indeterminate.
= (ListOpt) A list of mappings of physical networks to MTU values. The format of the mapping is <physnet>:<mtu val>. This mapping allows specifying a physical network MTU value that differs from the default segment_mtu value.
= 0(IntOpt) The maximum permissible size of an unfragmented packet travelling a L2 network segment. If <= 0, the segment MTU is indeterminate.
= local(ListOpt) Ordered list of network_types to allocate as tenant networks.
= local, flat, vlan, gre, vxlan, geneve(ListOpt) List of network type driver entrypoints to be loaded from the neutron.ml2.type_drivers namespace.
-
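A hedged ml2_conf.ini-style sketch of the options above; the option names (type_drivers, tenant_network_types, mechanism_drivers, extension_drivers) are assumed, and the chosen drivers are illustrative.

    [ml2]
    # Network type drivers to load and the type handed to tenant networks
    type_drivers = flat,vlan,vxlan
    tenant_network_types = vxlan
    # Back ends that realize the networks
    mechanism_drivers = linuxbridge,l2population
    extension_drivers = port_security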
diff --git a/doc/common/tables/neutron-ml2_ale_omniswitch.xml b/doc/common/tables/neutron-ml2_ale_omniswitch.xml deleted file mode 100644 index 87f82dfcb7..0000000000 --- a/doc/common/tables/neutron-ml2_ale_omniswitch.xml +++ /dev/null @@ -1,38 +0,0 @@
Description of ML2 OmniSwich mechanism driver configuration options
Configuration option = Default valueDescription
[ml2_ale_omniswitch]
= (StrOpt) No help text available for this option.
= (ListOpt) No help text available for this option.
= (ListOpt) No help text available for this option.
= 600(IntOpt) No help text available for this option.
-
diff --git a/doc/common/tables/neutron-ml2_arista.xml b/doc/common/tables/neutron-ml2_arista.xml deleted file mode 100644 index b5c588b15e..0000000000 --- a/doc/common/tables/neutron-ml2_arista.xml +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Arista mechanism driver configuration options
Configuration option = Default valueDescription
[ml2_arista]
= (StrOpt) Arista EOS IP address. This is required field. If not set, all communications to Arista EOS will fail.
= (StrOpt) Password for Arista EOS. This is required field. If not set, all communications to Arista EOS will fail.
= (StrOpt) Username for Arista EOS. This is required field. If not set, all communications to Arista EOS will fail.
= RegionOne(StrOpt) Defines Region Name that is assigned to this OpenStack Controller. This is useful when multiple OpenStack/Neutron controllers are managing the same Arista HW clusters. Note that this name must match with the region name registered (or known) to keystone service. Authentication with Keystone is performed by EOS. This is optional. If not set, a value of "RegionOne" is assumed.
= 180(IntOpt) Sync interval in seconds between Neutron plugin and EOS. This interval defines how often the synchronization is performed. This is an optional field. If not set, a value of 180 seconds is assumed.
= True(BoolOpt) Defines if hostnames are sent to Arista EOS as FQDNs ("node1.domain.com") or as short names ("node1"). This is optional. If not set, a value of "True" is assumed.
-
diff --git a/doc/common/tables/neutron-ml2_bagpipe.xml b/doc/common/tables/neutron-ml2_bagpipe.xml deleted file mode 100644 index abaac12c32..0000000000 --- a/doc/common/tables/neutron-ml2_bagpipe.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 BaGpipe BGP driver configuration options
Configuration option = Default valueDescription
[ml2_bagpipe]
= 64512(IntOpt) Autonomous System number
[ml2_type_route_target]
= 64512(IntOpt) Route Target Autonomous System number.
= (ListOpt) Comma-separated list of <rt_nn_min>:<rt_nn_max> tuples enumerating ranges of Route Target number that are available for tenant network allocation
-
diff --git a/doc/common/tables/neutron-ml2_bigswitch.xml b/doc/common/tables/neutron-ml2_bigswitch.xml deleted file mode 100644 index b0352f9256..0000000000 --- a/doc/common/tables/neutron-ml2_bigswitch.xml +++ /dev/null @@ -1,183 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 BigSwitch mechanism driver configuration options
Configuration option = Default valueDescription
[NOVA]
= (ListOpt) Nova compute nodes to manually set VIF type to 802.1qbg
= (ListOpt) Nova compute nodes to manually set VIF type to 802.1qbh
= (ListOpt) Nova compute nodes to manually set VIF type to binding_failed
= (ListOpt) Nova compute nodes to manually set VIF type to bridge
= (ListOpt) Nova compute nodes to manually set VIF type to distributed
= (ListOpt) Nova compute nodes to manually set VIF type to dvs
= (ListOpt) Nova compute nodes to manually set VIF type to hw_web
= (ListOpt) Nova compute nodes to manually set VIF type to hyperv
= (ListOpt) Nova compute nodes to manually set VIF type to ib_hostdev
= (ListOpt) Nova compute nodes to manually set VIF type to iovisor
= (ListOpt) Nova compute nodes to manually set VIF type to ivs
= (ListOpt) Nova compute nodes to manually set VIF type to midonet
= (ListOpt) Nova compute nodes to manually set VIF type to other
= (ListOpt) Nova compute nodes to manually set VIF type to ovs
= (ListOpt) Nova compute nodes to manually set VIF type to unbound
= (ListOpt) Nova compute nodes to manually set VIF type to vhostuser
= (ListOpt) Nova compute nodes to manually set VIF type to vrouter
= ivs(StrOpt) Virtual interface type to configure on Nova compute nodes
= unbound, binding_failed, distributed, ovs, bridge, other, ivs, iovisor, vhostuser, dvs, 802.1qbg, 802.1qbh, hyperv, midonet, ib_hostdev, hw_web, vrouter(ListOpt) List of allowed vif_type values.
[RESTPROXY]
= True(BoolOpt) Flag to decide if a route to the metadata server should be injected into the VM
= True(BoolOpt) If neutron fails to create a resource because the backend controller doesn't know of a dependency, the plugin automatically triggers a full data synchronization to the controller.
= True(BoolOpt) Re-use HTTP/HTTPS connections to the controller.
= 60(IntOpt) Time between verifications that the backend controller database is consistent with Neutron. (0 to disable)
= neutron-ubuntu1404-master(StrOpt) User defined identifier for this Neutron deployment
= False(BoolOpt) Disables SSL certificate validation for controllers
= None(StrOpt) The username and password for authenticating against the Big Switch or Floodlight controller.
= True(BoolOpt) If True, Use SSL when connecting to the Big Switch or Floodlight controller.
= 10(IntOpt) Maximum number of seconds to wait for proxy request to connect and complete.
= localhost:8800(ListOpt) A comma separated list of Big Switch or Floodlight servers and port numbers. The plugin proxies the requests to the Big Switch/Floodlight server, which performs the networking configuration. Only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
= /etc/neutron/plugins/bigswitch/ssl(StrOpt) Directory containing ca_certs and host_certs certificate directories.
= True(BoolOpt) Trust and store the first certificate received for each controller address and use it to validate future connections to that address.
= False(BoolOpt) Sync data on connect
= 4(IntOpt) Maximum number of threads to spawn to handle large volumes of port creations.
[RESTPROXYAGENT]
= br-int(StrOpt) Name of integration bridge on compute nodes used for security group insertion.
= 5(IntOpt) Seconds between agent checks for port changes
= ivs(StrOpt) Virtual switch type.
[ROUTER]
= 200(IntOpt) Maximum number of router rules
= ['*:any:any:permit'](MultiStrOpt) The default router rules installed in new tenant routers. Repeat the config option for each rule. Format is <tenant>:<source>:<destination>:<action>. Use an * to specify default for all tenants.
-
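As a hedged illustration of how the proxy options above combine (controller list, credentials, SSL), assuming the usual Big Switch option names servers, server_auth, and server_ssl:

    [RESTPROXY]
    servers = controller1:8443,controller2:8443
    server_auth = admin:changeme
    server_ssl = True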
diff --git a/doc/common/tables/neutron-ml2_brocade.xml b/doc/common/tables/neutron-ml2_brocade.xml deleted file mode 100644 index 8901fd7fea..0000000000 --- a/doc/common/tables/neutron-ml2_brocade.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Brocade mechanism driver configuration options
Configuration option = Default valueDescription
[ML2_BROCADE_MLX_EXAMPLE]
= (StrOpt) The address of the host to SSH to
= NI(StrOpt) OS type of the device.
= password(StrOpt) The SSH password to use
= (StrOpt) Allowed physical networks
= (StrOpt) Ports
= SSH(StrOpt) Protocol used to communicate with Switch
= admin(StrOpt) The SSH username to use
[ml2_brocade]
= (StrOpt) The address of the host to SSH to
= NOS(StrOpt) OS Type of the switch
= 4.0.0(StrOpt) OS Version number
= password(StrOpt) The SSH password to use
= (StrOpt) Allowed physical networks
= 1(StrOpt) Rbridge id of provider edge router(s)
= admin(StrOpt) The SSH username to use
-
diff --git a/doc/common/tables/neutron-ml2_brocade_fi_ni.xml b/doc/common/tables/neutron-ml2_brocade_fi_ni.xml deleted file mode 100644 index f90ec6027f..0000000000 --- a/doc/common/tables/neutron-ml2_brocade_fi_ni.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Brocade MLX ICX mechanism driver configuration options
Configuration option = Default valueDescription
[ml2_brocade_fi_ni]
= (StrOpt) Switches connected to the compute nodes
-
diff --git a/doc/common/tables/neutron-ml2_cisco.xml b/doc/common/tables/neutron-ml2_cisco.xml deleted file mode 100644 index b3e3347a43..0000000000 --- a/doc/common/tables/neutron-ml2_cisco.xml +++ /dev/null @@ -1,225 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Cisco mechanism driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) Prefix for APIC domain/names/profiles created
[ml2_cisco]
= False(BoolOpt) Enable strict host key checks when connecting to Nexus switches
= None(StrOpt) The physical network managed by the switches.
= False(BoolOpt) Prevent caching ssh connections to Nexus device
= False(BoolOpt) To make Nexus configuration persistent
= True(BoolOpt) Provider VLANs are automatically created as needed on the Nexus switch
= True(BoolOpt) Provider VLANs are automatically trunked as needed on the ports of the Nexus switch
= p-(StrOpt) VLAN Name prefix for provider vlans
= False(BoolOpt) Distribute SVI interfaces over all switches
= 0(IntOpt) Periodic time to check switch connection. (0=disabled)
= q-(StrOpt) VLAN Name prefix
= False(BoolOpt) Create and delete Nexus switch VXLAN global settings; feature nv overlay, feature vn-segment-vlan-based, interface nve + source-interface loopback
[ml2_cisco_apic]
= 2(FloatOpt) Interval between agent poll for topology (in sec)
= 30(FloatOpt) Interval between agent status updates (in sec)
= ${apic_system_id}_app(StrOpt) Name for the app profile used for OpenStack
= ${apic_system_id}(StrOpt) Name for the domain created on APIC
= ${apic_system_id}_entity_profile(StrOpt) Name of the entity profile to be created
= ${apic_system_id}_function_profile(StrOpt) Name of the function profile to be created
= (ListOpt) The uplink ports to check for ACI connectivity
= (ListOpt) An ordered list of host names or IP addresses of the APIC controller(s).
= ${apic_system_id}_lacp_profile(StrOpt) Name of the LACP profile to be created
= use_name(StrOpt) Name mapping strategy to use: use_uuid | use_name
= ${apic_system_id}_node_profile(StrOpt) Name of the node profile to be created
= None(StrOpt) Password for the APIC controller
= 0(IntOpt) Synchronization interval in seconds
= True(BoolOpt) Use SSL to connect to the APIC controller
= None(StrOpt) Username for the APIC controller
= ${apic_system_id}_vlan_ns(StrOpt) Name for the vlan namespace to be used for OpenStack
= 2:4093(StrOpt) Range of VLANs to be used for OpenStack
= (ListOpt) The switch pairs for VPC connectivity
[ml2_cisco_n1kv]
= default-pp(StrOpt) Cisco Nexus1000V default policy profile.
= 4(IntOpt) Number of threads to use to make HTTP requests.
= 15(IntOpt) HTTP timeout, in seconds, for connections to the Cisco Nexus1000V VSMs.
= None(ListOpt) Comma Separated IP Addresses of the Cisco Nexus1000V VSMs.
= None(StrOpt) Password for all configured Cisco Nexus1000V VSMs.
= 60(IntOpt) Cisco Nexus1000V policy profile polling duration in seconds.
= False(BoolOpt) Restrict the visibility of network profiles to the tenants.
= False(BoolOpt) Restrict the visibility of policy profiles to the tenants.
= 300(IntOpt) Time interval between consecutive neutron-VSM syncs.
= None(StrOpt) Username for all configured Cisco Nexus1000V VSMs.
[ml2_cisco_ucsm]
= 1137:0071, 8086:10c9(ListOpt) List of comma separated vendor_id:product_id of SR_IOV capable devices supported by this MD. This MD supports both VM-FEX and SR-IOV devices.
= None(ListOpt) List of comma separated Host:Service Profile tuples providing the Service Profile associated with each Host to be supported by this MD.
= None(StrOpt) Cisco UCS Manager IP address. This is a required field to communicate with a Cisco UCS Manager.
= None(StrOpt) Password for UCS Manager. This is a required field to communicate with a Cisco UCS Manager.
= None(StrOpt) Username for UCS Manager. This is a required field to communicate with a Cisco UCS Manager.
[ml2_type_nexus_vxlan]
= (ListOpt) List of multicast groups to be used for global VNIDs in the format - a:b,c,e:f.
= (ListOpt) List of global VNID ranges in the format - a:b, c:d. Multiple ranges can be separated by a comma
-
diff --git a/doc/common/tables/neutron-ml2_flat.xml b/doc/common/tables/neutron-ml2_flat.xml deleted file mode 100644 index cda362cb67..0000000000 --- a/doc/common/tables/neutron-ml2_flat.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Flat mechanism driver configuration options
Configuration option = Default valueDescription
[ml2_type_flat]
= (ListOpt) List of physical_network names with which flat networks can be created. Use * to allow flat networks with arbitrary physical_network names.
-
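A minimal sketch of the flat type driver section, assuming the conventional flat_networks option name:

    [ml2_type_flat]
    # Allow flat networks on these physical networks; use * to allow any name.
    flat_networks = physnet1,physnet2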
diff --git a/doc/common/tables/neutron-ml2_fslsdn.xml b/doc/common/tables/neutron-ml2_fslsdn.xml deleted file mode 100644 index 1afcc3a9d2..0000000000 --- a/doc/common/tables/neutron-ml2_fslsdn.xml +++ /dev/null @@ -1,62 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Freescale SDN mechanism driver configuration options
Configuration option = Default valueDescription
[ml2_fslsdn]
= False(BoolOpt) If set, ignore any SSL validation issues.
= keystone(StrOpt) Auth strategy for connecting to neutron in admin context.
= http://127.0.0.1:5000/v2.0/(StrOpt) CRD Auth URL.
= None(StrOpt) Location of ca certificates file to use for CRD client requests.
= password(StrOpt) CRD Service Password.
= RegionOne(StrOpt) Region name for connecting to CRD Service in admin context.
= service(StrOpt) CRD Tenant Name.
= http://127.0.0.1:9797(StrOpt) URL for connecting to CRD service.
= 30(IntOpt) Timeout value for connecting to CRD service in seconds.
= crd(StrOpt) CRD service Username.
-
diff --git a/doc/common/tables/neutron-ml2_geneve.xml b/doc/common/tables/neutron-ml2_geneve.xml deleted file mode 100644 index d9db359100..0000000000 --- a/doc/common/tables/neutron-ml2_geneve.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 Geneve type driver configuration options
Configuration option = Default valueDescription
[ml2_type_geneve]
= 50(IntOpt) Geneve encapsulation header size is dynamic; this value is used to calculate the maximum MTU for the driver. This is the sum of the sizes of the outer ETH + IP + UDP + GENEVE headers.
= (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of Geneve VNI IDs that are available for tenant network allocation
-
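A sketch of the <vni_min>:<vni_max> tuple format described above, assuming the conventional vni_ranges option name:

    [ml2_type_geneve]
    vni_ranges = 1:1000,2000:2999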
diff --git a/doc/common/tables/neutron-ml2_gre.xml b/doc/common/tables/neutron-ml2_gre.xml deleted file mode 100644 index fcf4d6a702..0000000000 --- a/doc/common/tables/neutron-ml2_gre.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 GRE configuration options
Configuration option = Default valueDescription
[ml2_type_gre]
= (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-
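The GRE ranges use the same <tun_min>:<tun_max> tuple format; a sketch assuming the conventional tunnel_id_ranges option name:

    [ml2_type_gre]
    tunnel_id_ranges = 1:1000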
diff --git a/doc/common/tables/neutron-ml2_l2pop.xml b/doc/common/tables/neutron-ml2_l2pop.xml deleted file mode 100644 index ee19586328..0000000000 --- a/doc/common/tables/neutron-ml2_l2pop.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 L2 population configuration options
Configuration option = Default valueDescription
[l2pop]
= 180(IntOpt) Delay within which agent is expected to update existing ports when it restarts
-
diff --git a/doc/common/tables/neutron-ml2_mlnx.xml b/doc/common/tables/neutron-ml2_mlnx.xml deleted file mode 100644 index 39b1620da0..0000000000 --- a/doc/common/tables/neutron-ml2_mlnx.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Mellanox ML2 mechanism driver configuration options
Configuration option = Default valueDescription
[ESWITCH]
= 2(IntOpt) Backoff rate multiplier for the waiting period between retries of requests to the daemon, i.e. a value of 2 will double the request timeout each retry
= tcp://127.0.0.1:60001(StrOpt) eswitch daemon end point
= (ListOpt) List of <physical_network>:<physical_interface>
= 3000(IntOpt) The number of milliseconds the agent will wait for response on request to daemon.
= 3(IntOpt) The number of retries the agent will send request to daemon before giving up
-
diff --git a/doc/common/tables/neutron-ml2_ncs.xml b/doc/common/tables/neutron-ml2_ncs.xml deleted file mode 100644 index c2fc92c271..0000000000 --- a/doc/common/tables/neutron-ml2_ncs.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 NCS mechanism driver configuration options
Configuration option = Default valueDescription
[ml2_ncs]
= None(StrOpt) HTTP password for authentication
= 10(IntOpt) HTTP timeout in seconds.
= None(StrOpt) HTTP URL of Tail-f NCS REST interface.
= None(StrOpt) HTTP username for authentication
-
diff --git a/doc/common/tables/neutron-ml2_odl.xml b/doc/common/tables/neutron-ml2_odl.xml deleted file mode 100644 index 9dc2d22432..0000000000 --- a/doc/common/tables/neutron-ml2_odl.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 OpenDaylight mechanism driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Enable eventlet backdoor. Acceptable values are 0, <port>, and <start>:<end>, where 0 results in listening on a random tcp port number; <port> results in listening on the specified port number (and not enabling backdoor if that port is in use); and <start>:<end> results in listening on the smallest unused port number within the specified range of port numbers. The chosen port is displayed in the service's log file.
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
= True(BoolOpt) Some periodic tasks can be run in a separate process. Should we run them here?
[ml2_odl]
= None(StrOpt) HTTP password for authentication
= 30(IntOpt) Tomcat session timeout in minutes.
= 10(IntOpt) HTTP timeout in seconds.
= None(StrOpt) HTTP URL of OpenDaylight REST interface.
= None(StrOpt) HTTP username for authentication
-
diff --git a/doc/common/tables/neutron-ml2_ofa.xml b/doc/common/tables/neutron-ml2_ofa.xml deleted file mode 100644 index fe2a7a9656..0000000000 --- a/doc/common/tables/neutron-ml2_ofa.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 ofagent mechanism driver configuration options
Configuration option = Default valueDescription
[AGENT]
= True(BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/VXLAN tunnel.
= 60(IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath
-
diff --git a/doc/common/tables/neutron-ml2_sriov.xml b/doc/common/tables/neutron-ml2_sriov.xml deleted file mode 100644 index e8903bac47..0000000000 --- a/doc/common/tables/neutron-ml2_sriov.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 SR-IOV driver configuration options
Configuration option = Default valueDescription
[ml2_sriov]
= True(BoolOpt) SRIOV neutron agent is required for port binding. DEPRECATED: This option is deprecated in the Liberty release and will be removed in the Mitaka release. From Mitaka the agent will always be required.
= 15b3:1004, 8086:10ca(ListOpt) Supported PCI vendor devices, defined by vendor_id:product_id according to the PCI ID Repository. Default enables support for Intel and Mellanox SR-IOV capable NICs
-
diff --git a/doc/common/tables/neutron-ml2_vlan.xml b/doc/common/tables/neutron-ml2_vlan.xml deleted file mode 100644 index a31de841dd..0000000000 --- a/doc/common/tables/neutron-ml2_vlan.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 VLAN configuration options
Configuration option = Default valueDescription
[ml2_type_vlan]
= (ListOpt) List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> specifying physical_network names usable for VLAN provider and tenant networks, as well as ranges of VLAN tags on each available for allocation to tenant networks.
-
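A sketch of the <physical_network>:<vlan_min>:<vlan_max> format described above, assuming the conventional network_vlan_ranges option name:

    [ml2_type_vlan]
    # physnet1 offers VLANs 100-199 for tenant networks; physnet2 is listed without a range.
    network_vlan_ranges = physnet1:100:199,physnet2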
diff --git a/doc/common/tables/neutron-ml2_vxlan.xml b/doc/common/tables/neutron-ml2_vxlan.xml deleted file mode 100644 index ae6e19c392..0000000000 --- a/doc/common/tables/neutron-ml2_vxlan.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ML2 VXLAN configuration options
Configuration option = Default valueDescription
[ml2_type_vxlan]
= (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation
= None(StrOpt) Multicast group for VXLAN. If unset, disables VXLAN multicast mode.
-
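A sketch of the VXLAN type driver section, assuming the conventional vni_ranges and vxlan_group option names:

    [ml2_type_vxlan]
    vni_ranges = 1001:2000
    vxlan_group = 239.1.1.1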
diff --git a/doc/common/tables/neutron-nec.xml b/doc/common/tables/neutron-nec.xml deleted file mode 100644 index b107c2546a..0000000000 --- a/doc/common/tables/neutron-nec.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of NEC configuration options
Configuration option = Default valueDescription
[OFC]
= 3(IntOpt) Maximum attempts per OFC API request. NEC plugin retries API request to OFC when OFC returns ServiceUnavailable (503). The value must be greater than 0.
= None(StrOpt) Location of certificate file.
= trema(StrOpt) Driver to use.
= True(BoolOpt) Enable packet filter.
= 127.0.0.1(StrOpt) Host to connect to.
= False(BoolOpt) Disable SSL certificate verification.
= None(StrOpt) Location of key file.
= (StrOpt) Base URL of OFC REST API. It is prepended to each API request.
= 8888(StrOpt) Port to connect to.
= True(BoolOpt) Support packet filter on OFC router interface.
= False(BoolOpt) Use SSL to connect.
[PROVIDER]
= l3-agent(StrOpt) Default router provider to use.
= l3-agent, openflow(ListOpt) List of enabled router providers.
[fwaas]
= (StrOpt) Name of the FWaaS Driver
-
diff --git a/doc/common/tables/neutron-nova.xml b/doc/common/tables/neutron-nova.xml deleted file mode 100644 index 2629d7503e..0000000000 --- a/doc/common/tables/neutron-nova.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of nova configuration options
Configuration option = Default valueDescription
[nova]
= None(StrOpt) Name of the plugin to load
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) PEM encoded Certificate Authority to use when verifying HTTPs connections.
= None(StrOpt) PEM encoded client certificate cert file
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) PEM encoded client certificate key file
= None(StrOpt) Name of nova region to use. Useful if keystone manages more than one region.
= None(IntOpt) Timeout value for http requests
-
diff --git a/doc/common/tables/neutron-nsx.xml b/doc/common/tables/neutron-nsx.xml deleted file mode 100644 index 2a2078f745..0000000000 --- a/doc/common/tables/neutron-nsx.xml +++ /dev/null @@ -1,343 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VMware NSX configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 900(IntOpt) Reconnect connection to nsx if not used within this amount of time.
= None(StrOpt) Unique identifier of the Service Cluster which will be used by logical services like dhcp and metadata
= None(StrOpt) This is the uuid of the default NSX Transport zone that will be used for creating tunneled isolated "Neutron" networks. It needs to be created in NSX before starting Neutron with the nsx plugin.
= 75(IntOpt) Time before aborting a request
= None(ListOpt) Lists the NSX controllers in this cluster
= breth0(StrOpt) Name of the interface on an L2 Gateway transport node which should be used by default when setting up a network connection
= None(StrOpt) Class path for the L2 gateway backend driver
= admin(StrOpt) Password for NSX controllers in this cluster
= admin(StrOpt) User name for NSX controllers in this cluster
= 2(IntOpt) Number of times a redirect should be followed
= 2(IntOpt) Number of time a request should be retried
[NSX]
= agent(StrOpt) The mode used to implement DHCP/metadata services.
= 10(IntOpt) Maximum concurrent connections to each NSX controller.
= stt(StrOpt) The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
= 5000(IntOpt) Maximum number of ports of a logical switch on a bridged transport zone (default 5000)
= 256(IntOpt) Maximum number of ports of a logical switch on an overlay transport zone (default 256)
= access_network(StrOpt) If set to access_network this enables a dedicated connection to the metadata proxy for metadata server access via Neutron router. If set to dhcp_host_route this enables host route injection via the dhcp agent. This option is only useful if running on a host that does not support namespaces otherwise access_network should be used.
= -1(IntOpt) Number of seconds a generation id should be valid for (default -1 meaning do not time out)
= service(StrOpt) The default option leverages service nodes to perform packet replication though one could set this to 'source' to perform replication locally. This is useful if one does not want to deploy a service node(s). It must be set to 'service' for leveraging distributed routers.
[NSX_DHCP]
= 43200(IntOpt) Default DHCP lease time
= openstacklocal(StrOpt) Domain to use for building the hostnames
= (ListOpt) Comma separated list of additional domain name servers
[NSX_LSN]
= False(BoolOpt) Pull LSN information from NSX in case it is missing from the local data store. This is useful to rebuild the local store in case of server recovery.
[NSX_METADATA]
= 127.0.0.1(StrOpt) IP address used by Metadata server.
= 8775(IntOpt) TCP Port used by Metadata server.
= (StrOpt) Shared secret to sign instance-id request
[NSX_SYNC]
= False(BoolOpt) Always read operational status from backend on show operations. Enabling this option might slow down the system.
= 0(IntOpt) Maximum value for the additional random delay in seconds between runs of the state synchronization task
= 500(IntOpt) Minimum number of resources to be retrieved from NSX during state synchronization
= 1(IntOpt) Minimum delay, in seconds, between two state synchronization queries to NSX. It must not exceed state_sync_interval
= 10(IntOpt) Interval in seconds between runs of the state synchronization task. Set it to 0 to disable it
[nsx_v3]
= None(StrOpt) Specify a CA bundle file to use in verifying the NSX Manager server certificate.
= None(StrOpt) Default bridge cluster identifier for L2 gateway. This needs to be created in NSX before using the L2 gateway service plugin.
= None(StrOpt) Default edge cluster identifier
= None(StrOpt) This is the UUID of the default NSX overlay transport zone that will be used for creating tunneled isolated Neutron networks. It needs to be created in NSX before starting Neutron with the NSX plugin.
= None(StrOpt) Default tier0 router identifier
= None(StrOpt) This is the UUID of the default NSX VLAN transport zone that will be used for bridging between Neutron networks. It needs to be created in NSX before starting Neutron with the NSX plugin.
= True(BoolOpt) If true, the NSX Manager server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
= None(StrOpt) IP address of the NSX manager
= default(StrOpt) Password for the NSX manager
= admin(StrOpt) User name for the NSX manager
= 10(IntOpt) Maximum number of times to retry API request
[nsxv]
= service:large:4:10, service:compact:4:10, vdr:large:4:10(ListOpt) Defines edge pool using the format: <edge_type>:[edge_size]:<min_edges>:<max_edges>. edge_type: service, vdr. edge_size: compact, large, xlarge, quadlarge; the default is large.
= None(StrOpt) Specify a CA bundle file to use in verifying the NSXv server certificate.
= (ListOpt) Parameter listing the IDs of the clusters which are used by OpenStack.
= None(StrOpt) Optional parameter identifying the ID of datacenter to deploy NSX Edges
= None(StrOpt) Optional parameter identifying the ID of datastore to deploy NSX Edges
= None(StrOpt) Optional parameter identifying the ID of datastore to deploy NSX Edges
= 86400(IntOpt) DHCP default lease time.
= None(StrOpt) DVS ID for VLANs
= None(StrOpt) Password to configure for Edge appliance login
= None(StrOpt) Username to configure for Edge appliance login
= False(BoolOpt) Enable HA for NSX Edges
= compact(StrOpt) Edge appliance size to be used for creating exclusive router. Valid values: ['compact', 'large', 'xlarge', 'quadlarge']. This edge_appliance_size will be picked up if --router-size parameter is not specified while doing neutron router-create
= None(StrOpt) Network ID for physical network connectivity
= True(BoolOpt) If true, the NSXv server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
= None(StrOpt) A URL to a locking mechanism coordinator
= None(StrOpt) uri for vsm
= 20(IntOpt) Maximum number of sub interfaces supported per vnic in edge.
= True(BoolOpt) If True, the server instance will attempt to initialize the metadata infrastructure
= None(StrOpt) Shared secret to sign metadata requests
= None(StrOpt) Management network default gateway for metadata proxy
= None(StrOpt) Network ID for management network connectivity
= None(ListOpt) Management network IP address for metadata proxy
= None(StrOpt) Management network netmask for metadata proxy
= None(ListOpt) IP addresses used by Nova metadata service
= 8775(IntOpt) TCP Port used by Nova metadata server
= default(StrOpt) Password for vsm
= None(StrOpt) Optional parameter identifying the ID of resource to deploy NSX Edges
= 10(IntOpt) Maximum number of API retries on endpoint.
= True(BoolOpt) If True then plugin will use NSXV spoofguard component for port-security feature.
= 2000(IntOpt) Task status check interval
= shared, distributed, exclusive(ListOpt) Ordered list of router_types to allocate as tenant routers.
= admin(StrOpt) User name for vsm
= None(StrOpt) Network scope ID for VXLAN virtual wires
-
diff --git a/doc/common/tables/neutron-nvsd.xml b/doc/common/tables/neutron-nvsd.xml deleted file mode 100644 index dca0235e9a..0000000000 --- a/doc/common/tables/neutron-nvsd.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of NVSD driver configuration options
Configuration option = Default valueDescription
[AGENT]
= br-int(StrOpt) integration bridge
[nvsd]
= 127.0.0.1(StrOpt) NVSD Controller IP address
= oc123(StrOpt) NVSD Controller password
= 8082(IntOpt) NVSD Controller Port number
= 0(IntOpt) Number of login retries to NVSD controller
= ocplugin(StrOpt) NVSD Controller username
= 30(IntOpt) NVSD controller REST API request timeout in seconds
-
diff --git a/doc/common/tables/neutron-onos.xml b/doc/common/tables/neutron-onos.xml deleted file mode 100644 index 999a2a4bbb..0000000000 --- a/doc/common/tables/neutron-onos.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Open Networking Operating System (ONOS) configuration options
Configuration option = Default valueDescription
[onos]
= (StrOpt) Password for authentication.
= (StrOpt) ONOS ReST interface URL
= (StrOpt) Username for authentication.
-
diff --git a/doc/common/tables/neutron-opencontrail.xml b/doc/common/tables/neutron-opencontrail.xml deleted file mode 100644 index f0165efa0e..0000000000 --- a/doc/common/tables/neutron-opencontrail.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of OpenContrail configuration options
Configuration option = Default valueDescription
[CONTRAIL]
= 127.0.0.1(StrOpt) IP address to connect to opencontrail controller
= 8082(IntOpt) Port to connect to opencontrail controller
-
diff --git a/doc/common/tables/neutron-openvswitch_agent.xml b/doc/common/tables/neutron-openvswitch_agent.xml deleted file mode 100644 index 3e6d97dba5..0000000000 --- a/doc/common/tables/neutron-openvswitch_agent.xml +++ /dev/null @@ -1,152 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Open vSwitch agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= br-int(StrOpt) Name of Open vSwitch bridge to use
= False(BoolOpt) Uses veth for an interface or not
= 10(IntOpt) Timeout in seconds for ovs-vsctl commands
[AGENT]
= False(BoolOpt) Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 l2population driver. Allows the switch (when supporting an overlay) to respond to an ARP request locally without performing a costly ARP broadcast into the overlay.
= True(BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/VXLAN tunnel.
= False(BoolOpt) Reset flow table on start. Setting this to True will cause brief traffic interruption.
= False(BoolOpt) Make the l2 agent run in DVR mode.
= False(BoolOpt) Use ML2 l2population mechanism driver to learn remote MAC and IPs and improve tunnel scalability.
= True(BoolOpt) Minimize polling by monitoring ovsdb for interface changes.
= 30(IntOpt) The number of seconds to wait before respawning the ovsdb monitor after losing communication with it.
= True(BoolOpt) Enable suppression of ARP responses that don't match an IP address that belongs to the port from which they originate. Note: This prevents the VMs attached to this agent from spoofing; it doesn't protect them from other devices which have the capability to spoof (e.g. bare metal or VMs attached to agents without this flag set to True). Spoofing rules will not be added to any ports that have port security disabled. For LinuxBridge, this requires ebtables. For OVS, it requires a version that supports matching ARP headers.
= 10(IntOpt) Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If value is set to 0, rpc timeout won't be changed
= False(BoolOpt) Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/VXLAN tunnel.
= (ListOpt) Network types supported by the agent (gre and/or vxlan).
= None(IntOpt) MTU size of veth interfaces
= 4789(IntOpt) The UDP port to use for VXLAN tunnels.
[OVS]
= (ListOpt) List of <physical_network>:<bridge>. Deprecated for ofagent.
= system(StrOpt) OVS datapath to use.
= patch-tun(StrOpt) Peer patch port in integration bridge for tunnel bridge.
= br-int(StrOpt) Integration bridge to use.
= None(IPOpt) Local IP address of tunnel endpoint.
= 30(IntOpt) Timeout in seconds to wait for the local switch connecting the controller. Used only for 'native' driver.
= ovs-ofctl(StrOpt) OpenFlow interface to use.
= 127.0.0.1(IPOpt) Address to listen on for OpenFlow connections. Used only for 'native' driver.
= 6633(IntOpt) Port to listen on for OpenFlow connections. Used only for 'native' driver.
= 10(IntOpt) Timeout in seconds to wait for a single OpenFlow request. Used only for 'native' driver.
= tcp:127.0.0.1:6640(StrOpt) The connection string for the native OVSDB backend
= vsctl(StrOpt) The interface for interacting with the OVSDB
= patch-int(StrOpt) Peer patch port in tunnel bridge for integration bridge.
= br-tun(StrOpt) Tunnel bridge to use.
= False(BoolOpt) Use veths instead of patch ports to interconnect the integration bridge to physical bridges.
-
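To show how the bridge, tunnel, and population options above fit together on a compute node, a hedged sketch of openvswitch_agent.ini using the section headers from the table; the option names (integration_bridge, tunnel_bridge, local_ip, bridge_mappings, tunnel_types, l2_population, arp_responder) are the conventional ones and are not shown verbatim above:

    [OVS]
    integration_bridge = br-int
    tunnel_bridge = br-tun
    local_ip = 192.0.2.10
    bridge_mappings = physnet1:br-ex

    [AGENT]
    tunnel_types = vxlan
    l2_population = True
    arp_responder = True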
diff --git a/doc/common/tables/neutron-ovn.xml b/doc/common/tables/neutron-ovn.xml deleted file mode 100644 index acfb7ed4ac..0000000000 --- a/doc/common/tables/neutron-ovn.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Virtual Network for Open vSwitch configuration options
Configuration option = Default valueDescription
[ovn]
= log(StrOpt) The synchronization mode of OVN with Neutron DB. off: synchronization is off. log: during neutron-server startup, check to see if OVN is in sync with the Neutron database, and log warnings for any inconsistencies found so that an admin can investigate. repair: during neutron-server startup, automatically create resources found in Neutron but not in OVN, and remove resources from OVN that are no longer in Neutron.
= tcp:127.0.0.1:6640(StrOpt) The connection string for the native OVSDB backend
= 60(IntOpt) Timeout in seconds for the OVSDB connection transaction
-
diff --git a/doc/common/tables/neutron-pd_linux_agent.xml b/doc/common/tables/neutron-pd_linux_agent.xml deleted file mode 100644 index 5f7f1407e2..0000000000 --- a/doc/common/tables/neutron-pd_linux_agent.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of IPv6 Prefix Delegation driver configuration options
Configuration option = Default valueDescription
[DEFAULT]
= $state_path/pd(StrOpt) Location to store IPv6 PD files.
= dibbler(StrOpt) Service to handle DHCPv6 Prefix delegation.
= 8888(StrOpt) A decimal value as Vendor's Registered Private Enterprise Number as required by RFC3315 DUID-EN.
-
diff --git a/doc/common/tables/neutron-plumgrid.xml b/doc/common/tables/neutron-plumgrid.xml deleted file mode 100644 index d41de11ac3..0000000000 --- a/doc/common/tables/neutron-plumgrid.xml +++ /dev/null @@ -1,50 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of PLUMgrid configuration options
Configuration option = Default valueDescription
[plumgriddirector]
= localhost(StrOpt) PLUMgrid Director server to connect to
= 8080(IntOpt) PLUMgrid Director server port to connect to
= True(BoolOpt) Distributed locking is enabled or disabled
= networking_plumgrid.neutron.plugins.drivers.plumlib.Plumlib(StrOpt) PLUMgrid Driver
= password(StrOpt) PLUMgrid Director admin password
= 5(IntOpt) PLUMgrid Director server timeout
= username(StrOpt) PLUMgrid Director admin username
-
diff --git a/doc/common/tables/neutron-policy.xml b/doc/common/tables/neutron-policy.xml deleted file mode 100644 index 761238240f..0000000000 --- a/doc/common/tables/neutron-policy.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of policy configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Allow overlapping IP support in Neutron
-
diff --git a/doc/common/tables/neutron-qpid.xml b/doc/common/tables/neutron-qpid.xml deleted file mode 100644 index 7f867ac041..0000000000 --- a/doc/common/tables/neutron-qpid.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/neutron-quotas.xml b/doc/common/tables/neutron-quotas.xml deleted file mode 100644 index 40e152398d..0000000000 --- a/doc/common/tables/neutron-quotas.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of quotas configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 30(IntOpt) Maximum number of routes
[QUOTAS]
= -1(IntOpt) Default number of resource allowed per tenant. A negative value means unlimited.
= neutron.db.quota.driver.DbQuotaDriver(StrOpt) Default driver to use for quota checks
= 1(IntOpt) Number of firewalls allowed per tenant. A negative value means unlimited.
= 1(IntOpt) Number of firewall policies allowed per tenant. A negative value means unlimited.
= 100(IntOpt) Number of firewall rules allowed per tenant. A negative value means unlimited.
= 50(IntOpt) Number of floating IPs allowed per tenant. A negative value means unlimited.
= -1(IntOpt) Number of health monitors allowed per tenant. A negative value means unlimited.
= -1(IntOpt) Number of health monitors allowed per tenant. A negative value means unlimited.
= network, subnet, port(ListOpt) Resource name(s) that are supported in quota features. This option is now deprecated for removal.
= -1(IntOpt) Number of Loadbalancer Listeners allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Number of LoadBalancers allowed per tenant. A negative value means unlimited.
= -1(IntOpt) Number of pool members allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Number of networks allowed per tenant. A negative value means unlimited.
= 5(IntOpt) Number of network gateways allowed per tenant, -1 for unlimited
= 100(IntOpt) Number of packet_filters allowed per tenant, -1 for unlimited
= 10(IntOpt) Number of pools allowed per tenant. A negative value means unlimited.
= 50(IntOpt) Number of ports allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Default number of RBAC entries allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Number of routers allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Number of security groups allowed per tenant. A negative value means unlimited.
= 100(IntOpt) Number of security rules allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Number of subnets allowed per tenant. A negative value means unlimited.
= 10(IntOpt) Number of vips allowed per tenant. A negative value means unlimited.
= True(BoolOpt) Keep track in the database of current resource quota usage. Plugins which do not leverage the neutron database should set this flag to False
-
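As an illustration of the per-tenant quotas above in neutron.conf (option names assumed; a negative value means unlimited):

    [QUOTAS]
    default_quota = -1
    quota_network = 20
    quota_subnet = 20
    quota_port = 100
    quota_router = 10
    quota_floatingip = 50
    quota_security_group = 10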
diff --git a/doc/common/tables/neutron-rabbitmq.xml b/doc/common/tables/neutron-rabbitmq.xml deleted file mode 100644 index 13bf091743..0000000000 --- a/doc/common/tables/neutron-rabbitmq.xml +++ /dev/null @@ -1,118 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
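A hedged sketch of the RabbitMQ transport options above in neutron.conf, assuming the conventional oslo.messaging option names:

    [oslo_messaging_rabbit]
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    rabbit_virtual_host = /
    rabbit_ha_queues = False
    heartbeat_timeout_threshold = 60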
diff --git a/doc/common/tables/neutron-redis.xml b/doc/common/tables/neutron-redis.xml deleted file mode 100644 index 63ee38c628..0000000000 --- a/doc/common/tables/neutron-redis.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= None(StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_ring]
= /etc/oslo/matchmaker_ring.json(StrOpt) Matchmaker ring file (JSON).
-
diff --git a/doc/common/tables/neutron-rpc.xml b/doc/common/tables/neutron-rpc.xml deleted file mode 100644 index d855a0fc1e..0000000000 --- a/doc/common/tables/neutron-rpc.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 300(IntOpt) Heartbeat frequency.
= 600(IntOpt) Heartbeat time-to-live.
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 60(IntOpt) Seconds to wait for a response from a call.
= 1(IntOpt) Number of RPC worker processes for service
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
-
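To relate the RPC options above, a minimal sketch (option names assumed: rpc_backend, rpc_response_timeout, rpc_workers, and lock_path in the [oslo_concurrency] group):

    [DEFAULT]
    rpc_backend = rabbit
    rpc_response_timeout = 60
    rpc_workers = 4

    [oslo_concurrency]
    lock_path = /var/lib/neutron/lock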
diff --git a/doc/common/tables/neutron-scheduler.xml b/doc/common/tables/neutron-scheduler.xml deleted file mode 100644 index 34a3072b61..0000000000 --- a/doc/common/tables/neutron-scheduler.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of scheduler configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Allow auto scheduling networks to DHCP agent.
= neutron.scheduler.dhcp_agent_scheduler.WeightScheduler(StrOpt) Driver to use for scheduling network to DHCP agent
= True(BoolOpt) Allow auto scheduling of routers to L3 agent.
= True(BoolOpt) Delete namespace after removing a router. This option is deprecated and will be removed in a future release.
= neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler(StrOpt) Driver to use for scheduling router to a default L3 agent
-
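A hedged example of the scheduler options above in neutron.conf (option names assumed; the driver paths match the defaults listed in the table):

    [DEFAULT]
    network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
    router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
    network_auto_schedule = True
    router_auto_schedule = True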
diff --git a/doc/common/tables/neutron-sdnve.xml b/doc/common/tables/neutron-sdnve.xml deleted file mode 100644 index da36d41e55..0000000000 --- a/doc/common/tables/neutron-sdnve.xml +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of SDN-VE configuration options
Configuration option = Default valueDescription
[SDNVE]
= /one/nb/v2/(StrOpt) Base URL for SDN-VE controller REST API.
= 127.0.0.1(ListOpt) List of IP addresses of SDN-VE controller(s).
= OVERLAY(StrOpt) Tenant type: OVERLAY (default) or OF.
= json(StrOpt) SDN-VE request/response format.
= sdnve_info_string(StrOpt) SDN-VE RPC subject.
= None(StrOpt) Integration bridge to use.
= (ListOpt) List of <physical_network_name>:<interface_name> mappings.
= SDNVE-OF(StrOpt) The string in tenant description that indicates the tenant is an OF tenant.
= True(BoolOpt) Indicating if controller is out of band or not.
= SDNVE-OVERLAY(StrOpt) The string in tenant description that indicates the tenant is an OVERLAY tenant.
= admin(StrOpt) SDN-VE administrator password.
= 8443(StrOpt) SDN-VE controller port number.
= True(BoolOpt) Whether to reset the integration bridge before use.
= False(BoolOpt) Whether to use a fake controller.
= admin(StrOpt) SDN-VE administrator user ID.
[SDNVE_AGENT]
= 2(IntOpt) Agent polling interval if necessary.
= True(BoolOpt) Whether to use rpc.
-
diff --git a/doc/common/tables/neutron-securitygroups.xml b/doc/common/tables/neutron-securitygroups.xml deleted file mode 100644 index bc0aa8999a..0000000000 --- a/doc/common/tables/neutron-securitygroups.xml +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of security groups configuration options
Configuration option = Default valueDescription
[SECURITYGROUP]
= True(BoolOpt) Enable defer_apply on security bridge.
= True(BoolOpt) Use ipset to speed-up the iptables based security groups.
= True(BoolOpt) Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the nova security group API.
= None(StrOpt) Driver for security groups firewall in the L2 agent
= networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver(StrOpt) DriverManager implementation for OVS based Firewall.
= br-sec(StrOpt) <security_bridge>:<phy_interface>
-
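A sketch of how the security group options above are typically combined on an OVS node, assuming the conventional option names firewall_driver, enable_security_group, and enable_ipset:

    [SECURITYGROUP]
    enable_security_group = True
    enable_ipset = True
    firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver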
diff --git a/doc/common/tables/neutron-sriov.xml b/doc/common/tables/neutron-sriov.xml deleted file mode 100644 index 52a5fccf56..0000000000 --- a/doc/common/tables/neutron-sriov.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of SR-IOV configuration options
Configuration option = Default valueDescription
[SRIOV_NIC]
= (ListOpt) List of <network_device>:<excluded_devices> mapping network_device to the agent's node-specific list of virtual functions that should not be used for virtual networking. excluded_devices is a semicolon separated list of virtual functions (BDF format) to exclude from network_device. The network_device in the mapping should appear in the physical_device_mappings list.
= (ListOpt) List of <physical_network>:<network_device> mapping physical network names to the agent's node-specific physical network device of SR-IOV physical function to be used for VLAN networks. All physical networks listed in network_vlan_ranges on the server should have mappings to appropriate interfaces on each agent
-
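A sketch of the SR-IOV agent mappings described above, assuming the conventional physical_device_mappings and exclude_devices option names; the interface name and PCI addresses are placeholders:

    [SRIOV_NIC]
    physical_device_mappings = physnet1:eth3
    # Exclude two virtual functions of eth3 (BDF format, semicolon separated).
    exclude_devices = eth3:0000:07:00.2;0000:07:00.3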
diff --git a/doc/common/tables/neutron-vmware.xml b/doc/common/tables/neutron-vmware.xml deleted file mode 100644 index 0225bc24f2..0000000000 --- a/doc/common/tables/neutron-vmware.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VMware configuration options
Configuration option = Default valueDescription
[DEFAULT]
= FortyGigE1/0/1(StrOpt) default_interface_name of the l2 gateway
[OVSVAPP]
= networking_vsphere.agent.ovsvapp_agent.OVSvAppL2Agent(StrOpt) OVSvApp Agent implementation.
= (ListOpt) Bridge mappings.
= True(IntOpt) Do not fragment.
= True(BoolOpt) To monitor the OVSvApp Agents.
= br-int(StrOpt) Integration Bridge.
= (StrOpt) Local IP address of VXLAN tunnel endpoint.
= (StrOpt) IP address for monitoring OVS Status.
= networking_vsphere.drivers.manager.VcenterManager(StrOpt) Driver Manager implementation for NetworkDriver.
= 2(IntOpt) The number of seconds the agent will wait between polling for local device changes.
= 30(IntOpt) Seconds between nodes reporting state to server.
= vlan(StrOpt) Network type for tenant networks
= br-tun(StrOpt) Tunnel Bridge for tunneling.
= False(BoolOpt) Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/VXLAN tunnel.
= vxlan(ListOpt) Tunnel network types supported by the OVSvApp Agent.
= 1500(IntOpt) MTU size of veth interfaces.
= 4789(IntOpt) The UDP port to use for VXLAN tunnels.
[VMWARE]
= False(BoolOpt) Enable SSL certificate check for vCenter.
= None(StrOpt) Certificate chain path containing cacert of vCenters.
= [''](MultiStrOpt) vCenter cluster to DVS mapping.
= None(StrOpt) ESX host name where this OVSvApp is hosted.
= True(BoolOpt) Set host into maintenance mode.
= 443(IntOpt) Customized https_port for vCenter communication.
= 5(StrOpt) Number of retries while connecting to vcenter server.
= None(StrOpt) Unique ID of the vCenter Server on which this OVSvApp is hosted
= None(StrOpt) vCenter server IP.
= None(StrOpt) vCenter server password.
= None(StrOpt) vCenter server user name.
= None(StrOpt) vCenter server wsdl location.
[vmware]
= None(IntOpt) Set this value if affected by an increased network latency causing repeated characters when typing in a remote console.
= 100(IntOpt) The maximum number of ObjectContent data objects that should be returned in a single result. A positive value will cause the operation to suspend the retrieval when the count of objects reaches the specified maximum. The server may still limit the count to something less than the configured value. Any remaining objects may be retrieved with additional requests.
= None(StrOpt) Identifies a proxy service that provides network access to the serial_port_service_uri. This option is ignored if serial_port_service_uri is not specified.
= None(StrOpt) Identifies the remote system that serial port traffic will be sent to. If this is not set, no serial ports will be added to the created VMs.
-
diff --git a/doc/common/tables/neutron-vpnaas.xml b/doc/common/tables/neutron-vpnaas.xml deleted file mode 100644 index 1790e11af6..0000000000 --- a/doc/common/tables/neutron-vpnaas.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of VPN-as-a-Service configuration options
Configuration option = Default valueDescription
[vpnagent]
= ['neutron_vpnaas.services.vpn.device_drivers.vyatta_ipsec.VyattaIPSecDriver'](MultiStrOpt) The vpn device drivers Neutron will use
-
diff --git a/doc/common/tables/neutron-vpnaas_ipsec.xml b/doc/common/tables/neutron-vpnaas_ipsec.xml deleted file mode 100644 index 04d311aaed..0000000000 --- a/doc/common/tables/neutron-vpnaas_ipsec.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VPNaaS IPsec plug-in configuration options
Configuration option = Default valueDescription
[ipsec]
= $state_path/ipsec(StrOpt) Location to store ipsec server config files
= False(BoolOpt) Enable detailed logging for the ipsec pluto process. If the flag is set to True, the detailed logging will be written into config_base_dir/<pid>/logs.
= 60(IntOpt) Interval for checking ipsec status
-
diff --git a/doc/common/tables/neutron-vpnaas_libreswan.xml b/doc/common/tables/neutron-vpnaas_libreswan.xml deleted file mode 100644 index 38d001ee30..0000000000 --- a/doc/common/tables/neutron-vpnaas_libreswan.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VPNaaS libreSwan plug-in configuration options
Configuration option = Default valueDescription
[libreswan]
= 1.5(FloatOpt) A factor to increase the retry interval for each retry
= 5(IntOpt) The maximum number of retries for checking for pluto daemon shutdown
= 1(IntOpt) Initial interval in seconds for checking if pluto daemon is shutdown
-
diff --git a/doc/common/tables/neutron-vpnaas_openswan.xml b/doc/common/tables/neutron-vpnaas_openswan.xml deleted file mode 100644 index 8115b0fb36..0000000000 --- a/doc/common/tables/neutron-vpnaas_openswan.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VPNaaS Openswan plug-in configuration options
Configuration option = Default valueDescription
[openswan]
= /usr/lib/python/site-packages/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.conf.template(StrOpt) Template file for ipsec configuration
= /usr/lib/python/site-packages/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.secret.template(StrOpt) Template file for ipsec secret configuration
-
diff --git a/doc/common/tables/neutron-vpnaas_strongswan.xml b/doc/common/tables/neutron-vpnaas_strongswan.xml deleted file mode 100644 index a8a98c4dbc..0000000000 --- a/doc/common/tables/neutron-vpnaas_strongswan.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of VPNaaS strongSwan plug-in configuration options
Configuration option = Default valueDescription
[strongswan]
= /etc/strongswan.d(StrOpt) The area where default StrongSwan configuration files are located.
= /usr/lib/python/site-packages/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.conf.template(StrOpt) Template file for ipsec configuration.
= /usr/lib/python/site-packages/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.secret.template(StrOpt) Template file for ipsec secret configuration.
= /usr/lib/python/site-packages/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/strongswan.conf.template(StrOpt) Template file for strongswan configuration.
-
diff --git a/doc/common/tables/neutron-zeromq.xml b/doc/common/tables/neutron-zeromq.xml deleted file mode 100644 index 98c3aba479..0000000000 --- a/doc/common/tables/neutron-zeromq.xml +++ /dev/null @@ -1,50 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= local(StrOpt) MatchMaker driver.
= 9501(IntOpt) ZeroMQ receiver listening port.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
-
diff --git a/doc/common/tables/nova-amqp.xml b/doc/common/tables/nova-amqp.xml deleted file mode 100644 index f84e03f233..0000000000 --- a/doc/common/tables/nova-amqp.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= None(StrOpt) Default publisher_id for outgoing notifications
= [](MultiStrOpt) The driver(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
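These AMQP options correspond to the oslo.messaging settings in nova.conf. A hedged sketch, assuming the option names transport_url, control_exchange, notification_driver, and notification_topics from oslo.messaging; host names and credentials are placeholders:

    [DEFAULT]
    # Messaging driver URL; when unset, rpc_backend and driver options apply
    transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
    # Default exchange under which topics are scoped
    control_exchange = openstack
    # Driver(s) that handle sending notifications
    notification_driver = messagingv2
    # AMQP topic used for OpenStack notifications
    notification_topics = notifications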
diff --git a/doc/common/tables/nova-api.xml b/doc/common/tables/nova-api.xml deleted file mode 100644 index d1d7ce8a5d..0000000000 --- a/doc/common/tables/nova-api.xml +++ /dev/null @@ -1,162 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= api-paste.ini(StrOpt) File name for the paste.deploy config for nova-api
= False(BoolOpt) Whether to use per-user rate limiting for the api. This option is only used by v2 api. Rate limiting is removed from v2.1 api.
= 900(IntOpt) Timeout for client connections' socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
= True(BoolOpt) Services to be added to the available pool on create
= ec2, osapi_compute, metadata(ListOpt) A list of APIs to enable by default
= (ListOpt) A list of APIs with enabled SSL
= instance-%08x(StrOpt) Template string to be used to generate instance names
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= %(name)s-%(count)d(StrOpt) When creating multiple instances with a single request using the os-multiple-create API extension, this template will be used to build the display name for each instance. The benefit is that the instances end up with different hostnames. To restore legacy behavior of every instance having the same name, set this option to "%(name)s". Valid keys for the template are: name, uuid, count.
= cache_in_nova, bittorrent(ListOpt) These are image properties which a snapshot should not inherit from an instance
= nokernel(StrOpt) Kernel image that indicates not to use a kernel, but to use a raw disk image instead
= (ListOpt) DEPRECATED: Specify list of extensions to load when using osapi_compute_extension option with nova.api.openstack.compute.legacy_v2.contrib.select_extensions. This option will be removed in the near future. After that point you have to run all of the API.
= ['nova.api.openstack.compute.legacy_v2.contrib.standard_extensions'](MultiStrOpt) osapi compute extension to load. This option will be removed in the near future. After that point you have to run all of the API.
= None(StrOpt) Base URL that will be presented to users in links to the OpenStack Compute API
= 0.0.0.0(StrOpt) The IP address on which the OpenStack API will listen.
= 8774(IntOpt) The port on which the OpenStack API will listen.
= None(IntOpt) Number of workers for OpenStack API service. The default will be the number of CPUs available.
= building(ListOpt) List of instance states that should hide network info
= None(StrOpt) The HTTP header used to determine the scheme for the original request, even if it was removed by an SSL terminating proxy. Typical value is "HTTP_X_FORWARDED_PROTO".
= db(StrOpt) The driver for servicegroup service (valid options are: db, zk, mc)
= snapshot-%s(StrOpt) Template string to be used to generate snapshot names
= 600(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.
= False(BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
= 1000(IntOpt) Size of the pool of greenthreads used by wsgi
= True(BoolOpt) If False, closes the client socket connection explicitly.
= %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f(StrOpt) A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds.
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_versionedobjects]
= False(BoolOpt) Make exception message format errors fatal
-
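To illustrate how the listen address, port, and worker options above fit together, a minimal nova.conf API sketch; option names such as enabled_apis and osapi_compute_listen follow upstream nova naming and are assumptions here:

    [DEFAULT]
    # Paste deploy configuration for nova-api
    api_paste_config = api-paste.ini
    # APIs enabled by default
    enabled_apis = ec2,osapi_compute,metadata
    # Address and port the OpenStack Compute API listens on
    osapi_compute_listen = 0.0.0.0
    osapi_compute_listen_port = 8774
    # Number of API workers; defaults to the number of CPUs when unset
    osapi_compute_workers = 4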
diff --git a/doc/common/tables/nova-apiv21.xml b/doc/common/tables/nova-apiv21.xml deleted file mode 100644 index e41fdcb3cc..0000000000 --- a/doc/common/tables/nova-apiv21.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API v2.1 configuration options
Configuration option = Default valueDescription
[osapi_v21]
= True(BoolOpt) DEPRECATED: Whether the V2.1 API is enabled or not. This option will be removed in the near future.
= (ListOpt) DEPRECATED: A list of v2.1 API extensions to never load. Specify the extension aliases here. This option will be removed in the near future. After that point you have to run all of the API.
= (ListOpt) DEPRECATED: If the list is not empty then a v2.1 API extension will only be loaded if it exists in this list. Specify the extension aliases here. This option will be removed in the near future. After that point you have to run all of the API.
-
diff --git a/doc/common/tables/nova-apiv3.xml b/doc/common/tables/nova-apiv3.xml deleted file mode 100644 index 1bdafebd61..0000000000 --- a/doc/common/tables/nova-apiv3.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API v3 configuration options
Configuration option = Default valueDescription
[osapi_v3]
= False(BoolOpt) Whether the V3 API is enabled or not
= (ListOpt) A list of v3 API extensions to never load. Specify the extension aliases here.
= (ListOpt) If the list is not empty then a v3 API extension will only be loaded if it exists in this list. Specify the extension aliases here.
-
diff --git a/doc/common/tables/nova-auth_token.xml b/doc/common/tables/nova-auth_token.xml deleted file mode 100644 index 447770f259..0000000000 --- a/doc/common/tables/nova-auth_token.xml +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
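As a worked example of the middleware options above, a typical Liberty-era [keystone_authtoken] block; the option names follow keystonemiddleware conventions, and the endpoints and credentials are placeholders:

    [keystone_authtoken]
    # Complete public Identity API endpoint
    auth_uri = http://controller:5000
    # Unversioned root of the admin Identity API endpoint
    identity_uri = http://controller:35357/
    # Service credentials used to validate tokens
    admin_tenant_name = service
    admin_user = nova
    admin_password = NOVA_PASS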
diff --git a/doc/common/tables/nova-authentication.xml b/doc/common/tables/nova-authentication.xml deleted file mode 100644 index db4b9e1be3..0000000000 --- a/doc/common/tables/nova-authentication.xml +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of authentication configuration options
Configuration option = Default valueDescription
[DEFAULT]
= keystone(StrOpt) The strategy to use for auth: keystone or noauth2. noauth2 is designed for testing only, as it does no actual credential checking. noauth2 provides administrative credentials only if 'admin' is specified as the username.
-
diff --git a/doc/common/tables/nova-availabilityzones.xml b/doc/common/tables/nova-availabilityzones.xml deleted file mode 100644 index c8d4a4c52e..0000000000 --- a/doc/common/tables/nova-availabilityzones.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of availability zones configuration options
Configuration option = Default valueDescription
[DEFAULT]
= nova(StrOpt) Default compute node availability_zone
= None(StrOpt) Availability zone to use when user doesn't specify one
= internal(StrOpt) The availability_zone to show internal services under
-
diff --git a/doc/common/tables/nova-barbican.xml b/doc/common/tables/nova-barbican.xml deleted file mode 100644 index 53e5ef8ad7..0000000000 --- a/doc/common/tables/nova-barbican.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Barbican configuration options
Configuration option = Default valueDescription
[barbican]
= None(StrOpt) PEM encoded Certificate Authority to use when verifying HTTPs connections.
= key-manager:barbican:public(StrOpt) Info to match when looking for barbican in the service catalog. Format is: separated values of the form: <service_type>:<service_name>:<endpoint_type>
= None(StrOpt) PEM encoded client certificate cert file
= None(StrOpt) Override service catalog lookup with template for barbican endpoint e.g. http://localhost:9311/v1/%(project_id)s
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) PEM encoded client certificate key file
= None(StrOpt) Region name of this node
= None(IntOpt) Timeout value for http requests
-
diff --git a/doc/common/tables/nova-ca.xml b/doc/common/tables/nova-ca.xml deleted file mode 100644 index 148a7532b5..0000000000 --- a/doc/common/tables/nova-ca.xml +++ /dev/null @@ -1,111 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CA and SSL configuration options
Configuration option = Default valueDescription
[DEFAULT]
= cacert.pem(StrOpt) Filename of root CA
= $state_path/CA(StrOpt) Where we keep our root CA
= self.pem(StrOpt) SSL certificate file
= nova.cert.manager.CertManager(StrOpt) Full class name for the Manager for cert
= cert(StrOpt) The topic cert nodes listen on
= crl.pem(StrOpt) Filename of root Certificate Revocation List
= private/cakey.pem(StrOpt) Filename of private key
= $state_path/keys(StrOpt) Where we keep our keys
= /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s(StrOpt) Subject for certificate for projects, %s for project, timestamp
= None(StrOpt) CA certificate file to use to verify connecting clients
= None(StrOpt) SSL certificate of API server
= None(StrOpt) SSL private key of API server
= False(BoolOpt) Should we use a CA for each project?
= /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s(StrOpt) Subject for certificate for users, %s for project, user, timestamp
[ssl]
= None(StrOpt) CA certificate file to use to verify connecting clients.
= None(StrOpt) Certificate file to use when starting the server securely.
= None(StrOpt) Private key file to use when starting the server securely.
-
diff --git a/doc/common/tables/nova-cells.xml b/doc/common/tables/nova-cells.xml deleted file mode 100644 index 7449bce9f7..0000000000 --- a/doc/common/tables/nova-cells.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of cell configuration options
Configuration option = Default valueDescription
[cells]
= 60(IntOpt) Seconds to wait for response from a call to a cell.
= hypervisor=xenserver;kvm, os=linux;windows(ListOpt) Key/Multi-value list with the capabilities of the cell
= compute(StrOpt) Type of cell
= None(StrOpt) Configuration file from which to read cells configuration. If given, overrides reading cells from the database.
= 60(IntOpt) Interval, in seconds, for getting fresh cell information from the database.
= nova.cells.rpc_driver.CellsRPCDriver(StrOpt) Cells communication driver to use
= False(BoolOpt) Enable cell functionality
= 1(IntOpt) Number of instances to update per periodic task run
= 3600(IntOpt) Number of seconds after an instance was updated or deleted to continue to update cells
= nova.cells.manager.CellsManager(StrOpt) Manager for cells
= 10(IntOpt) Maximum number of hops for cells routing.
= 300(IntOpt) Number of seconds after which a lack of capability and capacity updates signals the child cell is to be treated as a mute.
= -10000.0(FloatOpt) Multiplier used to weigh mute children. (The value should be negative.)
= nova(StrOpt) Name of this cell
= 1.0(FloatOpt) Multiplier used to weigh offset weigher.
= 10.0(FloatOpt) Percentage of cell capacity to hold in reserve. Affects both memory and disk utilization
= cells(StrOpt) The topic cells nodes listen on
-
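To show how a child cell is typically enabled with the options above, a short sketch; option names such as enable, name, and cell_type follow upstream nova naming and are assumptions here:

    [cells]
    # Turn on cells functionality
    enable = True
    # Name and type of this cell
    name = cell1
    cell_type = compute
    # Key/multi-value capabilities advertised by the cell
    capabilities = hypervisor=kvm,os=linux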
diff --git a/doc/common/tables/nova-common.xml b/doc/common/tables/nova-common.xml deleted file mode 100644 index 4207528e13..0000000000 --- a/doc/common/tables/nova-common.xml +++ /dev/null @@ -1,134 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= /usr/local/bin(StrOpt) Directory where nova binaries are installed
= compute(StrOpt) The topic compute nodes listen on
= console(StrOpt) The topic console proxy nodes listen on
= consoleauth(StrOpt) The topic console auth proxy nodes listen on
= 64(IntOpt) Size of executor thread pool.
= localhost(StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. However, the node name must be valid within an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address
= None(ListOpt) Memcached servers or None for in process cache.
= 10.0.0.1(StrOpt) IP address of this host
= False(BoolOpt) If set, send api.fault notifications on caught exceptions in the API service.
= None(StrOpt) If set, send compute.instance.update notifications on instance state changes. Valid values are None for no notifications, "vm_state" for notifications on VM state changes, or "vm_and_task_state" for notifications on VM and task state changes.
= /usr/lib/python/site-packages/nova(StrOpt) Directory where the nova python module is installed
= 10(IntOpt) Seconds between nodes reporting state to datastore
= /etc/nova/rootwrap.conf(StrOpt) Path to the rootwrap configuration file to use for running commands as root
= 60(IntOpt) Maximum time since last check-in for up service
= $pybasedir(StrOpt) Top-level directory for maintaining nova's state
= None(StrOpt) Explicitly specify the temporary working directory
= False(BoolOpt) Start and use a daemon that can run the commands that need to be run with root privileges. This option is usually enabled on nodes that run nova compute processes
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
[workarounds]
= True(BoolOpt) DEPRECATED: Whether to destroy instances on startup when we suspect they have previously been evacuated. This can result in data loss if undesired. See https://launchpad.net/bugs/1419785
= True(BoolOpt) When using libvirt 1.2.2 live snapshots fail intermittently under load. This config option provides a mechanism to enable live snapshot while this is resolved. See https://bugs.launchpad.net/nova/+bug/1334398
= False(BoolOpt) This option allows a fallback to sudo for performance reasons. For example see https://bugs.launchpad.net/nova/+bug/1415106
= True(BoolOpt) Whether or not to handle events raised from the compute driver's 'emit_event' method. These are lifecycle events raised from compute drivers that implement the method. An example of a lifecycle event is an instance starting or stopping. If the instance is going through task state changes due to an API operation, like resize, the events are ignored. However, this is an advanced feature which allows the hypervisor to signal to the compute service that an unexpected state change has occurred in an instance and the instance can be shutdown automatically - which can inherently race in reboot operations or when the compute service or host is rebooted, either planned or due to an unexpected outage. Care should be taken when using this and sync_power_state_interval is negative since then if any instances are out of sync between the hypervisor and the Nova database they will have to be synchronized manually. See https://bugs.launchpad.net/bugs/1444630
-
diff --git a/doc/common/tables/nova-compute.xml b/doc/common/tables/nova-compute.xml deleted file mode 100644 index 1df007920a..0000000000 --- a/doc/common/tables/nova-compute.xml +++ /dev/null @@ -1,184 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Compute configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(MultiStrOpt) Monitor classes available to the compute which may be specified more than once. This option is DEPRECATED and no longer used. Use setuptools entry points to list available monitor plugins.
= None(StrOpt) Driver to use for controlling virtualization. Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver, ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver
= nova.compute.manager.ComputeManager(StrOpt) Full class name for the Manager for compute
= (ListOpt) A list of monitors that can be used for getting compute metrics. You can use the alias/name from the setuptools entry points for nova.compute.monitors.* namespaces. If no namespace is supplied, the "cpu." namespace is assumed for backwards-compatibility. An example value that would enable both the CPU and NUMA memory bandwidth monitors that use the virt driver variant: ["cpu.virt_driver", "numa_mem_bw.virt_driver"]
= vcpu(ListOpt) The names of the extra resources to track.
= nova.compute.stats.Stats(StrOpt) Class that will manage stats for the local compute host
= localhost(StrOpt) Console proxy host to use to connect to instances on this host.
= nova.console.manager.ConsoleProxyManager(StrOpt) Full class name for the Manager for console proxy
= m1.small(StrOpt) Default flavor to use for the EC2 API only. The Nova API does not support a default flavor.
= INFO(StrOpt) Default notification level for outgoing notifications
= True(BoolOpt) Enables returning of the instance password by the relevant server API calls such as create, rebuild, or rescue. If the hypervisor does not support password injection, then the password returned will not be correct
= 60(IntOpt) Number of seconds between instance network information cache updates
= 2400(IntOpt) Number of seconds to wait between runs of the image cache manager. Set to -1 to disable. Setting this to 0 will run at the default rate.
= _base(StrOpt) Where cached images are stored under $instances_path. This is NOT the full path - just a folder name. For per-compute-host cached images, set to _base_$my_ip
= 0(IntOpt) Amount of time in seconds an instance can be in BUILD before going into ERROR status. Set to 0 to disable.
= 300(IntOpt) Interval in seconds for retrying failed instance file deletes. Set to -1 to disable. Setting this to 0 will run at the default rate.
= False(BoolOpt) Generate periodic compute.instance.exists notifications
= month(StrOpt) Time period to generate instance usages for. Time period must be hour, day, month or year
= $state_path/instances(StrOpt) Where instances are stored on disk
= 10(IntOpt) Maximum number of instance builds to run concurrently
= 5(IntOpt) The number of times to attempt to reap an instance's files.
= 0(IntOpt) Automatically hard reboot an instance if it has been stuck in a rebooting state longer than N seconds. Set to 0 to disable.
= 0(IntOpt) Interval in seconds for reclaiming deleted instances
= 0(IntOpt) Automatically unrescue an instance after N seconds. Set to 0 to disable.
= 0(IntOpt) Automatically confirm resizes after N seconds. Set to 0 to disable.
= False(BoolOpt) Whether to start guests that were running before the host rebooted
= reap(StrOpt) Action to take if a running deleted instance is detected. Set to 'noop' to take no action.
= 1800(IntOpt) Number of seconds to wait between runs of the cleanup task.
= 0(IntOpt) Number of seconds after being deleted when a running instance should be considered eligible for cleanup.
= 0(IntOpt) Time in seconds before a shelved instance is eligible for removing from a host. -1 never offload, 0 offload immediately when shelved
= 3600(IntOpt) Interval in seconds for polling shelved instances to offload. Set to -1 to disable. Setting this to 0 will run at the default rate.
= 60(IntOpt) Total amount of time to wait in seconds for an instance to perform a clean shutdown.
= 600(IntOpt) Interval to sync power states between the database and the hypervisor. Set to -1 to disable. Setting this to 0 will run at the default rate.
= 0(IntOpt) Interval in seconds for updating compute resources. A number less than 0 means to disable the task completely. Leaving this at the default of 0 will cause this to run at the default periodic interval. Setting it to any positive value will cause it to run at approximately that number of seconds.
= True(BoolOpt) Fail instance boot if vif plugging fails
= 300(IntOpt) Number of seconds to wait for neutron vif plugging events to arrive before continuing or failing (see vif_plugging_is_fatal). If this is set to zero and vif_plugging_is_fatal is False, events should not be expected to arrive at all.
-
diff --git a/doc/common/tables/nova-conductor.xml b/doc/common/tables/nova-conductor.xml deleted file mode 100644 index a0cbf4b767..0000000000 --- a/doc/common/tables/nova-conductor.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of conductor configuration options
Configuration option = Default valueDescription
[DEFAULT]
= -1(IntOpt) Number of times to retry live-migration before failing. If == -1, try until out of hosts. If == 0, only try once, no retries.
[conductor]
= nova.conductor.manager.ConductorManager(StrOpt) Full class name for the Manager for conductor
= conductor(StrOpt) The topic on which conductor nodes listen
= False(BoolOpt) Perform nova-conductor operations locally
= None(IntOpt) Number of workers for OpenStack Conductor service. The default will be the number of CPUs available.
-
diff --git a/doc/common/tables/nova-configdrive.xml b/doc/common/tables/nova-configdrive.xml deleted file mode 100644 index b078b6c5f4..0000000000 --- a/doc/common/tables/nova-configdrive.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of config drive configuration options
Configuration option = Default valueDescription
[DEFAULT]
= iso9660(StrOpt) Config drive format.
= 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01(StrOpt) List of metadata versions to skip placing into the config drive
= None(StrOpt) Set to "always" to force injection to take place on a config drive. NOTE: The "always" will be deprecated in the Liberty release cycle.
= genisoimage(StrOpt) Name and optionally path of the tool used for ISO image creation
[hyperv]
= False(BoolOpt) Attaches the Config Drive image as a cdrom drive instead of a disk drive
= False(BoolOpt) Sets the admin password in the config drive image
-
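For orientation, the config drive options above usually appear in nova.conf as follows; option names such as force_config_drive and mkisofs_cmd follow upstream nova naming and are assumptions here:

    [DEFAULT]
    # Config drive format
    config_drive_format = iso9660
    # Force a config drive even when one is not requested at boot
    force_config_drive = always
    # Tool used to build the ISO image
    mkisofs_cmd = genisoimage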
diff --git a/doc/common/tables/nova-console.xml b/doc/common/tables/nova-console.xml deleted file mode 100644 index 29733c4477..0000000000 --- a/doc/common/tables/nova-console.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of console configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (ListOpt) Allowed Origin header hostnames for access to console proxy servers
= localhost(StrOpt) Publicly visible name for this console host
= 600(IntOpt) How many seconds before deleting tokens
= nova.consoleauth.manager.ConsoleAuthManager(StrOpt) Manager for console auth
[mks]
= False(BoolOpt) Enable MKS related features
= http://127.0.0.1:6090/(StrOpt) Location of MKS web console proxy, in the form "http://127.0.0.1:6090/"
-
diff --git a/doc/common/tables/nova-cors.xml b/doc/common/tables/nova-cors.xml deleted file mode 100644 index 45f8c8b8fb..0000000000 --- a/doc/common/tables/nova-cors.xml +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CORS configuration options
Configuration option = Default valueDescription
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
-
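As an illustration of the CORS options above, a minimal sketch; the option names follow oslo.middleware conventions and the origin is a placeholder:

    [cors]
    # Domain allowed to access the API from a browser
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    max_age = 3600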
diff --git a/doc/common/tables/nova-database.xml b/doc/common/tables/nova-database.xml deleted file mode 100644 index 41d8691609..0000000000 --- a/doc/common/tables/nova-database.xml +++ /dev/null @@ -1,182 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of database configuration options
Configuration option = Default valueDescription
[DEFAULT]
= nova.db(StrOpt) The driver to use for database access
[api_database]
= None(StrOpt) The SQLAlchemy connection string to use to connect to the Nova API database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= True(BoolOpt) If True, SQLite uses synchronous mode.
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
= False(BoolOpt) Enable the experimental use of thread pooling for all DB API calls
-
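A short sketch of how the connection options above are typically set, assuming the upstream option name connection in both sections; the connection strings are placeholders:

    [database]
    # Main nova database
    connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
    max_pool_size = 10

    [api_database]
    # Separate nova API database
    connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api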
diff --git a/doc/common/tables/nova-debug.xml b/doc/common/tables/nova-debug.xml deleted file mode 100644 index bfd099632b..0000000000 --- a/doc/common/tables/nova-debug.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[guestfs]
= False(BoolOpt) Enable guestfs debug
-
diff --git a/doc/common/tables/nova-ec2.xml b/doc/common/tables/nova-ec2.xml deleted file mode 100644 index fbcc45c5e8..0000000000 --- a/doc/common/tables/nova-ec2.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of EC2 configuration options
Configuration option = Default valueDescription
[DEFAULT]
= $my_ip(StrOpt) The internal IP address of the EC2 API server
= $my_ip(StrOpt) The IP address of the EC2 API server
= 0.0.0.0(StrOpt) The IP address on which the EC2 API will listen.
= 8773(IntOpt) The port on which the EC2 API will listen.
= /(StrOpt) The path prefix used to call the ec2 API server
= 8773(IntOpt) The port of the EC2 API server
= False(BoolOpt) Return the IP address as private dns hostname in describe instances
= http(StrOpt) The protocol to use when connecting to the EC2 API server
= True(BoolOpt) Validate security group names according to EC2 specification
= 300(IntOpt) Time in seconds before ec2 timestamp expires
= None(IntOpt) Number of workers for EC2 API service. The default will be equal to the number of CPUs available.
= False(BoolOpt) Disable SSL certificate verification.
= http://localhost:5000/v2.0/ec2tokens(StrOpt) URL to get token from ec2 request.
= 5(IntOpt) Number of failed auths before lockout.
= 15(IntOpt) Number of minutes to lockout if triggered.
= 15(IntOpt) Number of minutes for lockout window.
= (ListOpt) List of region=fqdn pairs separated by commas
-
diff --git a/doc/common/tables/nova-ephemeral_storage_encryption.xml b/doc/common/tables/nova-ephemeral_storage_encryption.xml deleted file mode 100644 index 0016f8822f..0000000000 --- a/doc/common/tables/nova-ephemeral_storage_encryption.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ephemeral storage encryption configuration options
Configuration option = Default valueDescription
[ephemeral_storage_encryption]
= aes-xts-plain64(StrOpt) The cipher and mode to be used to encrypt ephemeral storage. Which ciphers are available depends on kernel support. See /proc/crypto for the list of available options.
= False(BoolOpt) Whether to encrypt ephemeral storage
= 512(IntOpt) The bit length of the encryption key to be used to encrypt ephemeral storage (in XTS mode only half of the bits are used for encryption key)
-
diff --git a/doc/common/tables/nova-fping.xml b/doc/common/tables/nova-fping.xml deleted file mode 100644 index 7a333ef32b..0000000000 --- a/doc/common/tables/nova-fping.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - -
Description of fping configuration options
Configuration option = Default valueDescription
[DEFAULT]
= /usr/sbin/fping(StrOpt) Full path to fping.
-
diff --git a/doc/common/tables/nova-glance.xml b/doc/common/tables/nova-glance.xml deleted file mode 100644 index eab41dc143..0000000000 --- a/doc/common/tables/nova-glance.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of glance configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) Base URL that will be presented to users in links to glance resources
[glance]
= (ListOpt) A list of url scheme that can be downloaded directly via the direct_url. Currently supported schemes: [file].
= False(BoolOpt) Allow to perform insecure SSL (https) requests to glance
= None(ListOpt) A list of the glance api servers available to nova. Prefix with https:// for ssl-based glance api servers. ([hostname|ip]:port)
= $my_ip(StrOpt) Default glance hostname or IP address
= 0(IntOpt) Number of retries when uploading / downloading an image to / from glance.
= 9292(IntOpt) Default glance port
= http(StrOpt) Default protocol to use when connecting to glance. Set to https for SSL.
[image_file_url]
= (ListOpt) List of file systems that are configured in this file in the image_file_url:<list entry name> sections
-
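For context, the glance options above are normally expressed in nova.conf as follows; option names such as api_servers and num_retries follow upstream nova naming and are assumptions here:

    [glance]
    # Glance API servers available to nova ([hostname|ip]:port)
    api_servers = controller:9292
    # Retry count for image upload/download operations
    num_retries = 2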
diff --git a/doc/common/tables/nova-hyperv.xml b/doc/common/tables/nova-hyperv.xml deleted file mode 100644 index 946f783e34..0000000000 --- a/doc/common/tables/nova-hyperv.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of HyperV configuration options
Configuration option = Default valueDescription
[hyperv]
= 1.0(FloatOpt) Enables dynamic memory allocation (ballooning) when set to a value greater than 1. The value expresses the ratio between the total RAM assigned to an instance and its startup RAM amount. For example a ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of RAM allocated at startup
= False(BoolOpt) Enables metrics collection for an instance by using Hyper-V's metric APIs. Collected data can be retrieved by other apps and services, e.g. Ceilometer. Requires Hyper-V / Windows Server 2012 and above
= False(BoolOpt) Force V1 WMI utility classes
= (StrOpt) The name of a Windows share mapped to the "instances_path" dir and used by the resize feature to copy files to the target host. If left blank, an administrative share will be used, looking for the same "instances_path" used locally
= False(BoolOpt) Required for live migration among hosts with different CPU features
= 10(IntOpt) The number of times to retry checking for a disk mounted via iSCSI.
= 5(IntOpt) Interval between checks for a mounted iSCSI disk, in seconds.
= 60(IntOpt) The timeframe to be checked for instance power state changes.
= 2(IntOpt) Instance power state change event polling frequency.
= qemu-img.exe(StrOpt) Path of qemu-img command which is used to convert between different image types
= None(StrOpt) External virtual switch Name, if not provided, the first external virtual switch is used
= 60(IntOpt) Number of seconds to wait for instance to shut down after soft reboot request is made. We fall back to hard reboot if instance does not shutdown within this window.
-
diff --git a/doc/common/tables/nova-hypervisor.xml b/doc/common/tables/nova-hypervisor.xml deleted file mode 100644 index 73261cc039..0000000000 --- a/doc/common/tables/nova-hypervisor.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of hypervisor configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The default format an ephemeral_volume will be formatted with on creation.
= True(BoolOpt) Force backing images to raw format
= none(StrOpt) VM image preallocation mode: "none" => no storage provisioning is done up front, "space" => storage is fully allocated at instance start
= 10(IntOpt) Amount of time, in seconds, to wait for NBD device start up.
= True(BoolOpt) Whether to use cow images
= None(StrOpt) Defines which pcpus instance vcpus can use. For example, "4-12,^8,15"
= [](MultiStrOpt) Name of the mkfs commands for ephemeral device. The format is <os_type>=<mkfs command>
-
diff --git a/doc/common/tables/nova-ipv6.xml b/doc/common/tables/nova-ipv6.xml deleted file mode 100644 index edba4d30ac..0000000000 --- a/doc/common/tables/nova-ipv6.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of IPv6 configuration options
Configuration option = Default valueDescription
[DEFAULT]
= fd00::/48(StrOpt) Fixed IPv6 address block
= None(StrOpt) Default IPv6 gateway
= rfc2462(StrOpt) Backend to use for IPv6 generation
= False(BoolOpt) Use IPv6
-
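To make the IPv6 options above concrete, a minimal sketch; option names such as use_ipv6 and fixed_range_v6 follow upstream nova-network naming and are assumptions here:

    [DEFAULT]
    # Enable IPv6 for nova-network managed networks
    use_ipv6 = True
    # Fixed IPv6 address block and default gateway
    fixed_range_v6 = fd00::/48
    gateway_v6 = fd00::1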
diff --git a/doc/common/tables/nova-ironic.xml b/doc/common/tables/nova-ironic.xml deleted file mode 100644 index 2b49f13a21..0000000000 --- a/doc/common/tables/nova-ironic.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of bare metal configuration options
Configuration option = Default valueDescription
[ironic]
= None(StrOpt) Ironic keystone auth token. DEPRECATED: use admin_username, admin_password, and admin_tenant_name instead
= None(StrOpt) Ironic keystone admin password.
= None(StrOpt) Ironic keystone tenant name.
= None(StrOpt) Keystone public API endpoint.
= None(StrOpt) Ironic keystone admin name
= None(StrOpt) URL for Ironic API endpoint.
= 60(IntOpt) How many retries when a request does conflict. If <= 0, only try once, no retries.
= 2(IntOpt) How often to retry in seconds when a request does conflict
= 1(IntOpt) Version of Ironic API service endpoint.
= None(StrOpt) Log level override for ironicclient. Set this in order to override the global "default_log_levels", "verbose", and "debug" settings. DEPRECATED: use standard logging configuration.
-
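As a sketch of a typical Ironic-backed compute node using the options above; option names such as api_endpoint and admin_username follow upstream nova naming, and the endpoints and credentials are placeholders:

    [ironic]
    # Ironic API endpoint
    api_endpoint = http://controller:6385/v1
    # Keystone public API endpoint and service credentials
    admin_url = http://controller:35357/v2.0
    admin_username = ironic
    admin_password = IRONIC_PASS
    admin_tenant_name = service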
diff --git a/doc/common/tables/nova-keymgr.xml b/doc/common/tables/nova-keymgr.xml deleted file mode 100644 index 7e85408814..0000000000 --- a/doc/common/tables/nova-keymgr.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of key manager configuration options
Configuration option = Default valueDescription
[keymgr]
= nova.keymgr.conf_key_mgr.ConfKeyManager(StrOpt) The full class name of the key manager API class
= None(StrOpt) Fixed key returned by key manager, specified in hex
-
diff --git a/doc/common/tables/nova-ldap.xml b/doc/common/tables/nova-ldap.xml deleted file mode 100644 index 0ea28e543a..0000000000 --- a/doc/common/tables/nova-ldap.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of LDAP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= ou=hosts,dc=example,dc=org(StrOpt) Base DN for DNS entries in LDAP
= password(StrOpt) Password for LDAP DNS
= ['dns.example.org'](MultiStrOpt) DNS Servers for LDAP DNS driver
= 86400(StrOpt) Expiry interval (in seconds) for LDAP DNS driver Statement of Authority
= hostmaster@example.org(StrOpt) Hostmaster for LDAP DNS driver Statement of Authority
= 7200(StrOpt) Minimum interval (in seconds) for LDAP DNS driver Statement of Authority
= 1800(StrOpt) Refresh interval (in seconds) for LDAP DNS driver Statement of Authority
= 3600(StrOpt) Retry interval (in seconds) for LDAP DNS driver Statement of Authority
= ldap://ldap.example.com:389(StrOpt) URL for LDAP server which will store DNS entries
= uid=admin,ou=people,dc=example,dc=org(StrOpt) User for LDAP DNS
-
diff --git a/doc/common/tables/nova-libvirt.xml b/doc/common/tables/nova-libvirt.xml deleted file mode 100644 index ee6ae9c0dd..0000000000 --- a/doc/common/tables/nova-libvirt.xml +++ /dev/null @@ -1,215 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Libvirt configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Should unused base images be removed?
= 86400(IntOpt) Unused unresized base images younger than this will not be removed
[libvirt]
= VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC(StrOpt) Migration flags to be set for block migration
= False(BoolOpt) Write a checksum for files in _base to disk
= 3600(IntOpt) How frequently to checksum base images
= (StrOpt) Override the default libvirt URI (which is dependent on virt_type)
= None(StrOpt) Set to "host-model" to clone the host CPU feature flags; to "host-passthrough" to use the host CPU model exactly; to "custom" to use a named CPU model; to "none" to not set any CPU model. If virt_type="kvm|qemu", it will default to "host-model", otherwise it will default to "none"
= None(StrOpt) Set to a named libvirt CPU model (see names listed in /usr/share/libvirt/cpu_map.xml). Only has effect if cpu_mode="custom" and virt_type="kvm|qemu"
= (ListOpt) Specific cachemodes to use for different disk types e.g: file=directsync,block=none
= None(StrOpt) Override the default disk prefix for the devices attached to a server, which is dependent on virt_type. (valid options are: sd, xvd, uvd, vd)
= (ListOpt) List of gid targets and ranges. Syntax is guest-gid:host-gid:count. Maximum of 5 allowed.
= None(StrOpt) Discard option for nova managed disks. Need Libvirt(1.0.6) Qemu1.5 (raw format) Qemu1.6(qcow2 format)
= None(ListOpt) For qemu or KVM guests, set this option to specify a default machine type per host architecture. You can find a list of supported machine types in your environment by checking the output of the "virsh capabilities" command. The format of the value for this config option is host-arch=machine-type. For example: x86_64=machinetype1,armv7l=machinetype2
= $instances_path/$image_cache_subdirectory_name/%(image)s.info(StrOpt) Allows image information files to be stored in non-standard locations
= (StrOpt) Path to the ceph configuration file to use
= rbd(StrOpt) The RADOS pool in which rbd volumes are stored
= default(StrOpt) VM Images format. If default is specified, then use_cow_images flag is used instead of this one.
= None(StrOpt) LVM Volume Group that is used for VM images, when you specify images_type=lvm.
= False(BoolOpt) Inject the ssh public key at boot time
= -2(IntOpt) The partition to inject to : -2 => disable, -1 => inspect (libguestfs only), 0 => not partitioned, >0 => partition number
= False(BoolOpt) Inject the admin password at boot time, without an agent.
= None(StrOpt) The iSCSI transport iface to use to connect to target in case offload support is desired. Default format is of the form <transport_name>.<hwaddress> where <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address of the interface and can be generated via the iscsiadm -m iface command. Do not confuse the iscsi_iface parameter to be provided here with the actual transport name.
= False(BoolOpt) Use multipath connection of the iSCSI volume
= False(BoolOpt) Use multipath connection of the iSER volume
= 10(IntOpt) A number of seconds to memory usage statistics period. Zero or negative value mean to disable memory usage statistics.
= True(BoolOpt) DEPRECATED: Should unused kernel images be removed? This is only safe to enable if all compute nodes have been updated to support this option (running Grizzly or newer level compute). This will be the default behavior in the 13.0.0 release.
= 3600(IntOpt) Unused resized base images younger than this will not be removed
= None(StrOpt) Rescue ami image. This will not be used if an image id is provided by the user.
= None(StrOpt) Rescue aki image
= None(StrOpt) Rescue ari image
= None(StrOpt) A path to a device that will be used as source of entropy on the host. Permitted options are: /dev/random or /dev/hwrng
= False(BoolOpt) Compress snapshot images when possible. This currently applies exclusively to qcow2 images
= None(StrOpt) Snapshot image format. Defaults to same as source image
= $instances_path/snapshots(StrOpt) Location where libvirt driver will store snapshots before uploading them to image service
= False(BoolOpt) Create sparse logical volumes (with virtualsize) if this flag is set to True.
= auto(StrOpt) The data source used to populate the host "serial" UUID exposed to the guest in the virtual BIOS.
= (ListOpt) List of uid targets and ranges. Syntax is guest-uid:host-uid:count. Maximum of 5 allowed.
= True(BoolOpt) Sync virtual and real mouse cursors in Windows VMs
= True(BoolOpt) Use virtio for bridge interfaces with KVM/QEMU
= kvm(StrOpt) Libvirt domain type
= zero(StrOpt) Method used to wipe old volumes.
= 0(IntOpt) Size in MiB to wipe at start of old volumes. 0 => all
= 120(IntOpt) Number of seconds to wait for instance to shut down after soft reboot request is made. We fall back to hard reboot if instance does not shutdown within this window.
-
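Since the option names were stripped from the flattened table above, the following nova.conf fragment is only an illustrative sketch; the [libvirt] option names shown (virt_type, cpu_mode, images_type) are assumed from the Nova defaults of this release rather than read from the table:

    [libvirt]
    # Hypervisor driver used by this compute node (kvm, qemu, lxc, xen, parallels)
    virt_type = kvm
    # "host-model" clones the host CPU feature flags into the guest
    cpu_mode = host-model
    # Backend for instance disks; "default" falls back to the use_cow_images flag
    images_type = default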
diff --git a/doc/common/tables/nova-livemigration.xml b/doc/common/tables/nova-livemigration.xml deleted file mode 100644 index e6f6b40998..0000000000 --- a/doc/common/tables/nova-livemigration.xml +++ /dev/null @@ -1,83 +0,0 @@
Description of live migration configuration options
Configuration option = Default value | Description
[DEFAULT]
= 30(IntOpt) Number of 1 second retries needed in live_migration
= 1(IntOpt) Maximum number of live migrations to run concurrently. This limit is enforced to avoid outbound live migrations overwhelming the host/network and causing failures. It is not recommended that you change this unless you are very sure that doing so is safe and stable in your environment.
[libvirt]
= 0(IntOpt) Maximum bandwidth(in MiB/s) to be used during migration. If set to 0, will choose a suitable default. Some hypervisors do not support this feature and will return an error if bandwidth is not 0. Please refer to the libvirt documentation for further details
= 800(IntOpt) Time to wait, in seconds, for migration to successfully complete transferring data before aborting the operation. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB. Should usually be larger than downtime delay * downtime steps. Set to 0 to disable timeouts.
= 500(IntOpt) Maximum permitted downtime, in milliseconds, for live migration switchover. Will be rounded up to a minimum of 100ms. Use a large value if guest liveness is unimportant.
= 75(IntOpt) Time to wait, in seconds, between each step increase of the migration downtime. Minimum delay is 10 seconds. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB per device
= 10(IntOpt) Number of incremental steps to reach max downtime value. Will be rounded up to a minimum of 3 steps
= VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED(StrOpt) Migration flags to be set for live migration
= 150(IntOpt) Time to wait, in seconds, for migration to make forward progress in transferring data before aborting the operation. Set to 0 to disable timeouts.
= qemu+tcp://%s/system(StrOpt) Migration target URI (any included "%s" is replaced with the migration target hostname)
-
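As a hedged example only (option names assumed from this release's libvirt driver, not preserved in the stripped table), live migration tuning in nova.conf could look like:

    [libvirt]
    # Target URI; any "%s" is replaced with the migration target hostname
    live_migration_uri = qemu+tcp://%s/system
    # Maximum permitted switchover downtime, in milliseconds
    live_migration_downtime = 500
    # Abort the migration if no forward progress is made for this many seconds (0 disables)
    live_migration_progress_timeout = 150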
diff --git a/doc/common/tables/nova-logging.xml b/doc/common/tables/nova-logging.xml deleted file mode 100644 index 62f921342a..0000000000 --- a/doc/common/tables/nova-logging.xml +++ /dev/null @@ -1,128 +0,0 @@
Description of logging configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= False(BoolOpt) Make exception message format errors fatal
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
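A minimal logging sketch for nova.conf, assuming the standard oslo.log option names (debug, log_file, use_syslog, syslog_log_facility), which are not visible in the table above:

    [DEFAULT]
    # Raise the log level from INFO to DEBUG
    debug = True
    # Write logs to a file instead of stdout
    log_file = /var/log/nova/nova.log
    # Also forward log lines to syslog
    use_syslog = True
    syslog_log_facility = LOG_LOCAL0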
diff --git a/doc/common/tables/nova-metadata.xml b/doc/common/tables/nova-metadata.xml deleted file mode 100644 index c199c2b249..0000000000 --- a/doc/common/tables/nova-metadata.xml +++ /dev/null @@ -1,76 +0,0 @@
Description of metadata configuration options
Configuration option = Default value | Description
[DEFAULT]
= 15(IntOpt) Time in seconds to cache metadata; 0 to disable metadata caching entirely (not recommended). Increasing this should improve response times of the metadata API when under heavy load. Higher values may increase memory usage and result in longer times for host metadata changes to take effect.
= $my_ip(StrOpt) The IP address for the metadata API server
= 0.0.0.0(StrOpt) The IP address on which the metadata API will listen.
= 8775(IntOpt) The port on which the metadata API will listen.
= nova.api.manager.MetadataManager(StrOpt) OpenStack metadata service manager
= 8775(IntOpt) The port for the metadata API port
= None(IntOpt) Number of workers for metadata service. The default will be the number of CPUs available.
= nova.api.metadata.vendordata_json.JsonFileVendorData(StrOpt) Driver to use for vendor data
= None(StrOpt) File to load JSON formatted vendor data from
-
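An illustrative fragment for the metadata API options; names such as metadata_listen and metadata_workers are assumed, since the table only preserves defaults and descriptions:

    [DEFAULT]
    # Bind address and port of the metadata API
    metadata_listen = 0.0.0.0
    metadata_listen_port = 8775
    # Number of metadata API workers; defaults to the number of CPUs
    metadata_workers = 4
    # Cache built metadata for 15 seconds (0 disables caching)
    metadata_cache_expiration = 15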
diff --git a/doc/common/tables/nova-network.xml b/doc/common/tables/nova-network.xml deleted file mode 100644 index 576da1d4cf..0000000000 --- a/doc/common/tables/nova-network.xml +++ /dev/null @@ -1,294 +0,0 @@
Description of network configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Whether to allow network traffic from same network
= False(BoolOpt) Autoassigning floating IP to VM
= 0(IntOpt) Number of addresses reserved for vpn clients
= 5(IntOpt) Number of attempts to create unique mac address
= None(StrOpt) Name of network to use to set access IPs for instances
= nova(StrOpt) Default pool for floating IPs
= False(BoolOpt) Whether to batch up the application of IPTables rules during a host restart and apply all at the end of the init phase
= novalocal(StrOpt) Domain to use for building the hostnames
= 86400(IntOpt) Lifetime of a DHCP lease in seconds
= $bindir/nova-dhcpbridge(StrOpt) Location of nova-dhcpbridge
= ['/etc/nova/nova-dhcpbridge.conf'](MultiStrOpt) Location of flagfiles for dhcpbridge
= [](MultiStrOpt) If set, uses specific DNS server for dnsmasq. Can be specified multiple times.
= -1(IntOpt) Number of seconds to wait between runs of updates to DNS entries.
= (StrOpt) Override the default dnsmasq settings with this file
= 3(IntOpt) Number of times to retry ebtables commands on failure.
= 1.0(FloatOpt) Number of seconds to wait between ebtables retries.
= None(StrOpt) Firewall driver (defaults to hypervisor specific iptables driver)
= 600(IntOpt) Seconds after which a deallocated IP is disassociated
= False(BoolOpt) Whether to attempt to inject network setup into guest
= None(StrOpt) FlatDhcp will bridge into this interface if set
= None(StrOpt) Bridge for simple network instances
= 8.8.4.4(StrOpt) DNS server for simple network
= nova.network.noop_dns_driver.NoopDNSDriver(StrOpt) Full class name for the DNS Manager for floating IPs
= True(BoolOpt) If True, send a dhcp release on instance termination
= [](MultiStrOpt) Traffic to this range will always be snatted to the fallback ip, even if it would normally be bridged out of the node. Can be specified multiple times.
= ['all'](MultiStrOpt) An interface that bridges can forward to. If this is set to all then all traffic will be forwarded. Can be specified multiple times.
= None(StrOpt) Default IPv4 gateway
= $pybasedir/nova/virt/interfaces.template(StrOpt) Template file for injected network
= (StrOpt) Full class name for the DNS Zone for instance IPs
= nova.network.noop_dns_driver.NoopDNSDriver(StrOpt) Full class name for the DNS Manager for instance IPs
= (StrOpt) Regular expression to match the iptables rule that should always be on the bottom.
= DROP(StrOpt) The table that iptables to jump to when a packet is to be dropped.
= (StrOpt) Regular expression to match the iptables rule that should always be on the top.
= nova.network.l3.LinuxNetL3(StrOpt) Indicates underlying L3 management library
= nova.network.linux_net.LinuxBridgeInterfaceDriver(StrOpt) Driver used to create ethernet devices.
= br-int(StrOpt) Name of Open vSwitch bridge used with linuxnet
= False(BoolOpt) Default value for multi_host in networks. Also, if set, some rpc network calls will be sent directly to host.
= 0(IntOpt) Number of times to retry network allocation on failures
= nova.network.api.API(StrOpt) The full class name of the network API class to use
= None(IntOpt) DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. MTU setting for network interface.
= nova.network.linux_net(StrOpt) Driver to use for network creation
= nova.network.manager.VlanManager(StrOpt) Full class name for the Manager for network
= 256(IntOpt) Number of addresses in each private subnet
= network(StrOpt) The topic network nodes listen on
= $state_path/networks(StrOpt) Location to keep network config files
= 1(IntOpt) Number of networks to support
= 120(IntOpt) Amount of time, in seconds, that ovs_vsctl should wait for a response from the database. 0 is to wait forever.
= eth0(StrOpt) Interface for public IP addresses
= $my_ip(StrOpt) Public IP of network host
= nova(StrOpt) The full class name of the security API class
= False(BoolOpt) Send gratuitous ARPs for HA setup
= 3(IntOpt) Send this many gratuitous ARPs for HA setup
= False(BoolOpt) DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. If True in multi_host mode, all compute hosts share the same dhcp address. The same IP address used for DHCP will be added on each nova-network node which is only visible to the vms on the same host.
= False(BoolOpt) If True, unused gateway devices (VLAN and bridge) are deleted in VLAN network mode with multi hosted networks
= False(BoolOpt) If True, when a DNS entry must be updated, it sends a fanout cast to all network hosts to update their DNS entries in multi host mode
= False(BoolOpt) If set, uses the dns1 and dns2 from the network ref. as dns servers.
= False(StrOpt) Control for checking for default networks
= False(BoolOpt) Use single default gateway. Only first nic of vm will get default gateway from dhcp server
= None(StrOpt) VLANs will bridge into this interface if set
= 100(IntOpt) First VLAN for private networks
[libvirt]
= ssh(StrOpt) Use ssh or rsync transport for creating, copying, removing files on the remote host.
[vmware]
= vmnic0(StrOpt) Physical ethernet adapter name for vlan networking
-
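A sketch of a flat-DHCP nova-network setup; the option names (network_manager, flat_interface, and so on) are assumed from nova-network usage of this era and are not recoverable from the table:

    [DEFAULT]
    network_manager = nova.network.manager.FlatDHCPManager
    # Bridge created on each compute node and the interface it enslaves
    flat_network_bridge = br100
    flat_interface = eth1
    # Interface carrying public/floating traffic
    public_interface = eth0
    # Run network services on every compute host
    multi_host = True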
diff --git a/doc/common/tables/nova-neutron.xml b/doc/common/tables/nova-neutron.xml deleted file mode 100644 index 85af3ab54f..0000000000 --- a/doc/common/tables/nova-neutron.xml +++ /dev/null @@ -1,127 +0,0 @@
Description of neutron configuration options
Configuration option = Default value | Description
[DEFAULT]
= default(StrOpt) Default tenant id when creating neutron networks
[neutron]
= http://localhost:5000/v2.0(StrOpt) Authorization URL for connecting to neutron in admin context. DEPRECATED: specify an auth_plugin and appropriate credentials instead.
= None(StrOpt) Password for connecting to neutron in admin context DEPRECATED: specify an auth_plugin and appropriate credentials instead.
= None(StrOpt) Tenant id for connecting to neutron in admin context DEPRECATED: specify an auth_plugin and appropriate credentials instead.
= None(StrOpt) Tenant name for connecting to neutron in admin context. This option will be ignored if neutron_admin_tenant_id is set. Note that with Keystone V3 tenant names are only unique within a domain. DEPRECATED: specify an auth_plugin and appropriate credentials instead.
= None(StrOpt) User id for connecting to neutron in admin context. DEPRECATED: specify an auth_plugin and appropriate credentials instead.
= None(StrOpt) Username for connecting to neutron in admin context DEPRECATED: specify an auth_plugin and appropriate credentials instead.
= None(StrOpt) Name of the plugin to load
= None(StrOpt) Config Section from which to load plugin specific options
= keystone(StrOpt) Authorization strategy for connecting to neutron in admin context. DEPRECATED: specify an auth_plugin and appropriate credentials instead. If an auth_plugin is specified strategy will be ignored.
= None(StrOpt) PEM encoded Certificate Authority to use when verifying HTTPs connections.
= None(StrOpt) PEM encoded client certificate cert file
= 600(IntOpt) Number of seconds before querying neutron for extensions
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) PEM encoded client certificate key file
= (StrOpt) Shared secret to validate proxies Neutron metadata requests
= br-int(StrOpt) Name of Integration Bridge used by Open vSwitch
= None(StrOpt) Region name for connecting to neutron in admin context
= False(BoolOpt) Set flag to indicate Neutron will proxy metadata requests and resolve instance ids.
= None(IntOpt) Timeout value for http requests
= http://127.0.0.1:9696(StrOpt) URL for connecting to neutron
-
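For comparison, an assumed [neutron] fragment (option names taken from common deployments of this release, not from the table), showing the endpoint and the metadata proxy settings described above; credentials would normally be supplied through an auth plugin rather than the deprecated admin_* options:

    [neutron]
    url = http://controller:9696
    auth_strategy = keystone
    region_name = RegionOne
    # Let the Neutron metadata agent proxy instance metadata requests
    service_metadata_proxy = True
    metadata_proxy_shared_secret = METADATA_SECRET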
diff --git a/doc/common/tables/nova-pci.xml b/doc/common/tables/nova-pci.xml deleted file mode 100644 index 7d22f06359..0000000000 --- a/doc/common/tables/nova-pci.xml +++ /dev/null @@ -1,48 +0,0 @@
Description of PCI configuration options
Configuration option = Default value | Description
[DEFAULT]
= [](MultiStrOpt) An alias for a PCI passthrough device requirement. This allows users to specify the alias in the extra_spec for a flavor, without needing to repeat all the PCI property requirements. For example: pci_alias = { "name": "QuickAssist", "product_id": "0443", "vendor_id": "8086", "device_type": "ACCEL" } defines an alias for the Intel QuickAssist card. (multi valued)
= [](MultiStrOpt) White list of PCI devices available to VMs. For example: pci_passthrough_whitelist = [{"vendor_id": "8086", "product_id": "0443"}]
-
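The two options in this group are named in their own descriptions, so a short example can be grounded directly in the table; the vendor and product IDs are simply the sample values repeated from above:

    [DEFAULT]
    # Expose matching host devices for PCI passthrough
    pci_passthrough_whitelist = {"vendor_id": "8086", "product_id": "0443"}
    # Alias that flavors can request through their extra_specs
    pci_alias = {"name": "QuickAssist", "vendor_id": "8086", "product_id": "0443", "device_type": "ACCEL"}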
diff --git a/doc/common/tables/nova-periodic.xml b/doc/common/tables/nova-periodic.xml deleted file mode 100644 index 2e4133aef2..0000000000 --- a/doc/common/tables/nova-periodic.xml +++ /dev/null @@ -1,48 +0,0 @@
Description of periodic configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Enable periodic tasks
= 60(IntOpt) Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0)
-
diff --git a/doc/common/tables/nova-policy.xml b/doc/common/tables/nova-policy.xml deleted file mode 100644 index 8603e88d3d..0000000000 --- a/doc/common/tables/nova-policy.xml +++ /dev/null @@ -1,92 +0,0 @@
Description of policy configuration options
Configuration option = Default value | Description
[DEFAULT]
= True(BoolOpt) Permit instance snapshot operations.
= False(BoolOpt) Allow destination machine to match source for resize. Useful when testing in single-host environments.
= 0(IntOpt) Number of seconds between subsequent usage refreshes. This defaults to 0(off) to avoid additional load but it is useful to turn on to help keep quota usage up to date and reduce the impact of out of sync usage issues. Note that quotas are not updated on a periodic task, they will update on a new reservation if max_age has passed since the last reservation
= 3(IntOpt) Maximum number of devices that will result in a local image being created on the hypervisor node. A negative number means unlimited. Setting max_local_block_devices to 0 means that any request that attempts to create a local disk will fail. This option is meant to limit the number of local discs (so root local disc that is the result of --image being used, and any other ephemeral and swap disks). 0 does not mean that images will be automatically converted to volumes and boot instances from volumes - it just means that all requests that attempt to create a local disk will fail.
= (StrOpt) When set, compute API will consider duplicate hostnames invalid within the specified scope, regardless of case. Should be empty, "project" or "global".
= 1000(IntOpt) The maximum number of items returned in a single response from a collection resource
= 12(IntOpt) Length of generated instance admin passwords
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
= 86400(IntOpt) Number of seconds until a reservation expires
= False(BoolOpt) Attempt to resize the filesystem by accessing the image over a block device. This is done by the host and may not be necessary if the image contains a recent version of cloud-init. Possible mechanisms require the nbd driver (for qcow and raw), or loop (for raw).
= 0(IntOpt) Count of reservations until usage is refreshed. This defaults to 0(off) to avoid additional load but it is useful to turn on to help keep quota usage up to date and reduce the impact of out of sync usage issues.
-
diff --git a/doc/common/tables/nova-qpid.xml b/doc/common/tables/nova-qpid.xml deleted file mode 100644 index af487e6618..0000000000 --- a/doc/common/tables/nova-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@
Description of Qpid configuration options
Configuration option = Default value | Description
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first with the payload and a second to confirm that the payload has been sent. The two-reply behaviour is planned for removal in the N release, but backward compatibility must be kept until then. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/nova-quobyte.xml b/doc/common/tables/nova-quobyte.xml deleted file mode 100644 index 0da8caa6f9..0000000000 --- a/doc/common/tables/nova-quobyte.xml +++ /dev/null @@ -1,48 +0,0 @@
Description of Quobyte USP volume driver configuration options
Configuration option = Default value | Description
[libvirt]
= None(StrOpt) Path to a Quobyte Client configuration file.
= $state_path/mnt(StrOpt) Directory where the Quobyte volume is mounted on the compute node
-
diff --git a/doc/common/tables/nova-quota.xml b/doc/common/tables/nova-quota.xml deleted file mode 100644 index a84f5498b1..0000000000 --- a/doc/common/tables/nova-quota.xml +++ /dev/null @@ -1,119 +0,0 @@
Description of quota configuration options
Configuration option = Default value | Description
[DEFAULT]
= 600(IntOpt) Interval to pull network bandwidth usage info. Not supported on all hypervisors. Set to -1 to disable. Setting this to 0 will run at the default rate.
= False(BoolOpt) Enables or disables quota checking for tenant networks
= 20(IntOpt) Number of instance cores allowed per project
= nova.quota.DbQuotaDriver(StrOpt) Default driver to use for quota checks
= -1(IntOpt) Number of fixed IPs allowed per project (this should be at least the number of instances allowed)
= 10(IntOpt) Number of floating IPs allowed per project
= 10240(IntOpt) Number of bytes allowed per injected file
= 255(IntOpt) Length of injected file path
= 5(IntOpt) Number of injected files allowed
= 10(IntOpt) Number of instances allowed per project
= 100(IntOpt) Number of key pairs per user
= 128(IntOpt) Number of metadata items allowed per instance
= 3(IntOpt) Number of private networks allowed per project
= 51200(IntOpt) Megabytes of instance RAM allowed per project
= 20(IntOpt) Number of security rules per security group
= 10(IntOpt) Number of security groups per project
= 10(IntOpt) Number of servers per server group
= 10(IntOpt) Number of server groups per project
[cells]
= 600(IntOpt) Seconds between bandwidth updates for cells.
-
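An illustrative quota override block; the quota_* option names are assumed, as the table keeps only the defaults (for example, 10 instances and 20 cores per project):

    [DEFAULT]
    quota_instances = 20
    quota_cores = 40
    quota_ram = 102400
    quota_floating_ips = 20
    quota_security_groups = 20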
diff --git a/doc/common/tables/nova-rabbitmq.xml b/doc/common/tables/nova-rabbitmq.xml deleted file mode 100644 index 8fab07dc06..0000000000 --- a/doc/common/tables/nova-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@
Description of RabbitMQ configuration options
Configuration option = Default value | Description
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How often times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first with the payload and a second to confirm that the payload has been sent. The two-reply behaviour is planned for removal in the N release, but backward compatibility must be kept until then. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
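A hedged nova.conf sketch for the RabbitMQ transport, using the oslo.messaging option names assumed for this release (rpc_backend, rabbit_host, and so on):

    [DEFAULT]
    rpc_backend = rabbit

    [oslo_messaging_rabbit]
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = RABBIT_PASS
    # Consider the broker down if heartbeats fail for 60 seconds
    heartbeat_timeout_threshold = 60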
diff --git a/doc/common/tables/nova-rdp.xml b/doc/common/tables/nova-rdp.xml deleted file mode 100644 index 423780c1cc..0000000000 --- a/doc/common/tables/nova-rdp.xml +++ /dev/null @@ -1,48 +0,0 @@
Description of RDP configuration options
Configuration option = Default value | Description
[rdp]
= False(BoolOpt) Enable RDP related features
= http://127.0.0.1:6083/(StrOpt) Location of RDP html5 console proxy, in the form "http://127.0.0.1:6083/"
-
diff --git a/doc/common/tables/nova-redis.xml b/doc/common/tables/nova-redis.xml deleted file mode 100644 index 4083434ca7..0000000000 --- a/doc/common/tables/nova-redis.xml +++ /dev/null @@ -1,63 +0,0 @@
Description of Redis configuration options
Configuration option = Default value | Description
[DEFAULT]
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/nova-rpc.xml b/doc/common/tables/nova-rpc.xml deleted file mode 100644 index 4edb6a1238..0000000000 --- a/doc/common/tables/nova-rpc.xml +++ /dev/null @@ -1,152 +0,0 @@
Description of RPC configuration options
Configuration option = Default value | Description
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
[cells]
= cells.intercell(StrOpt) Base queue name to use when communicating between cells. Various topics by message type will be appended to this.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
[upgrade_levels]
= None(StrOpt) Set a version cap for messages sent to the base api in any service
-
diff --git a/doc/common/tables/nova-s3.xml b/doc/common/tables/nova-s3.xml deleted file mode 100644 index 5cc7c15169..0000000000 --- a/doc/common/tables/nova-s3.xml +++ /dev/null @@ -1,80 +0,0 @@
Description of S3 configuration options
Configuration option = Default value | Description
[DEFAULT]
= $state_path/buckets(StrOpt) Path to S3 buckets
= /tmp(StrOpt) Parent directory for tempdir used for image decryption
= notchecked(StrOpt) Access key to use for S3 server for images
= False(BoolOpt) Whether to affix the tenant id to the access key when downloading from S3
= $my_ip(StrOpt) Hostname or IP for OpenStack to use when accessing the S3 api
= 0.0.0.0(StrOpt) IP address for S3 API to listen
= 3333(IntOpt) Port for S3 API to listen
= 3333(IntOpt) Port used when accessing the S3 api
= notchecked(StrOpt) Secret key to use for S3 server for images
= False(BoolOpt) Whether to use SSL when talking to S3
-
diff --git a/doc/common/tables/nova-scheduler.xml b/doc/common/tables/nova-scheduler.xml deleted file mode 100644 index 1f491e91cc..0000000000 --- a/doc/common/tables/nova-scheduler.xml +++ /dev/null @@ -1,198 +0,0 @@
Description of scheduler configuration options
Configuration option = Default value | Description
[DEFAULT]
= None(StrOpt) Force the filter to consider only keys matching the given namespace.
= .(StrOpt) The separator used between the namespace and keys
= RetryFilter, AvailabilityZoneFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ExactRamFilter, ExactDiskFilter, ExactCoreFilter(ListOpt) Which filter class names to use for filtering baremetal hosts when not specified in the request.
= 0.0(FloatOpt) Virtual CPU to physical CPU allocation ratio which affects all CPU filters. This configuration specifies a global ratio for CoreFilter. For AggregateCoreFilter, it will fall back to this configuration value if no per-aggregate setting is found. NOTE: This can be set per-compute; if set to 0.0, the value set on the scheduler node(s) will be used, defaulting to 16.0
= 1.0(FloatOpt) Virtual disk to physical disk allocation ratio
= -1.0(FloatOpt) Multiplier used for weighing host io ops. Negative numbers mean a preference to choose light workload compute hosts.
= (ListOpt) Host reserved for specific images
= (ListOpt) Images to run on isolated host
= 50(IntOpt) Ignore hosts that have too many instances
= 8(IntOpt) Tells filters to ignore hosts that have this many or more instances currently in build, resize, snapshot, migrate, rescue or unshelve task states
= 0.0(FloatOpt) Virtual RAM to physical RAM allocation ratio which affects all RAM filters. This configuration specifies a global ratio for RamFilter. For AggregateRamFilter, it will fall back to this configuration value if no per-aggregate setting is found. NOTE: This can be set per-compute; if set to 0.0, the value set on the scheduler node(s) will be used, defaulting to 1.5
= 1.0(FloatOpt) Multiplier used for weighing ram. Negative numbers mean to stack vs spread.
= 0(IntOpt) Amount of disk in MB to reserve for the host
= 512(IntOpt) Amount of memory in MB to reserve for the host
= True(BoolOpt) Whether to force isolated hosts to run only isolated images
= ['nova.scheduler.filters.all_filters'](MultiStrOpt) Filter classes available to the scheduler which may be specified more than once. An entry of "nova.scheduler.filters.all_filters" maps to all filters included with nova.
= RetryFilter, AvailabilityZoneFilter, RamFilter, DiskFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter(ListOpt) Which filter class names to use for filtering hosts when not specified in the request.
= nova.scheduler.filter_scheduler.FilterScheduler(StrOpt) Default driver to use for the scheduler
= 60(IntOpt) How often (in seconds) to run periodic tasks in the scheduler driver of your choice. Please note this is likely to interact with the value of service_down_time, but exactly how they interact will depend on your choice of scheduler driver.
= nova.scheduler.host_manager.HostManager(StrOpt) The scheduler host manager class to use
= 1(IntOpt) New instances will be scheduled on a host chosen randomly from a subset of the N best hosts. This property defines the subset size that a host is chosen from. A value of 1 chooses the first host returned by the weighing functions. This value must be at least 1. Any value less than 1 will be ignored, and 1 will be used instead
= 120(IntOpt) Waiting time interval (seconds) between sending the scheduler a list of current instance UUIDs to verify that its view of instances is in sync with nova. If the CONF option `scheduler_tracks_instance_changes` is False, changing this option will have no effect.
= (StrOpt) Absolute path to scheduler configuration JSON file.
= nova.scheduler.manager.SchedulerManager(StrOpt) Full class name for the Manager for scheduler
= 3(IntOpt) Maximum number of attempts to schedule an instance
= scheduler(StrOpt) The topic scheduler nodes listen on
= True(BoolOpt) Determines if the Scheduler tracks changes to instances to help with its filtering decisions.
= False(BoolOpt) Flag to decide whether to use baremetal_scheduler_default_filters or not.
= nova.scheduler.weights.all_weighers(ListOpt) Which weight class names to use for weighing hosts
[cells]
= 10.0(FloatOpt) Multiplier used for weighing ram. Negative numbers mean to stack vs spread.
= nova.cells.filters.all_filters(ListOpt) Filter classes the cells scheduler should use. An entry of "nova.cells.filters.all_filters" maps to all cells filters included with nova.
= 10(IntOpt) How many retries when no cells are available.
= 2(IntOpt) How often to retry in seconds when no cells are available.
= nova.cells.weights.all_weighers(ListOpt) Weigher classes the cells scheduler should use. An entry of "nova.cells.weights.all_weighers" maps to all cell weighers included with nova.
[metrics]
= True(BoolOpt) How to treat the unavailable metrics. When a metric is NOT available for a host, if it is set to be True, it would raise an exception, so it is recommended to use the scheduler filter MetricFilter to filter out those hosts. If it is set to be False, the unavailable metric would be treated as a negative factor in weighing process, the returned value would be set by the option weight_of_unavailable.
= 1.0(FloatOpt) Multiplier used for weighing metrics.
= -10000.0(FloatOpt) The final weight value to be returned if required is set to False and any one of the metrics set by weight_setting is unavailable.
= (ListOpt) How the metrics are going to be weighed. This should be in the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where <nameX> is one of the metrics to be weighed, and <ratioX> is the corresponding ratio. So for "name1=1.0, name2=-1.0" The final weight would be name1.value * 1.0 + name2.value * -1.0.
-
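As an illustration of the filter scheduler settings above (option names assumed; the driver and filter list are copied from the defaults shown in the table):

    [DEFAULT]
    scheduler_driver = nova.scheduler.filter_scheduler.FilterScheduler
    scheduler_default_filters = RetryFilter, AvailabilityZoneFilter, RamFilter, DiskFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter
    # Effective overcommit ratios when the 0.0 defaults are left in place
    cpu_allocation_ratio = 16.0
    ram_allocation_ratio = 1.5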
diff --git a/doc/common/tables/nova-serial_console.xml b/doc/common/tables/nova-serial_console.xml deleted file mode 100644 index 542cb1c638..0000000000 --- a/doc/common/tables/nova-serial_console.xml +++ /dev/null @@ -1,68 +0,0 @@
Description of serial console configuration options
Configuration option = Default value | Description
[serial_console]
= ws://127.0.0.1:6083/(StrOpt) Location of serial console proxy.
= False(BoolOpt) Enable serial console related features
= 127.0.0.1(StrOpt) IP address on which instance serial console should listen
= 10000:20000(StrOpt) Range of TCP ports to use for serial ports on compute hosts
= 127.0.0.1(StrOpt) The address to which proxy clients (like nova-serialproxy) should connect
= 0.0.0.0(StrOpt) Host on which to listen for incoming requests
= 6083(IntOpt) Port on which to listen for incoming requests
-
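A possible [serial_console] fragment for a compute node; names such as base_url and proxyclient_address are assumed rather than taken from the table, and the addresses are placeholders:

    [serial_console]
    enabled = True
    # URL published to clients of nova-serialproxy
    base_url = ws://controller:6083/
    listen = 0.0.0.0
    proxyclient_address = 10.0.0.31
    port_range = 10000:20000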
diff --git a/doc/common/tables/nova-spice.xml b/doc/common/tables/nova-spice.xml deleted file mode 100644 index e9417105cf..0000000000 --- a/doc/common/tables/nova-spice.xml +++ /dev/null @@ -1,72 +0,0 @@
Description of SPICE configuration options
Configuration option = Default value | Description
[spice]
= True(BoolOpt) Enable spice guest agent support
= False(BoolOpt) Enable spice related features
= http://127.0.0.1:6082/spice_auto.html(StrOpt) Location of spice HTML5 console proxy, in the form "http://127.0.0.1:6082/spice_auto.html"
= 0.0.0.0(StrOpt) Host on which to listen for incoming requests
= 6082(IntOpt) Port on which to listen for incoming requests
= en-us(StrOpt) Keymap for spice
= 127.0.0.1(StrOpt) IP address on which instance spice server should listen
= 127.0.0.1(StrOpt) The address to which proxy clients (like nova-spicehtml5proxy) should connect
-
diff --git a/doc/common/tables/nova-testing.xml b/doc/common/tables/nova-testing.xml deleted file mode 100644 index 4a0e4129c4..0000000000 --- a/doc/common/tables/nova-testing.xml +++ /dev/null @@ -1,56 +0,0 @@
Description of testing configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) If True, skip using the queue and make local calls
= False(BoolOpt) If passed, use fake network devices and addresses
= False(BoolOpt) Whether to log monkey patching
= nova.api.ec2.cloud:nova.notifications.notify_decorator, nova.compute.api:nova.notifications.notify_decorator(ListOpt) List of modules/decorators to monkey patch
-
diff --git a/doc/common/tables/nova-trustedcomputing.xml b/doc/common/tables/nova-trustedcomputing.xml deleted file mode 100644 index 818317790a..0000000000 --- a/doc/common/tables/nova-trustedcomputing.xml +++ /dev/null @@ -1,68 +0,0 @@
Description of trusted computing configuration options
Configuration option = Default value | Description
[trusted_computing]
= /OpenAttestationWebServices/V1.0(StrOpt) Attestation web API URL
= None(StrOpt) Attestation authorization blob - must change
= 60(IntOpt) Attestation status cache valid period length
= False(BoolOpt) Disable SSL cert verification for Attestation service
= 8443(StrOpt) Attestation server port
= None(StrOpt) Attestation server HTTP
= None(StrOpt) Attestation server Cert file for Identity verification
-
diff --git a/doc/common/tables/nova-upgrade_levels.xml b/doc/common/tables/nova-upgrade_levels.xml deleted file mode 100644 index febfe653e2..0000000000 --- a/doc/common/tables/nova-upgrade_levels.xml +++ /dev/null @@ -1,83 +0,0 @@
Description of upgrade levels configuration options
Configuration option = Default value | Description
[cells]
= nova.cells.scheduler.CellsScheduler(StrOpt) Cells scheduler to use
[upgrade_levels]
= None(StrOpt) Set a version cap for messages sent to local cells services
= None(StrOpt) Set a version cap for messages sent to cert services
= None(StrOpt) Set a version cap for messages sent to compute services. If you plan to do a live upgrade from an old version to a newer version, you should set this option to the old version before beginning the live upgrade procedure. Only upgrading to the next version is supported, so you cannot skip a release for the live upgrade procedure.
= None(StrOpt) Set a version cap for messages sent to conductor services
= None(StrOpt) Set a version cap for messages sent to console services
= None(StrOpt) Set a version cap for messages sent to consoleauth services
= None(StrOpt) Set a version cap for messages sent between cells services
= None(StrOpt) Set a version cap for messages sent to network services
= None(StrOpt) Set a version cap for messages sent to scheduler services
-
diff --git a/doc/common/tables/nova-vmware.xml b/doc/common/tables/nova-vmware.xml deleted file mode 100644 index c274d1e7ef..0000000000 --- a/doc/common/tables/nova-vmware.xml +++ /dev/null @@ -1,124 +0,0 @@
Description of VMware configuration options
Configuration option = Default value | Description
[vmware]
= 10(IntOpt) The number of times we retry on failures, e.g., socket error, etc.
= None(StrOpt) Specify a CA bundle file to use in verifying the vCenter server certificate.
= None(StrOpt) The prefix for where cached images are stored. This is NOT the full path - just a folder prefix. This should only be used when a datastore cache should be shared between compute nodes. Note: this should only be used when the compute nodes have a shared file system.
= None(StrOpt) Name of a VMware Cluster ComputeResource.
= None(IntOpt) Set this value if affected by an increased network latency causing repeated characters when typing in a remote console.
= None(StrOpt) Regex to match the name of a datastore.
= None(StrOpt) Hostname or IP address for connection to VMware vCenter host.
= None(StrOpt) Password for connection to VMware vCenter host.
= 443(IntOpt) Port for connection to VMware vCenter host.
= None(StrOpt) Username for connection to VMware vCenter host.
= False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
= br-int(StrOpt) Name of Integration Bridge
= 100(IntOpt) The maximum number of ObjectContent data objects that should be returned in a single result. A positive value will cause the operation to suspend the retrieval when the count of objects reaches the specified maximum. The server may still limit the count to something less than the configured value. Any remaining objects may be retrieved with additional requests.
= None(StrOpt) The PBM default policy. If pbm_wsdl_location is set and there is no defined storage policy for the specific request then this policy will be used.
= False(BoolOpt) The PBM status.
= None(StrOpt) PBM service WSDL file location URL. e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable storage policy based placement of instances.
= None(StrOpt) Identifies a proxy service that provides network access to the serial_port_service_uri. This option is ignored if serial_port_service_uri is not specified.
= None(StrOpt) Identifies the remote system that serial port traffic will be sent to. If this is not set, no serial ports will be added to the created VMs.
= 0.5(FloatOpt) The interval used for polling of remote tasks.
= True(BoolOpt) Whether to use linked clone
= None(StrOpt) Optional VIM Service WSDL Location e.g http://<server>/vimService.wsdl. Optional over-ride to default location for bug work-arounds
-
diff --git a/doc/common/tables/nova-vnc.xml b/doc/common/tables/nova-vnc.xml deleted file mode 100644 index 4584a3e4a7..0000000000 --- a/doc/common/tables/nova-vnc.xml +++ /dev/null @@ -1,110 +0,0 @@
Description of VNC configuration options
Configuration option = Default value | Description
[DEFAULT]
= False(BoolOpt) Become a daemon (background process)
= None(StrOpt) SSL key file (if separate from cert)
= 0.0.0.0(StrOpt) Host on which to listen for incoming requests
= 6080(IntOpt) Port on which to listen for incoming requests
= False(BoolOpt) Record sessions to FILE.[session_number]
= False(BoolOpt) Source is ipv6
= False(BoolOpt) Disallow non-encrypted connections
= /usr/share/spice-html5(StrOpt) Run webserver on same port. Serve files from DIR.
[vmware]
= 5900(IntOpt) VNC starting port
= 10000(IntOpt) Total number of VNC ports
[vnc]
= True(BoolOpt) Enable VNC related features
= en-us(StrOpt) Keymap for VNC
= http://127.0.0.1:6080/vnc_auto.html(StrOpt) Location of VNC console proxy, in the form "http://127.0.0.1:6080/vnc_auto.html"
= 127.0.0.1(StrOpt) IP address on which instance vncservers should listen
= 127.0.0.1(StrOpt) The address to which proxy clients (like nova-xvpvncproxy) should connect
= http://127.0.0.1:6081/console(StrOpt) Location of nova xvp VNC console proxy, in the form "http://127.0.0.1:6081/console"
-
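An assumed [vnc] fragment matching the defaults listed above (option names such as vncserver_listen are not preserved in the table; addresses are placeholders):

    [vnc]
    enabled = True
    vncserver_listen = 0.0.0.0
    # Address the proxy uses to reach this compute node
    vncserver_proxyclient_address = 10.0.0.31
    novncproxy_base_url = http://controller:6080/vnc_auto.html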
diff --git a/doc/common/tables/nova-volumes.xml b/doc/common/tables/nova-volumes.xml deleted file mode 100644 index bd8104b595..0000000000 --- a/doc/common/tables/nova-volumes.xml +++ /dev/null @@ -1,180 +0,0 @@
Description of volumes configuration options
Configuration option = Default value | Description
[DEFAULT]
= 60(IntOpt) Number of times to retry block device allocation on failures
= 3(IntOpt) Waiting time interval (seconds) between block device allocation retries on failures
= $my_ip(StrOpt) Block storage IP address of this host
= nova.volume.cinder.API(StrOpt) The full class name of the volume API class to use
= 0(IntOpt) Interval in seconds for gathering volume usages
[cinder]
= None(StrOpt) PEM encoded Certificate Authority to use when verifying HTTPs connections.
= volumev2:cinderv2:publicURL(StrOpt) Info to match when looking for cinder in the service catalog. Format is: separated values of the form: <service_type>:<service_name>:<endpoint_type>
= None(StrOpt) PEM encoded client certificate cert file
= True(BoolOpt) Allow attach between instance and volume in different availability zones.
= None(StrOpt) Override service catalog lookup with template for cinder endpoint e.g. http://localhost:8776/v1/%(project_id)s
= 3(IntOpt) Number of cinderclient retries on failed http calls
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) PEM encoded client certificate key file
= None(StrOpt) Region name of this node
= None(IntOpt) Timeout value for http requests
[hyperv]
= False(BoolOpt) Force V1 volume utility class
= 10(IntOpt) The number of times to retry to attach a volume
= 5(IntOpt) Interval between volume attachment attempts, in seconds
[libvirt]
= $state_path/mnt(StrOpt) Directory where the glusterfs volume is mounted on the compute node
= None(StrOpt) Mount options passed to the NFS client. See section of the nfs man page for details
= $state_path/mnt(StrOpt) Directory where the NFS volume is mounted on the compute node
= 3(IntOpt) Number of times to rediscover AoE target to find volume
= 5(IntOpt) Number of times to rescan iSCSI target to find volume
= 5(IntOpt) Number of times to rescan iSER target to find volume
= (ListOpt) Protocols listed here will be accessed directly from QEMU. Currently supported protocols: [gluster]
= None(StrOpt) The libvirt UUID of the secret for the rbd_user volumes
= None(StrOpt) The RADOS client name for accessing rbd volumes
= None(StrOpt) Path or URL to Scality SOFS configuration file
= $state_path/scality(StrOpt) Base dir where Scality SOFS shall be mounted
= (StrOpt) Mount options passed to the SMBFS client. See mount.cifs man page for details. Note that the libvirt-qemu uid and gid must be specified.
= $state_path/mnt(StrOpt) Directory where the SMBFS shares are mounted on the compute node
[xenserver]
= 10(IntOpt) Time to wait for a block device to be created
-
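A sketch of the [cinder] client settings; the option names (os_region_name, catalog_info, cross_az_attach) are assumed, while the catalog_info value is the default from the table:

    [cinder]
    os_region_name = RegionOne
    catalog_info = volumev2:cinderv2:publicURL
    # Allow attaching volumes from a different availability zone
    cross_az_attach = True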
diff --git a/doc/common/tables/nova-vpn.xml b/doc/common/tables/nova-vpn.xml deleted file mode 100644 index 3652be7458..0000000000 --- a/doc/common/tables/nova-vpn.xml +++ /dev/null @@ -1,76 +0,0 @@
Description of VPN configuration options
Configuration option = Default value | Description
[DEFAULT]
= $pybasedir/nova/cloudpipe/bootscript.template(StrOpt) Template for cloudpipe instance boot script
= (ListOpt) A list of dmz ranges that should be accepted
= 255.255.255.0(StrOpt) Netmask to push into openvpn config
= 10.0.0.0(StrOpt) Network to push into openvpn config
= m1.tiny(StrOpt) Flavor for vpn instances
= 0(StrOpt) Image ID used when starting up a cloudpipe vpn server
= $my_ip(StrOpt) Public IP for the cloudpipe VPN servers
= -vpn(StrOpt) Suffix to add to project name for vpn key and secgroups
= 1000(IntOpt) First Vpn port for private networks
-
diff --git a/doc/common/tables/nova-xen.xml b/doc/common/tables/nova-xen.xml deleted file mode 100644 index 14fb012963..0000000000 --- a/doc/common/tables/nova-xen.xml +++ /dev/null @@ -1,258 +0,0 @@
Description of Xen configuration options
Configuration option = Default value | Description
[DEFAULT]
= nova.console.xvp.XVPConsoleProxy(StrOpt) Driver to use for the console proxy
= /etc/xvp.conf(StrOpt) Generated XVP conf file
= $pybasedir/nova/console/xvp.conf.template(StrOpt) XVP conf template
= /var/log/xvp.log(StrOpt) XVP log file
= 5900(IntOpt) Port for XVP to multiplex VNC connections on
= /var/run/xvp.pid(StrOpt) XVP master process pid file
= False(BoolOpt) Stub calls to compute worker for tests
[libvirt]
= /usr/lib/xen/boot/hvmloader(StrOpt) Location where the Xen hvmloader is kept
[xenserver]
= usr/sbin/xe-update-networking(StrOpt) Specifies the path in which the XenAPI guest agent should be located. If the agent is present, network configuration is not injected into the image. Used if compute_driver=xenapi.XenAPIDriver and flat_injected=True
= 60(IntOpt) Number of seconds to wait for agent reply to resetnetwork request
= 30(IntOpt) Number of seconds to wait for agent reply
= 300(IntOpt) Number of seconds to wait for agent to be fully operational
= all(StrOpt) Cache glance images locally. `all` will cache all images, `some` will only cache images that have the image_property `cache_in_nova=True`, and `none` turns off caching entirely
= True(BoolOpt) Ensure compute service is running on host XenAPI connects to.
= 5(IntOpt) Maximum number of concurrent XenAPI connections. Used only if compute_driver=xenapi.XenAPIDriver
= None(StrOpt) Password for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver
= None(StrOpt) URL for connection to XenServer/Xen Cloud Platform. A special value of unix://local can be used to connect to the local unix socket. Required if compute_driver=xenapi.XenAPIDriver
= root(StrOpt) Username for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver
= linux(StrOpt) Default OS type
= False(BoolOpt) Disables the use of the XenAPI agent in any image regardless of what image properties are present.
= None(IntOpt) Compression level for images, e.g., 9 for gzip -9. Range is 1-9, 9 being most compressed but most CPU intensive on dom0.
= nova.virt.xenapi.image.glance.GlanceStore(StrOpt) Dom0 plugin driver used to handle image uploads.
= 20(IntOpt) Number of seconds to wait for an SR to settle if the VDI does not exist when first introduced
= None(StrOpt) URL to the iPXE boot menu
= mkisofs(StrOpt) Name and optionally path of the tool used for ISO image creation
= None(StrOpt) Name of network to use for booting iPXE ISOs
= iqn.2010-10.org.openstack(StrOpt) IQN Prefix
= 10(IntOpt) Timeout in seconds for XenAPI login.
= 16777216(IntOpt) Maximum size in bytes of kernel or ramdisk images
= 10(IntOpt) Maximum number of retries to unplug VBD. If <= 0, the unplug is attempted once with no retries
= xapi1(StrOpt) Name of Integration Bridge used by Open vSwitch
= False(BoolOpt) Used to enable the remapping of VBD dev (Works around an issue in Ubuntu Maverick)
= sd(StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb -> /dev/sdb)
= 60(IntOpt) Number of seconds to wait for instance to go to running state
= True(BoolOpt) Whether to use sparse_copy for copying data on a resize down (False will use standard dd). This speeds up resizes down considerably since large runs of zeros won't have to be rsynced
= /var/run/sr-mount(StrOpt) Base path to the storage repository
= default-sr:true(StrOpt) Filter for finding the SR to be used to install guest instances on. To use the Local Storage in default XenServer/XCP installations set this flag to other-config:i18n-key=local-storage. To select an SR with a different matching criteria, you could set it to other-config:my_favorite_sr=true. On the other hand, to fall back on the Default SR, as displayed by XenCenter, set this flag to: default-sr:true
= None(StrOpt) The iSCSI Target Host
= 3260(StrOpt) The iSCSI Target Port, default is port 3260
= None(StrOpt) Base URL for torrent files; must contain a slash character (see RFC 1808, step 6)
= 600(IntOpt) Number of seconds a download can remain at the same progress percentage w/o being considered a stall
= none(StrOpt) Whether or not to download images via Bit Torrent.
= 6891(IntOpt) End of port range to listen on
= 6881(IntOpt) Beginning of port range to listen on
= 86400(IntOpt) Cached torrent files not accessed within this number of seconds can be reaped
= 1(IntOpt) Maximum number of seeder processes to run concurrently within a given dom0. (-1 = no limit)
= 1.0(FloatOpt) Probability that peer will become a seeder. (1.0 = 100%)
= 3600(IntOpt) Number of seconds after downloading an image via BitTorrent that it should be seeded for other peers.
= False(BoolOpt) Determines if the XenAPI agent should be used when the image used does not contain a hint to declare if the agent is present or not. The hint is a glance property "xenapi_use_agent" that has the value "True" or "False". Note that waiting for the agent when it is not present will significantly increase server boot times.
= True(BoolOpt) To use for hosts with different CPUs
= 20(IntOpt) Max number of times to poll for VHD to coalesce. Used only if compute_driver=xenapi.XenAPIDriver
= 5.0(FloatOpt) The interval used for polling of coalescing vhds. Used only if compute_driver=xenapi.XenAPIDriver
= nova.virt.xenapi.vif.XenAPIBridgeDriver(StrOpt) The XenAPI VIF driver using XenServer Network APIs.
-
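The option names were stripped from the table above, so the following is only a minimal sketch of how a nova.conf [xenserver] block for the XenAPI driver might look; connection_url, connection_username, connection_password, and cache_images are assumed option names, and the host/credential values are purely illustrative::

    [DEFAULT]
    compute_driver = xenapi.XenAPIDriver

    [xenserver]
    # XenAPI connection settings, used only with compute_driver=xenapi.XenAPIDriver
    connection_url = http://xenserver.example.com
    connection_username = root
    connection_password = secret
    # Cache glance images on the hypervisor: all, some, or none
    cache_images = all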
diff --git a/doc/common/tables/nova-xvpvncproxy.xml b/doc/common/tables/nova-xvpvncproxy.xml deleted file mode 100644 index 5d3448d5ab..0000000000 --- a/doc/common/tables/nova-xvpvncproxy.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of XCP VNC proxy configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 0.0.0.0(StrOpt) Address that the XCP VNC proxy should bind to
= 6081(IntOpt) Port that the XCP VNC proxy should bind to
-
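As a hedged sketch only (the table above lists defaults without option names; xvpvncproxy_host and xvpvncproxy_port are assumed to be the matching nova.conf options), the XCP VNC proxy bind settings would sit in [DEFAULT]::

    [DEFAULT]
    # Address and port the XCP VNC proxy listens on (defaults shown)
    xvpvncproxy_host = 0.0.0.0
    xvpvncproxy_port = 6081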
diff --git a/doc/common/tables/nova-zeromq.xml b/doc/common/tables/nova-zeromq.xml deleted file mode 100644 index 5f8767b5d4..0000000000 --- a/doc/common/tables/nova-zeromq.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
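For illustration, a ZeroMQ messaging setup matching the defaults above might look like the following; the rpc_zmq_* names come from the oslo.messaging ZeroMQ driver and are an assumption for this table, and the node name is hypothetical::

    [DEFAULT]
    rpc_backend = zmq
    # Bind address and node name; the node name should resolve to this host
    rpc_zmq_bind_address = *
    rpc_zmq_host = node-1.example.com
    # Directory for IPC sockets and the matchmaker driver
    rpc_zmq_ipc_dir = /var/run/openstack
    rpc_zmq_matchmaker = redis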
diff --git a/doc/common/tables/nova-zookeeper.xml b/doc/common/tables/nova-zookeeper.xml deleted file mode 100644 index d90e37a3df..0000000000 --- a/doc/common/tables/nova-zookeeper.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Zookeeper configuration options
Configuration option = Default valueDescription
[zookeeper]
= None(StrOpt) The ZooKeeper addresses for servicegroup service in the format of host1:port,host2:port,host3:port
= 4000(IntOpt) The recv_timeout parameter for the zk session
= /servicegroups(StrOpt) The prefix used in ZooKeeper to store ephemeral nodes
= 5(IntOpt) Number of seconds to wait until retrying to join the session
-
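A minimal sketch of enabling the ZooKeeper servicegroup backend, assuming the option names are address, recv_timeout, sg_prefix, and sg_retry_interval (the endpoints shown are hypothetical)::

    [DEFAULT]
    servicegroup_driver = zk

    [zookeeper]
    # Comma-separated ZooKeeper endpoints for the servicegroup service
    address = zk1.example.com:2181,zk2.example.com:2181
    recv_timeout = 4000
    sg_prefix = /servicegroups
    sg_retry_interval = 5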
diff --git a/doc/common/tables/octavia.xml b/doc/common/tables/octavia.xml deleted file mode 100644 index 783dec49d0..0000000000 --- a/doc/common/tables/octavia.xml +++ /dev/null @@ -1,359 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Octavia configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print more verbose output (set logging level to INFO instead of default WARNING level).
= False(BoolOpt) Print more verbose output (set logging level to INFO instead of default WARNING level).
= 0.0.0.0(StrOpt) The host IP to bind the api service to.
= 9876(IntOpt) The port to bind the api service to.
= simulated_handler(StrOpt) The handler that the API communicates with.
= hot_plug_plugin(StrOpt) Name of the controller plugin to use.
= (StrOpt) Region in Identity service catalog to use for communication with the OpenStack services.
= (StrOpt) Hostname to be used by the host machine for services running on it. The default value is the hostname of the host machine.
[database]
= mysql+pymysql://root:pass@127.0.0.1:3306/octavia(StrOpt) The SQLAlchemy connection string used to connect to the database.
[health_manager]
= 0.0.0.0(StrOpt) IP address the controller will listen on for heart beats from the amphora.
= 5555(IntOpt) Port number the controller will listen on for heart beats from the amphora.
= (StrOpt) List of controller ip and port pairs for the heartbeat receivers. Example [127.0.0.1:5555, 127.0.0.1:5555].
= 10(IntOpt) Number of threads performing amphora failovers.
= 50(IntOpt) Number of threads performing amphora status update.
= 10(IntOpt) Sleep time between sending heartbeats from the amphora.
= (StrOpt) Key used to authenticate the heartbeat message sent by the amphora.
= 60(IntOpt) Interval, in seconds, to wait before failing over an amphora.
= 3(IntOpt) Sleep time between health checks in seconds.
= 0(IntOpt) sets the value of the heartbeat recv buffer.
[keystone_authtoken]
= https://localhost:5000/v3(StrOpt) Complete public Identity API endpoint.
= octavia(StrOpt) Keystone account username.
= password(StrOpt) Keystone account password.
= service(StrOpt) Keystone service account tenant name to validate user tokens.
= False(BoolOpt) Verify HTTPS connections.
[keystone_authtoken_v3]
= default(StrOpt) Admin user keystone authentication domain.
= default(StrOpt) Admin project keystone authentication domain.
[certificates]
= octavia.certificates.generator.LocalCertGenerator(StrOpt) Class name which generate certificates.
= octavia.certificates.manager.LocalCertManager(StrOpt) Class name of certificate manager.
= /etc/ssl/certs/ssl-cert-snakeoil.pem(StrOpt) Absolute path to the CA Certificate for signing. Defaults to env[OS_OCTAVIA_TLS_CA_CERT]. Local Cert generator only.
= /etc/ssl/private/ssl-cert-snakeoil.key(StrOpt) Absolute path to the Private Key for signing. Defaults to env[OS_OCTAVIA_TLS_CA_KEY]. Local Cert generator only.
= (StrOpt) Passphrase for the Private Key. Defaults to env[OS_OCTAVIA_CA_KEY_PASS] or None. Local Cert generator only.
= sha256(StrOpt) Certificate signing digest. Defaults to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or sha256. Local Cert generator only.
= /var/lib/octavia/certificates/(StrOpt) Absolute path to the certificate storage directory. Defaults to env[OS_OCTAVIA_TLS_STORAGE]. Local Cert manager only.
[octavia_network]
= (StrOpt) Network to communicate with amphora.
= 15(IntOpt) The maximum attempts to retry an action with the networking service.
= 1(IntOpt) Seconds to wait before retrying an action with the networking service.
[haproxy_amphora]
= /var/lib/octavia(StrOpt) Base directory for amphora files on amphora.
= /var/lib/octavia/certs(StrOpt) Base directory for cert storage on amphora.
= /var/lib/octavia/custom_template(StrOpt) Custom haproxy template.
= /logs(StrOpt) Base directory for logs on the amphora.
= 300(IntOpt) Retry threshold for connecting to amphorae.
= 5(IntOpt) Retry threshold for connecting to amphorae.
= barbican_cert_manager(StrOpt) Name of the cert manager to use.
= ubuntu(StrOpt) Name of user for access to amphora, ssh driver only.
= /opt/stack/.ssh/id_rsa(StrOpt) Local absolute path to the private key loaded on amphora at boot, ssh driver only.
= 0.0.0.0(StrOpt) The host IP to bind to on the amphora host. REST driver only.
= 9191(IntOpt) The port to bind to. REST driver only.
= /usr/sbin/haproxy(StrOpt) The full path to haproxy.
= 2(IntOpt) The respawn count for haproxy's upstart script.
= 2(IntOpt) The respawn interval for haproxy's upstart script.
= /tmp(StrOpt) The directory to store haproxy cert files in.
[controller_worker]
= 10(IntOpt) Retry attempts to wait for Amphora to become active.
= 10(IntOpt) Seconds to wait for an Amphora to become active.
= (StrOpt) Nova instance flavor id for the Amphora.
= (StrOpt) Glance image id for the Amphora image to boot.
= (StrOpt) SSH key name used to boot the Amphora. REST driver only, or for debugging.
= (StrOpt) Network to attach to the Amphora.
= (StrOpt) List of security groups to attach to the Amphora.
= /etc/octavia/certs/ca_01.pem(StrOpt) Client CA for the amphora agent to use. REST driver only.
= amphora_noop_driver(StrOpt) Name of the amphora driver to use.
= compute_noop_driver(StrOpt) Name of the compute driver to use.
= network_noop_driver(StrOpt) Name of the network driver to use.
= local_cert_generator(StrOpt) Name of the cert generator to use.
[task_flow]
= serial(StrOpt) TaskFlow engine to use.
= 5(IntOpt) The maximum number of workers.
[oslo_messaging_rabbit]
= octavia(StrOpt) RabbitMQ username.
= password(StrOpt) RabbitMQ password.
= 5672(IntOpt) RabbitMQ port.
= localhost:5672(StrOpt) RabbitMQ host.
[oslo_messaging]
= 2(IntOpt) Queue Consumer Thread Pool Size.
= octavia_prov(StrOpt) Topic (i.e. Queue) Name.
[house_keeping]
= 30(IntOpt) Interval in seconds to initiate spare amphora checks.
= 0(IntOpt) Number of spare amphorae.
= 30(IntOpt) Cleanup interval for Deleted amphora.
= 604800(IntOpt) Amphora expiry age in seconds. Default is 1 week.
-
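Since the option names are stripped from the Octavia table above, the snippet below is only an illustrative sketch of an octavia.conf matching those defaults; bind_host, bind_port, bind_ip, heartbeat_key, and connection are assumed option names and the credentials and key are placeholders::

    [DEFAULT]
    # API listener
    bind_host = 0.0.0.0
    bind_port = 9876

    [database]
    connection = mysql+pymysql://octavia:secret@127.0.0.1:3306/octavia

    [health_manager]
    # Listener for amphora heartbeats and the shared authentication key
    bind_ip = 0.0.0.0
    bind_port = 5555
    heartbeat_key = insecure-example-key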
diff --git a/doc/common/tables/sahara-amqp.xml b/doc/common/tables/sahara-amqp.xml deleted file mode 100644 index 1024e534bf..0000000000 --- a/doc/common/tables/sahara-amqp.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= INFO(StrOpt) Notification level for outgoing notifications
= None(StrOpt) Notification publisher_id for outgoing notifications
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
diff --git a/doc/common/tables/sahara-api.xml b/doc/common/tables/sahara-api.xml deleted file mode 100644 index 3440e8efdd..0000000000 --- a/doc/common/tables/sahara-api.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[oslo_middleware]
= 114688(IntOpt) The maximum body size for each request, in bytes.
= X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[retries]
= 5(IntOpt) Number of times to retry the request to client before failing
= 10(IntOpt) Time between the retries to client (in seconds).
-
diff --git a/doc/common/tables/sahara-auth_token.xml b/doc/common/tables/sahara-auth_token.xml deleted file mode 100644 index 667a47cc98..0000000000 --- a/doc/common/tables/sahara-auth_token.xml +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
diff --git a/doc/common/tables/sahara-clients.xml b/doc/common/tables/sahara-clients.xml deleted file mode 100644 index 869e01d9dc..0000000000 --- a/doc/common/tables/sahara-clients.xml +++ /dev/null @@ -1,146 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of clients configuration options
Configuration option = Default valueDescription
[cinder]
= False(BoolOpt) Allow to perform insecure SSL requests to cinder.
= 2(IntOpt) Version of the Cinder API to use.
= None(StrOpt) Location of ca certificates file to use for cinder client requests.
= internalURL(StrOpt) Endpoint type for cinder client requests
[heat]
= False(BoolOpt) Allow to perform insecure SSL requests to heat.
= None(StrOpt) Location of ca certificates file to use for heat client requests.
= internalURL(StrOpt) Endpoint type for heat client requests
[keystone]
= False(BoolOpt) Allow to perform insecure SSL requests to keystone.
= None(StrOpt) Location of ca certificates file to use for keystone client requests.
= internalURL(StrOpt) Endpoint type for keystone client requests
[manila]
= True(BoolOpt) Allow to perform insecure SSL requests to manila.
= 1(IntOpt) Version of the manila API to use.
= None(StrOpt) Location of ca certificates file to use for manila client requests.
[neutron]
= False(BoolOpt) Allow to perform insecure SSL requests to neutron.
= None(StrOpt) Location of ca certificates file to use for neutron client requests.
= internalURL(StrOpt) Endpoint type for neutron client requests
[nova]
= False(BoolOpt) Allow to perform insecure SSL requests to nova.
= None(StrOpt) Location of ca certificates file to use for nova client requests.
= internalURL(StrOpt) Endpoint type for nova client requests
[swift]
= False(BoolOpt) Allow to perform insecure SSL requests to swift.
= None(StrOpt) Location of ca certificates file to use for swift client requests.
= internalURL(StrOpt) Endpoint type for swift client requests
-
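As a sketch of how the per-client sections above are typically used in sahara.conf, assuming the option names are endpoint_type, api_insecure, ca_file, and api_version (the CA bundle path is illustrative)::

    [neutron]
    endpoint_type = internalURL
    api_insecure = False
    ca_file = /etc/ssl/certs/ca-bundle.pem

    [cinder]
    api_version = 2
    endpoint_type = internalURL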
diff --git a/doc/common/tables/sahara-common.xml b/doc/common/tables/sahara-common.xml deleted file mode 100644 index aa30d2b1b9..0000000000 --- a/doc/common/tables/sahara-common.xml +++ /dev/null @@ -1,206 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= default(StrOpt) The name of the domain for the service project (ex. tenant).
= default(StrOpt) The name of the domain to which the admin user belongs.
= 0(IntOpt) Number of workers for Sahara API service (0 means all-in-one-thread configuration).
= 0(IntOpt) Maximal time (in hours) for clusters allowed to be in states other than "Active", "Deleting" or "Error". If a cluster is not in "Active", "Deleting" or "Error" state and last update of it was longer than "cleanup_time_for_incomplete_clusters" hours ago then it will be deleted automatically. (0 value means that automatic clean up is disabled).
= 24(IntOpt) Defines the period of time (in hours) after which trusts created to allow sahara to create or scale a cluster will expire. Note that this value should be significantly larger than the value of the cleanup_time_for_incomplete_clusters configuration key if use of the cluster cleanup feature is desired (the trust must last at least as long as a cluster could validly take to stall in its creation, plus the timeout value set in that key, plus one hour for the period of the cleanup job).
= 70(IntOpt) The same as global_remote_threshold, but for a single cluster.
= etc/sahara/compute.topology(StrOpt) File with nova compute topology. It should contain mapping between nova computes and racks.
= pool.ntp.org(StrOpt) Default ntp server for time sync
= False(BoolOpt) Disables event log feature.
= False(BoolOpt) Enables data locality for hadoop cluster. Also enables data locality for Swift used by hadoop. If enabled, 'compute_topology' and 'swift_topology' configuration parameters should point to OpenStack and Swift topology correspondingly.
= True(BoolOpt) Enables four-level topology for data locality. Works only if corresponding plugin supports such mode.
= False(BoolOpt) Enables sending notifications to Ceilometer
= 64(IntOpt) Size of executor thread pool.
= 100(IntOpt) Maximum number of remote operations that will be running at the same time. Note that each remote operation requires its own process to run.
= data-processing-cluster(ListOpt) List of tags to be used during operating with stack.
= heat(StrOpt) An engine which will be used to provision infrastructure for Hadoop cluster.
= 5120(IntOpt) Maximum length of job binary data in kilobytes that may be stored or retrieved in a single operation.
= 300(IntOpt) Timeout for canceling job execution (in seconds). Sahara will try to cancel job execution during this time.
= (StrOpt) Postfix for storing jobs in hdfs. Will be added to '/user/<hdfs user>/' path.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= None(ListOpt) Memcached servers or None for in process cache.
= 30(IntOpt) Minimal "lifetime" in seconds for a transient cluster. Cluster is guaranteed to be "alive" within this time period.
= novalocal(StrOpt) The suffix of the node's FQDN. In nova-network that is the dhcp_domain config parameter.
= None(StrOpt) Region name used to get services endpoints.
= True(BoolOpt) Enable periodic tasks.
= 60(IntOpt) Range in seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0).
= 60(IntOpt) Max interval size between periodic tasks execution in seconds.
= vanilla, hdp, spark, cdh(ListOpt) List of plugins to be loaded. Sahara preserves the order of the list when returning it.
= (StrOpt) Proxy command used to connect to instances. If set, this command should open a netcat socket, that Sahara will use for SSH and HTTP connections. Use {host} and {port} to describe the destination. Other available keywords: {tenant_id}, {network_id}, {router_id}.
= ssh(StrOpt) A method for Sahara to execute commands on VMs.
= sudo sahara-rootwrap /etc/sahara/rootwrap.conf(StrOpt) Rootwrap command to leverage. Use in conjunction with use_rootwrap=True
= etc/sahara/swift.topology(StrOpt) File with Swift topology. It should contain mapping between Swift nodes and racks.
= False(BoolOpt) Enable Sahara to use an external key manager service provided by the identity service catalog. Sahara will store all keys with the manager service.
= True(BoolOpt) If set to True, Sahara will use floating IPs to communicate with instances. To make sure that all instances have floating IPs assigned in Nova Network set "auto_assign_floating_ip=True" in nova.conf. If Neutron is used for networking, make sure that all Node Groups have "floating_ip_pool" parameter defined.
= True(BoolOpt) Enables Sahara to use Keystone API v3. If that flag is disabled, per-job clusters will not be terminated automatically.
= False(BoolOpt) Use network namespaces for communication (only valid to use in conjunction with use_neutron=True).
= False(BoolOpt) Use Neutron Networking (False indicates the use of Nova networking).
= False(BoolOpt) Use rootwrap facility to allow non-root users to run the sahara-all server instance and access private network IPs (only valid to use in conjunction with use_namespaces=True)
[conductor]
= True(BoolOpt) Perform sahara-conductor operations locally.
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
-
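A minimal sketch of the most commonly tuned common options above in sahara.conf; use_neutron, use_floating_ips, plugins, and infrastructure_engine are assumed to be the corresponding option names::

    [DEFAULT]
    # Networking mode: Neutron with floating IPs for management access
    use_neutron = True
    use_floating_ips = True
    # Plugins loaded by Sahara (order is preserved)
    plugins = vanilla,hdp,spark,cdh
    # Provisioning engine
    infrastructure_engine = heat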
diff --git a/doc/common/tables/sahara-cors.xml b/doc/common/tables/sahara-cors.xml deleted file mode 100644 index f2f5cc7c41..0000000000 --- a/doc/common/tables/sahara-cors.xml +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CORS configuration options
Configuration option = Default valueDescription
[cors]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain]
= True(BoolOpt) Indicate that the actual request can include user credentials
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
= GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
= None(StrOpt) Indicate whether this resource may be shared with the domain received in the request's "origin" header.
= Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
= 3600(IntOpt) Maximum cache age of CORS preflight requests.
-
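For illustration, an oslo.middleware CORS block matching the defaults above might be written as follows; allowed_origin, allow_credentials, and max_age are the usual option names, and the origins shown are hypothetical::

    [cors]
    allowed_origin = https://dashboard.example.com
    allow_credentials = True
    max_age = 3600

    [cors.subdomain]
    allowed_origin = https://api.example.com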
diff --git a/doc/common/tables/sahara-database.xml b/doc/common/tables/sahara-database.xml deleted file mode 100644 index 6fb3c225fa..0000000000 --- a/doc/common/tables/sahara-database.xml +++ /dev/null @@ -1,127 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of database configuration options
Configuration option = Default valueDescription
[DEFAULT]
= sahara.db(StrOpt) Driver to use for database access.
[database]
= sqlalchemy(StrOpt) The back end to use for the database.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the database.
= 0(IntOpt) Verbosity of SQL debugging information: 0=None, 100=Everything.
= False(BoolOpt) Add Python stack traces to SQL as comment strings.
= True(BoolOpt) If True, increases the interval between retries of a database operation up to db_max_retry_interval.
= 20(IntOpt) Maximum retries in case of connection error or deadlock error before error is raised. Set to -1 to specify an infinite retry count.
= 10(IntOpt) If db_inc_retry_interval is set, the maximum seconds between retries of a database operation.
= 1(IntOpt) Seconds between retries of a database transaction.
= 3600(IntOpt) Timeout before idle SQL connections are reaped.
= None(IntOpt) If set, use this value for max_overflow with SQLAlchemy.
= None(IntOpt) Maximum number of SQL connections to keep open in a pool.
= 10(IntOpt) Maximum number of database connection retries during startup. Set to -1 to specify an infinite retry count.
= 1(IntOpt) Minimum number of SQL connections to keep open in a pool.
= TRADITIONAL(StrOpt) The SQL mode to be used for MySQL sessions. This option, including the default, overrides any server-set SQL mode. To use whatever SQL mode is set by the server configuration, set this to no value. Example: mysql_sql_mode=
= None(IntOpt) If set, use this value for pool_timeout with SQLAlchemy.
= 10(IntOpt) Interval between retries of opening a SQL connection.
= None(StrOpt) The SQLAlchemy connection string to use to connect to the slave database.
= oslo.sqlite(StrOpt) The file name to use with SQLite.
= True(BoolOpt) If True, SQLite uses synchronous mode.
= False(BoolOpt) Enable the experimental use of database reconnect on connection lost.
-
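A hedged sketch of the oslo.db settings above in sahara.conf; connection, max_pool_size, and idle_timeout are standard oslo.db option names, and the host and credentials are placeholders::

    [database]
    connection = mysql+pymysql://sahara:secret@controller/sahara
    max_pool_size = 10
    idle_timeout = 3600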
diff --git a/doc/common/tables/sahara-domain.xml b/doc/common/tables/sahara-domain.xml deleted file mode 100644 index 527d75f50a..0000000000 --- a/doc/common/tables/sahara-domain.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of domain configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) The domain Sahara will use to create new proxy users for Swift object access.
= Member(ListOpt) A list of the role names that the proxy user should assume through trust for Swift object access.
= False(BoolOpt) Enables Sahara to use a domain for creating temporary proxy users to access Swift. If this is enabled a domain must be created for Sahara to use.
-
diff --git a/doc/common/tables/sahara-logging.xml b/doc/common/tables/sahara-logging.xml deleted file mode 100644 index 37f8c8e4de..0000000000 --- a/doc/common/tables/sahara-logging.xml +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqplib=WARN, qpid.messaging=INFO, stevedore=INFO, eventlet.wsgi.server=WARN, sqlalchemy=WARN, boto=WARN, suds=INFO, keystone=INFO, paramiko=WARN, requests=WARN, iso8601=WARN, oslo_messaging=INFO, neutronclient=INFO(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
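As a brief example of the oslo.log options above (debug, use_syslog, log_dir, and log_file are the standard names; the log path is illustrative)::

    [DEFAULT]
    debug = False
    use_syslog = False
    log_dir = /var/log/sahara
    log_file = sahara-all.log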
diff --git a/doc/common/tables/sahara-object_store_access.xml b/doc/common/tables/sahara-object_store_access.xml deleted file mode 100644 index a7c8ac298c..0000000000 --- a/doc/common/tables/sahara-object_store_access.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Auth options for Swift access for VM configuration options
Configuration option = Default valueDescription
[object_store_access]
= None(StrOpt) Location of ca certificate file to use for identity client requests via public endpoint
= None(StrOpt) Location of ca certificate file to use for object-store client requests via public endpoint
-
diff --git a/doc/common/tables/sahara-policy.xml b/doc/common/tables/sahara-policy.xml deleted file mode 100644 index 0320a67957..0000000000 --- a/doc/common/tables/sahara-policy.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of policy configuration options
Configuration option = Default valueDescription
[oslo_policy]
= default(StrOpt) Default rule. Enforced when a requested rule is not found.
= ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
= policy.json(StrOpt) The JSON file that defines policies.
-
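A minimal sketch of the oslo.policy options above, assuming the option names policy_file, policy_default_rule, and policy_dirs::

    [oslo_policy]
    policy_file = policy.json
    policy_default_rule = default
    policy_dirs = policy.d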
diff --git a/doc/common/tables/sahara-qpid.xml b/doc/common/tables/sahara-qpid.xml deleted file mode 100644 index 7e8d776dd4..0000000000 --- a/doc/common/tables/sahara-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must stay backward compatible in the meantime. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
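For illustration, the Qpid connection options above would be set as follows; qpid_hostname, qpid_port, qpid_username, qpid_password, and qpid_protocol are the usual option names, and the broker host and credentials are placeholders::

    [oslo_messaging_qpid]
    qpid_hostname = qpid.example.com
    qpid_port = 5672
    qpid_username = sahara
    qpid_password = secret
    qpid_protocol = tcp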
diff --git a/doc/common/tables/sahara-rabbitmq.xml b/doc/common/tables/sahara-rabbitmq.xml deleted file mode 100644 index 635fab9551..0000000000 --- a/doc/common/tables/sahara-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How many times during the heartbeat_timeout_threshold we check the heartbeat.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must stay backward compatible in the meantime. This option provides such compatibility - it defaults to False in Liberty and can be turned on by early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
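A minimal RabbitMQ connection sketch matching the defaults above; rabbit_host, rabbit_port, rabbit_userid, rabbit_password, rabbit_use_ssl, and rabbit_ha_queues are the standard option names, and the host and credentials are placeholders::

    [oslo_messaging_rabbit]
    rabbit_host = controller
    rabbit_port = 5672
    rabbit_userid = openstack
    rabbit_password = secret
    rabbit_use_ssl = False
    rabbit_ha_queues = False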
diff --git a/doc/common/tables/sahara-redis.xml b/doc/common/tables/sahara-redis.xml deleted file mode 100644 index f099d8fd9a..0000000000 --- a/doc/common/tables/sahara-redis.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/sahara-rpc.xml b/doc/common/tables/sahara-rpc.xml deleted file mode 100644 index 9d37907c82..0000000000 --- a/doc/common/tables/sahara-rpc.xml +++ /dev/null @@ -1,138 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
-
diff --git a/doc/common/tables/sahara-timeouts.xml b/doc/common/tables/sahara-timeouts.xml deleted file mode 100644 index a8c4f90cfe..0000000000 --- a/doc/common/tables/sahara-timeouts.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of timeouts configuration options
Configuration option = Default valueDescription
[timeouts]
= 10(IntOpt) Wait for attaching volumes to instances, in seconds
= 10800(IntOpt) Wait for instances to become active, in seconds
= 10800(IntOpt) Wait for instances to be deleted, in seconds
= 300(IntOpt) Timeout for detaching volumes from instance, in seconds
= 10800(IntOpt) Assign IPs timeout, in seconds
= 10800(IntOpt) Wait for volumes to become available, in seconds
= 10800(IntOpt) Wait for instance accessibility, in seconds
-
diff --git a/doc/common/tables/sahara-zeromq.xml b/doc/common/tables/sahara-zeromq.xml deleted file mode 100644 index 29181eebbe..0000000000 --- a/doc/common/tables/sahara-zeromq.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
diff --git a/doc/common/tables/swift-account-server-DEFAULT.xml b/doc/common/tables/swift-account-server-DEFAULT.xml deleted file mode 100644 index becd3f4cdc..0000000000 --- a/doc/common/tables/swift-account-server-DEFAULT.xml +++ /dev/null @@ -1,138 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for [DEFAULT] in account-server.conf
Configuration option = Default valueDescription
= 4096Maximum number of allowed pending TCP connections
= 0.0.0.0IP Address for server to bind to
= 6002Port for server to bind to
= 30Seconds to attempt bind before giving up
= offIf you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation.
= /srv/nodeParent directory of where devices are mounted
= falseDisable "fast fail" fallocate checks if the underlying filesystem does not support it.
= falseIf true, turn on debug logging for eventlet
= 0You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not. This is useful for systems that behave badly when they completely run out of space; you can make the services pretend they're out of space early.
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= 0Caps the length of log lines to the value given; no limit if set to 0, the default.
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0, if frequency of logging is too high, tune the log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= 1024Maximum number of clients one worker can process simultaneously. Lowering the number of clients handled per worker, and raising the number of workers can lessen the impact that a CPU intensive, or blocking, request can have on other requests served by the same worker. If the maximum number of clients is set to one, then a given worker will not perform another call while processing, allowing other workers a chance to process it.
= trueWhether or not check if the devices are mounted to prevent accidentally writing to the root device
= /etc/swiftSwift configuration directory
= swiftUser to run as
= autoBy increasing the number of workers to a much higher value, one can reduce the impact of slow file system operations in one request from negatively impacting other requests.
-
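As a short example of the [DEFAULT] options above, a typical account-server.conf might start like this (values only illustrate the defaults and common overrides)::

    [DEFAULT]
    bind_ip = 0.0.0.0
    bind_port = 6002
    devices = /srv/node
    mount_check = true
    user = swift
    workers = auto
    log_facility = LOG_LOCAL0
    log_level = INFO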
diff --git a/doc/common/tables/swift-account-server-account-auditor.xml b/doc/common/tables/swift-account-server-account-auditor.xml deleted file mode 100644 index 70341d5b99..0000000000 --- a/doc/common/tables/swift-account-server-account-auditor.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for [account-auditor] in account-server.conf
Configuration option = Default valueDescription
= 200Maximum accounts audited per second. Should be tuned according to individual system specs. 0 is unlimited.
= 1800Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= account-auditorLabel used when logging
= /var/cache/swiftDirectory where stats for a few items will be stored
-
diff --git a/doc/common/tables/swift-account-server-account-reaper.xml b/doc/common/tables/swift-account-server-account-reaper.xml deleted file mode 100644 index d784cf5e92..0000000000 --- a/doc/common/tables/swift-account-server-account-reaper.xml +++ /dev/null @@ -1,61 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for [account-reaper] in account-server.conf
Configuration option = Default valueDescription
= 25Number of replication workers to spawn
= 0.5Connection timeout to external services
= 0Normally, the reaper begins deleting account information for deleted accounts immediately; you can set this to delay its work however. The value is in seconds, 2592000 = 30 days, for example.
= 3600Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= account-reaperLabel used when logging
= 10Request timeout to external services
= 2592000No help text available for this option.
-
diff --git a/doc/common/tables/swift-account-server-account-replicator.xml b/doc/common/tables/swift-account-server-account-replicator.xml deleted file mode 100644 index 01de43ce59..0000000000 --- a/doc/common/tables/swift-account-server-account-replicator.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for [account-replicator] in account-server.conf
Configuration option = Default valueDescription
= 8Number of replication workers to spawn
= 0.5Connection timeout to external services
= 30Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= account-replicatorLabel used when logging
= 100Caps how long the replicator spends trying to sync a database per pass
= 10Request timeout to external services
= 1000Limit number of items to get per diff
= 604800Time elapsed in seconds before an object can be reclaimed
= /var/cache/swiftDirectory where stats for a few items will be stored
= noNo help text available for this option.
= {replication_ip}::accountNo help text available for this option.
= 30Time in seconds to wait between replication passes
-
diff --git a/doc/common/tables/swift-account-server-app-account-server.xml b/doc/common/tables/swift-account-server-app-account-server.xml deleted file mode 100644 index 82f1731341..0000000000 --- a/doc/common/tables/swift-account-server-app-account-server.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for [app-account-server] in account-server.conf
Configuration option = Default valueDescription
= .Prefix to use when automatically creating accounts
= falseIf defined, tells server how to handle replication verbs in requests. When set to True (or 1), only replication verbs will be accepted. When set to False, replication verbs will be rejected. When undefined, server will accept any verb in the request.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLog level
= account-serverLabel to use when logging
= trueWhether or not to log requests
= egg:swift#accountEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-account-server-filter-healthcheck.xml b/doc/common/tables/swift-account-server-filter-healthcheck.xml deleted file mode 100644 index 554c02e7f3..0000000000 --- a/doc/common/tables/swift-account-server-filter-healthcheck.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-healthcheck] in account-server.conf -
Configuration option = Default valueDescription
= No help text available for this option.
= egg:swift#healthcheckEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-account-server-filter-recon.xml b/doc/common/tables/swift-account-server-filter-recon.xml deleted file mode 100644 index 16d39a87b1..0000000000 --- a/doc/common/tables/swift-account-server-filter-recon.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-recon] in account-server.conf -
Configuration option = Default valueDescription
= /var/cache/swiftDirectory where stats for a few items will be stored
= egg:swift#reconEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-account-server-filter-xprofile.xml b/doc/common/tables/swift-account-server-filter-xprofile.xml deleted file mode 100644 index f79cb0e841..0000000000 --- a/doc/common/tables/swift-account-server-filter-xprofile.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-xprofile] in account-server.conf -
Configuration option = Default valueDescription
= 5.0No help text available for this option.
= falseNo help text available for this option.
= falseNo help text available for this option.
= /tmp/log/swift/profile/default.profileNo help text available for this option.
= /__profile__No help text available for this option.
= eventlet.green.profileNo help text available for this option.
= falseNo help text available for this option.
= egg:swift#xprofileEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-account-server-pipeline-main.xml b/doc/common/tables/swift-account-server-pipeline-main.xml deleted file mode 100644 index 3b913e957f..0000000000 --- a/doc/common/tables/swift-account-server-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in account-server.conf -
Configuration option = Default valueDescription
= healthcheck recon account-serverNo help text available for this option.
-
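The three account-server tables above ([pipeline-main], [filter-healthcheck], [filter-recon]) correspond to paste.deploy sections in account-server.conf. A short sketch follows, again with option names assumed from the upstream sample rather than taken from the tables::

    [pipeline:main]
    pipeline = healthcheck recon account-server

    [filter:healthcheck]
    use = egg:swift#healthcheck
    # disable_path =              # if this file exists, healthcheck returns 503

    [filter:recon]
    use = egg:swift#recon
    # recon_cache_path = /var/cache/swift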
diff --git a/doc/common/tables/swift-container-reconciler-DEFAULT.xml b/doc/common/tables/swift-container-reconciler-DEFAULT.xml deleted file mode 100644 index d891034a07..0000000000 --- a/doc/common/tables/swift-container-reconciler-DEFAULT.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in container-reconciler.conf -
Configuration option = Default valueDescription
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0. If the frequency of logging is too high, tune log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= /etc/swiftSwift configuration directory
= swiftUser to run as
-
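Most of the [DEFAULT] rows above are shared logging and StatsD knobs. A hedged sketch of how they typically appear in container-reconciler.conf, with names assumed from the upstream sample::

    [DEFAULT]
    # log_address = /dev/log
    # log_facility = LOG_LOCAL0
    # log_level = INFO
    # log_name = swift
    # StatsD stays disabled until a host is set:
    # log_statsd_host = localhost
    # log_statsd_port = 8125
    # log_statsd_default_sample_rate = 1.0
    # swift_dir = /etc/swift
    # user = swift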
diff --git a/doc/common/tables/swift-container-reconciler-app-proxy-server.xml b/doc/common/tables/swift-container-reconciler-app-proxy-server.xml deleted file mode 100644 index 4441170acd..0000000000 --- a/doc/common/tables/swift-container-reconciler-app-proxy-server.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [app-proxy-server] in container-reconciler.conf -
Configuration option = Default valueDescription
= egg:swift#proxyEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-reconciler-container-reconciler.xml b/doc/common/tables/swift-container-reconciler-container-reconciler.xml deleted file mode 100644 index 7991ce0362..0000000000 --- a/doc/common/tables/swift-container-reconciler-container-reconciler.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [container-reconciler] in container-reconciler.conf -
Configuration option = Default valueDescription
= 30Minimum time for a pass to take
= 604800Time elapsed in seconds before an object can be reclaimed
= 3No help text available for this option.
-
diff --git a/doc/common/tables/swift-container-reconciler-filter-cache.xml b/doc/common/tables/swift-container-reconciler-filter-cache.xml deleted file mode 100644 index 641564c251..0000000000 --- a/doc/common/tables/swift-container-reconciler-filter-cache.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-cache] in container-reconciler.conf -
Configuration option = Default valueDescription
= egg:swift#memcacheEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-reconciler-filter-catch_errors.xml b/doc/common/tables/swift-container-reconciler-filter-catch_errors.xml deleted file mode 100644 index 601a7fac85..0000000000 --- a/doc/common/tables/swift-container-reconciler-filter-catch_errors.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-catch_errors] in container-reconciler.conf -
Configuration option = Default valueDescription
= egg:swift#catch_errorsEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-reconciler-filter-proxy-logging.xml b/doc/common/tables/swift-container-reconciler-filter-proxy-logging.xml deleted file mode 100644 index 756ed07937..0000000000 --- a/doc/common/tables/swift-container-reconciler-filter-proxy-logging.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-proxy-logging] in container-reconciler.conf -
Configuration option = Default valueDescription
= egg:swift#proxy_loggingEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-reconciler-pipeline-main.xml b/doc/common/tables/swift-container-reconciler-pipeline-main.xml deleted file mode 100644 index 1028e5ddaf..0000000000 --- a/doc/common/tables/swift-container-reconciler-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in container-reconciler.conf -
Configuration option = Default valueDescription
= catch_errors proxy-logging cache proxy-serverNo help text available for this option.
-
diff --git a/doc/common/tables/swift-container-server-DEFAULT.xml b/doc/common/tables/swift-container-server-DEFAULT.xml deleted file mode 100644 index 7572d6cbda..0000000000 --- a/doc/common/tables/swift-container-server-DEFAULT.xml +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in container-server.conf -
Configuration option = Default valueDescription
= 127.0.0.1No help text available for this option.
= 4096Maximum number of allowed pending TCP connections
= 0.0.0.0IP Address for server to bind to
= 6001Port for server to bind to
= 30Seconds to attempt bind before giving up
= offIf you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation.
= /srv/nodeParent directory of where devices are mounted
= falseDisable "fast fail" fallocate checks if the underlying filesystem does not support it.
= falseIf true, turn on debug logging for eventlet
= 0You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not. This is useful for systems that behave badly when they completely run out of space; you can make the services pretend they're out of space early.
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= 0Caps the length of log lines to the value given; no limit if set to 0, the default.
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0. If the frequency of logging is too high, tune log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= 1024Maximum number of clients one worker can process simultaneously. Lowering the number of clients handled per worker, and raising the number of workers, can lessen the impact that a CPU-intensive or blocking request has on other requests served by the same worker. If the maximum number of clients is set to one, then a given worker will not perform another call while processing, allowing other workers a chance to process it.
= trueWhether or not to check if the devices are mounted to prevent accidentally writing to the root device
= /etc/swiftSwift configuration directory
= swiftUser to run as
= autoNumber of workers to spawn; "auto" defaults to the number of CPU cores. By increasing the number of workers to a much higher value, one can reduce the impact of slow file system operations in one request from negatively impacting other requests.
-
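For orientation, a minimal container-server.conf [DEFAULT] sketch covering the most commonly tuned rows above; the option names are assumptions based on the upstream sample and are not part of the deleted table::

    [DEFAULT]
    bind_ip = 0.0.0.0
    bind_port = 6001
    # workers = auto              # "auto" forks roughly one worker per CPU core
    # mount_check = true          # refuse to use device paths that are not mounted
    # devices = /srv/node
    # db_preallocation = off
    # fallocate_reserve = 0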
diff --git a/doc/common/tables/swift-container-server-app-container-server.xml b/doc/common/tables/swift-container-server-app-container-server.xml deleted file mode 100644 index b89ea27b20..0000000000 --- a/doc/common/tables/swift-container-server-app-container-server.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [app-container-server] in container-server.conf -
Configuration option = Default valueDescription
= falseEnable/Disable object versioning feature
= .Prefix to use when automatically creating accounts
= 0.5Connection timeout to external services
= 3Request timeout to external services
= falseIf defined, tells server how to handle replication verbs in requests. When set to True (or 1), only replication verbs will be accepted. When set to False, replication verbs will be rejected. When undefined, server will accept any verb in the request.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLog level
= container-serverLabel to use when logging
= trueWhether or not to log requests
= egg:swift#containerEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-server-container-auditor.xml b/doc/common/tables/swift-container-server-container-auditor.xml deleted file mode 100644 index c31b82e7de..0000000000 --- a/doc/common/tables/swift-container-server-container-auditor.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [container-auditor] in container-server.conf -
Configuration option = Default valueDescription
= 200Maximum containers audited per second. Should be tuned according to individual system specs. 0 is unlimited.
= 1800Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= container-auditorLabel used when logging
= /var/cache/swiftDirectory where stats for a few items will be stored
-
diff --git a/doc/common/tables/swift-container-server-container-replicator.xml b/doc/common/tables/swift-container-server-container-replicator.xml deleted file mode 100644 index 0605a843ef..0000000000 --- a/doc/common/tables/swift-container-server-container-replicator.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [container-replicator] in container-server.conf -
Configuration option = Default valueDescription
= 8Number of replication workers to spawn
= 0.5Connection timeout to external services
= 30Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= container-replicatorLabel used when logging
= 100Caps how long the replicator spends trying to sync a database per pass
= 10Request timeout to external services
= 1000Limit number of items to get per diff
= 604800Time elapsed in seconds before an object can be reclaimed
= /var/cache/swiftDirectory where stats for a few items will be stored
= noNo help text available for this option.
= {replication_ip}::containerNo help text available for this option.
= 30Time in seconds to wait between replication passes
-
diff --git a/doc/common/tables/swift-container-server-container-sync.xml b/doc/common/tables/swift-container-server-container-sync.xml deleted file mode 100644 index 39dcc1fefc..0000000000 --- a/doc/common/tables/swift-container-server-container-sync.xml +++ /dev/null @@ -1,61 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [container-sync] in container-server.conf -
Configuration option = Default valueDescription
= 5Connection timeout to external services
= 60Maximum amount of time to spend syncing each container
= /etc/swift/internal-client.confNo help text available for this option.
= 300Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= container-syncLabel used when logging
= 3No help text available for this option.
= http://10.1.1.1:8888,http://10.1.1.2:8888If you need to use an HTTP proxy, set it here. Defaults to no proxy.
-
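A short [container-sync] sketch for container-server.conf; the names are taken from the upstream sample and are illustrative only::

    [container-sync]
    # log_name = container-sync
    # interval = 300              # minimum time for a pass to take
    # container_time = 60         # maximum time to spend syncing one container
    # conn_timeout = 5
    # internal_client_conf_path = /etc/swift/internal-client.conf
    # sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888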
diff --git a/doc/common/tables/swift-container-server-container-updater.xml b/doc/common/tables/swift-container-server-container-updater.xml deleted file mode 100644 index 92373ea08c..0000000000 --- a/doc/common/tables/swift-container-server-container-updater.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [container-updater] in container-server.conf -
Configuration option = Default valueDescription
= 60Seconds to suppress updating an account that has generated an error (timeout, not yet found, etc.)
= 4Number of replication workers to spawn
= 0.5Connection timeout to external services
= 300Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= container-updaterLabel used when logging
= 3Request timeout to external services
= /var/cache/swiftDirectory where stats for a few items will be stored
= 0.01Time in seconds to wait between objects
-
diff --git a/doc/common/tables/swift-container-server-filter-healthcheck.xml b/doc/common/tables/swift-container-server-filter-healthcheck.xml deleted file mode 100644 index 01ff5b152c..0000000000 --- a/doc/common/tables/swift-container-server-filter-healthcheck.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-healthcheck] in container-server.conf -
Configuration option = Default valueDescription
= No help text available for this option.
= egg:swift#healthcheckEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-server-filter-recon.xml b/doc/common/tables/swift-container-server-filter-recon.xml deleted file mode 100644 index e100e78ff9..0000000000 --- a/doc/common/tables/swift-container-server-filter-recon.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-recon] in container-server.conf -
Configuration option = Default valueDescription
= /var/cache/swiftDirectory where stats for a few items will be stored
= egg:swift#reconEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-server-filter-xprofile.xml b/doc/common/tables/swift-container-server-filter-xprofile.xml deleted file mode 100644 index 9283e54644..0000000000 --- a/doc/common/tables/swift-container-server-filter-xprofile.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-xprofile] in container-server.conf -
Configuration option = Default valueDescription
= 5.0No help text available for this option.
= falseNo help text available for this option.
= falseNo help text available for this option.
= /tmp/log/swift/profile/default.profileNo help text available for this option.
= /__profile__No help text available for this option.
= eventlet.green.profileNo help text available for this option.
= falseNo help text available for this option.
= egg:swift#xprofileEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-container-server-pipeline-main.xml b/doc/common/tables/swift-container-server-pipeline-main.xml deleted file mode 100644 index bedc383bc9..0000000000 --- a/doc/common/tables/swift-container-server-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in container-server.conf -
Configuration option = Default valueDescription
= healthcheck recon container-serverNo help text available for this option.
-
diff --git a/doc/common/tables/swift-container-sync-realms-DEFAULT.xml b/doc/common/tables/swift-container-sync-realms-DEFAULT.xml deleted file mode 100644 index 5fa6fb6a69..0000000000 --- a/doc/common/tables/swift-container-sync-realms-DEFAULT.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in container-sync-realms.conf -
Configuration option = Default valueDescription
= 300No help text available for this option.
-
diff --git a/doc/common/tables/swift-container-sync-realms-realm1.xml b/doc/common/tables/swift-container-sync-realms-realm1.xml deleted file mode 100644 index f6f75e6875..0000000000 --- a/doc/common/tables/swift-container-sync-realms-realm1.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [realm1] in container-sync-realms.conf -
Configuration option = Default valueDescription
= https://host1/v1/No help text available for this option.
= https://host2/v1/No help text available for this option.
= realm1keyNo help text available for this option.
= realm1key2No help text available for this option.
-
diff --git a/doc/common/tables/swift-container-sync-realms-realm2.xml b/doc/common/tables/swift-container-sync-realms-realm2.xml deleted file mode 100644 index e674eacbef..0000000000 --- a/doc/common/tables/swift-container-sync-realms-realm2.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [realm2] in container-sync-realms.conf -
Configuration option = Default valueDescription
= https://host3/v1/No help text available for this option.
= https://host4/v1/No help text available for this option.
= realm2keyNo help text available for this option.
= realm2key2No help text available for this option.
-
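The two realm tables above describe container-sync-realms.conf. A hedged sketch of one realm, with key names assumed from the upstream sample::

    [DEFAULT]
    # mtime_check_interval = 300

    [realm1]
    key = realm1key
    key2 = realm1key2
    cluster_clustername1 = https://host1/v1/
    cluster_clustername2 = https://host2/v1/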
diff --git a/doc/common/tables/swift-dispersion-dispersion.xml b/doc/common/tables/swift-dispersion-dispersion.xml deleted file mode 100644 index 7c1ec98b74..0000000000 --- a/doc/common/tables/swift-dispersion-dispersion.xml +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [dispersion] in dispersion.conf -
Configuration option = Default valueDescription
= testingNo help text available for this option.
= http://localhost:8080/auth/v1.0Endpoint for auth server, such as keystone
= test:testerDefault user for dispersion in this context
= 1.0Indicates which version of auth
= 25Number of replication workers to spawn
= yesNo help text available for this option.
= yesNo help text available for this option.
= 1.0No help text available for this option.
= noNo help text available for this option.
= publicURLIndicates whether endpoint for auth is public or internal
= noAllow accessing insecure keystone server. The keystone's certificate will not be verified.
= yesNo help text available for this option.
= yesNo help text available for this option.
= project_domainNo help text available for this option.
= projectNo help text available for this option.
= 5No help text available for this option.
= /etc/swiftSwift configuration directory
= user_domainNo help text available for this option.
-
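A minimal dispersion.conf sketch matching the values above; the option names are assumptions from the upstream swift-dispersion sample::

    [dispersion]
    auth_url = http://localhost:8080/auth/v1.0
    auth_user = test:tester
    auth_key = testing
    # auth_version = 1.0
    # endpoint_type = publicURL
    # keystone_api_insecure = no
    # dispersion_coverage = 1.0
    # retries = 5
    # concurrency = 25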
diff --git a/doc/common/tables/swift-drive-audit-drive-audit.xml b/doc/common/tables/swift-drive-audit-drive-audit.xml deleted file mode 100644 index 30aeb090ee..0000000000 --- a/doc/common/tables/swift-drive-audit-drive-audit.xml +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [drive-audit] in drive-audit.conf -
Configuration option = Default valueDescription
= /srv/nodeDirectory devices are mounted under
= 1Number of errors to find before a device is unmounted
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= /var/log/kern.*[!.][!g][!z]Location of the log file, with a globbing pattern, to check for device errors
= INFOLogging level
= 0Caps the length of log lines to the value given; no limit if set to 0, the default.
= FalseNo help text available for this option.
= 60Number of minutes to look back in `/var/log/kern.log`
= /var/cache/swiftDirectory where stats for a few items will be stored
= \berror\b.*\b(dm-[0-9]{1,2}\d?)\bNo help text available for this option.
= TrueNo help text available for this option.
-
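A hedged drive-audit.conf sketch for the table above; the names follow the upstream swift-drive-audit sample and are illustrative::

    [drive-audit]
    # device_dir = /srv/node
    # minutes = 60                # how far back to scan the kernel log
    # error_limit = 1             # unmount a device after this many errors
    # log_file_pattern = /var/log/kern.*[!.][!g][!z]
    # regex_pattern_1 = \berror\b.*\b(dm-[0-9]{1,2}\d?)\b
    # recon_cache_path = /var/cache/swift
    # unmount_failed_device = True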
diff --git a/doc/common/tables/swift-internal-client-DEFAULT.xml b/doc/common/tables/swift-internal-client-DEFAULT.xml deleted file mode 100644 index c0b373c2da..0000000000 --- a/doc/common/tables/swift-internal-client-DEFAULT.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in internal-client.conf -
Configuration option = Default valueDescription
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0. If the frequency of logging is too high, tune log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= /etc/swiftSwift configuration directory
= swiftUser to run as
-
diff --git a/doc/common/tables/swift-internal-client-app-proxy-server.xml b/doc/common/tables/swift-internal-client-app-proxy-server.xml deleted file mode 100644 index 37b52a489b..0000000000 --- a/doc/common/tables/swift-internal-client-app-proxy-server.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [app-proxy-server] in internal-client.conf -
Configuration option = Default valueDescription
= egg:swift#proxyEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-internal-client-filter-cache.xml b/doc/common/tables/swift-internal-client-filter-cache.xml deleted file mode 100644 index 035295fdec..0000000000 --- a/doc/common/tables/swift-internal-client-filter-cache.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-cache] in internal-client.conf -
Configuration option = Default valueDescription
= egg:swift#memcacheEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-internal-client-filter-catch_errors.xml b/doc/common/tables/swift-internal-client-filter-catch_errors.xml deleted file mode 100644 index 8e65ee1b4e..0000000000 --- a/doc/common/tables/swift-internal-client-filter-catch_errors.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-catch_errors] in internal-client.conf -
Configuration option = Default valueDescription
= egg:swift#catch_errorsEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-internal-client-filter-proxy-logging.xml b/doc/common/tables/swift-internal-client-filter-proxy-logging.xml deleted file mode 100644 index 09a130f3d9..0000000000 --- a/doc/common/tables/swift-internal-client-filter-proxy-logging.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-proxy-logging] in internal-client.conf -
Configuration option = Default valueDescription
= egg:swift#proxy_loggingEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-internal-client-pipeline-main.xml b/doc/common/tables/swift-internal-client-pipeline-main.xml deleted file mode 100644 index 823bb16762..0000000000 --- a/doc/common/tables/swift-internal-client-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in internal-client.conf -
Configuration option = Default valueDescription
= catch_errors proxy-logging cache proxy-serverNo help text available for this option.
-
diff --git a/doc/common/tables/swift-memcache-memcache.xml b/doc/common/tables/swift-memcache-memcache.xml deleted file mode 100644 index 8207544573..0000000000 --- a/doc/common/tables/swift-memcache-memcache.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [memcache] in memcache.conf -
Configuration option = Default valueDescription
= 0.3Timeout in seconds (float) for connection
= 2.0Timeout in seconds (float) for read and write
= 2Max number of connections to each memcached server per worker
= 2Sets how memcache values are serialized and deserialized
= 127.0.0.1:11211Comma-separated list of memcached servers (ip:port)
= 1.0Timeout in seconds (float) for pooled connection
= 3Number of servers to retry on failures getting a pooled connection
-
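A minimal memcache.conf sketch; the option names are assumed from the upstream sample and are shown only to make the timeouts above concrete::

    [memcache]
    memcache_servers = 127.0.0.1:11211
    # memcache_serialization_support = 2
    # memcache_max_connections = 2    # per worker, per memcached server
    # connect_timeout = 0.3
    # io_timeout = 2.0
    # pool_timeout = 1.0
    # tries = 3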
diff --git a/doc/common/tables/swift-object-expirer-DEFAULT.xml b/doc/common/tables/swift-object-expirer-DEFAULT.xml deleted file mode 100644 index 0ed1d5d0dc..0000000000 --- a/doc/common/tables/swift-object-expirer-DEFAULT.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in object-expirer.conf -
Configuration option = Default valueDescription
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= 0Caps the length of log lines to the value given; no limit if set to 0, the default.
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0. If the frequency of logging is too high, tune log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= /etc/swiftSwift configuration directory
= swiftUser to run as
-
diff --git a/doc/common/tables/swift-object-expirer-app-proxy-server.xml b/doc/common/tables/swift-object-expirer-app-proxy-server.xml deleted file mode 100644 index b1091319ab..0000000000 --- a/doc/common/tables/swift-object-expirer-app-proxy-server.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [app-proxy-server] in object-expirer.conf -
Configuration option = Default valueDescription
= egg:swift#proxyEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-expirer-filter-cache.xml b/doc/common/tables/swift-object-expirer-filter-cache.xml deleted file mode 100644 index 1c31f6fa5b..0000000000 --- a/doc/common/tables/swift-object-expirer-filter-cache.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-cache] in object-expirer.conf -
Configuration option = Default valueDescription
= egg:swift#memcacheEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-expirer-filter-catch_errors.xml b/doc/common/tables/swift-object-expirer-filter-catch_errors.xml deleted file mode 100644 index c8c5f0cfd5..0000000000 --- a/doc/common/tables/swift-object-expirer-filter-catch_errors.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-catch_errors] in object-expirer.conf -
Configuration option = Default valueDescription
= egg:swift#catch_errorsEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-expirer-filter-proxy-logging.xml b/doc/common/tables/swift-object-expirer-filter-proxy-logging.xml deleted file mode 100644 index 5b1ba75736..0000000000 --- a/doc/common/tables/swift-object-expirer-filter-proxy-logging.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-proxy-logging] in object-expirer.conf -
Configuration option = Default valueDescription
= /dev/logNo help text available for this option.
= LOG_LOCAL0No help text available for this option.
= falseNo help text available for this option.
= If access_log_headers is True and access_log_headers_only is set, only these headers are logged. Multiple headers can be defined as a comma-separated list, for example: access_log_headers_only = Host, X-Object-Meta-Mtime
= INFONo help text available for this option.
= swiftNo help text available for this option.
= 1.0No help text available for this option.
= localhostNo help text available for this option.
= No help text available for this option.
= 8125No help text available for this option.
= 1.0No help text available for this option.
= No help text available for this option.
= 514No help text available for this option.
= GET,HEAD,POST,PUT,DELETE,COPY,OPTIONSNo help text available for this option.
= True.No help text available for this option.
= 16The X-Auth-Token is sensitive data. If revealed to an unauthorised person, they can make requests against an account until the token expires. Set reveal_sensitive_prefix to the number of characters of the token that are logged; for example, reveal_sensitive_prefix = 12 logs only the first 12 characters of the token. Or, set to 0 to completely remove the token.
= egg:swift#proxy_loggingEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-expirer-object-expirer.xml b/doc/common/tables/swift-object-expirer-object-expirer.xml deleted file mode 100644 index f064f8d61f..0000000000 --- a/doc/common/tables/swift-object-expirer-object-expirer.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object-expirer] in object-expirer.conf -
Configuration option = Default valueDescription
= .Prefix to use when automatically creating accounts
= 1Number of replication workers to spawn
= expiring_objectsNo help text available for this option.
= 300Minimum time for a pass to take
= 0No help text available for this option.
= 0No help text available for this option.
= 604800Time elapsed in seconds before an object can be reclaimed
= /var/cache/swiftDirectory where stats for a few items will be stored
= 300No help text available for this option.
-
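A hedged [object-expirer] sketch for object-expirer.conf; the names are assumptions from the upstream sample. The process/processes pair is how the expirer shards its work across daemons::

    [object-expirer]
    # interval = 300
    # report_interval = 300
    # concurrency = 1
    # expiring_objects_account_name = expiring_objects
    # reclaim_age = 604800
    # recon_cache_path = /var/cache/swift
    # processes = 0     # how many parts the work is divided into (assumption)
    # process = 0       # which part this daemon handles (assumption)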
diff --git a/doc/common/tables/swift-object-expirer-pipeline-main.xml b/doc/common/tables/swift-object-expirer-pipeline-main.xml deleted file mode 100644 index 54e6b34ebf..0000000000 --- a/doc/common/tables/swift-object-expirer-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in object-expirer.conf -
Configuration option = Default valueDescription
= catch_errors proxy-logging cache proxy-serverNo help text available for this option.
-
diff --git a/doc/common/tables/swift-object-server-DEFAULT.xml b/doc/common/tables/swift-object-server-DEFAULT.xml deleted file mode 100644 index 02b1634676..0000000000 --- a/doc/common/tables/swift-object-server-DEFAULT.xml +++ /dev/null @@ -1,170 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in object-server.conf -
Configuration option = Default valueDescription
= 4096Maximum number of allowed pending TCP connections
= 0.0.0.0IP Address for server to bind to
= 6000Port for server to bind to
= 30Seconds to attempt bind before giving up
= 60Timeout to read one chunk from a client
= 0.5Connection timeout to external services
= 1.0Time to wait while sending a container update on object update.
= /srv/nodeParent directory of where devices are mounted
= falseDisable "fast fail" fallocate checks if the underlying filesystem does not support it.
= 65536Size of chunks to read/write to disk
= falseIf true, turn on debug logging for eventlet
= expiring_objectsNo help text available for this option.
= 86400No help text available for this option.
= 0You can set fallocate_reserve to the number of bytes you'd like fallocate to reserve, whether there is space for the given file size or not. This is useful for systems that behave badly when they completely run out of space; you can make the services pretend they're out of space early.
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= 0Caps the length of log lines to the value given; no limit if set to 0, the default.
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0. If the frequency of logging is too high, tune log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= 1024Maximum number of clients one worker can process simultaneously. Lowering the number of clients handled per worker, and raising the number of workers, can lessen the impact that a CPU-intensive or blocking request has on other requests served by the same worker. If the maximum number of clients is set to one, then a given worker will not perform another call while processing, allowing other workers a chance to process it.
= trueWhether or not to check if the devices are mounted to prevent accidentally writing to the root device
= 65536Size of chunks to read/write over the network
= 3Request timeout to external services
= 0If each disk in each storage policy ring has unique port numbers for its "ip" value, you can use this setting to have each object-server worker only service requests for the single disk matching the port in the ring. The value of this setting determines how many worker processes run for each port (disk) in the ring.
= /etc/swiftSwift configuration directory
= swiftUser to run as
= autoNumber of workers to spawn; "auto" defaults to the number of CPU cores. By increasing the number of workers to a much higher value, one can reduce the impact of slow file system operations in one request from negatively impacting other requests.
-
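A minimal object-server.conf [DEFAULT] sketch touching the rows above, including servers_per_port; the names are assumed from the upstream sample::

    [DEFAULT]
    bind_ip = 0.0.0.0
    bind_port = 6000
    # workers = auto
    # servers_per_port = 0        # > 0 runs dedicated workers per ring port (disk)
    # devices = /srv/node
    # mount_check = true
    # disk_chunk_size = 65536
    # network_chunk_size = 65536
    # client_timeout = 60
    # conn_timeout = 0.5
    # node_timeout = 3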
diff --git a/doc/common/tables/swift-object-server-app-object-server.xml b/doc/common/tables/swift-object-server-app-object-server.xml deleted file mode 100644 index ccb8284e9a..0000000000 --- a/doc/common/tables/swift-object-server-app-object-server.xml +++ /dev/null @@ -1,105 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [app-object-server] in object-server.conf -
Configuration option = Default valueDescription
= Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-ObjectComma-separated list of headers that can be set in metadata of an object
= .Prefix to use when automatically creating accounts
= falseAllow non-public objects to stay in kernel's buffer cache
= 5242880Largest object size to keep in buffer cache
= 86400Maximum time allowed to upload an object
= 512On PUT requests, sync file every n MB
= 4Set to restrict the number of concurrent incoming REPLICATION requests; set to 0 for unlimited
= 1.0If the value of failures / successes of REPLICATION subrequests exceeds this ratio, the overall REPLICATION request will be aborted
= 100The number of subrequest failures before the replication_failure_ratio is checked
= 15Number of seconds to wait for an existing replication device lock before giving up.
= TrueRestricts incoming REPLICATION requests to one per device, replication_concurrency above allowing. This can help control I/O to each device, but you may wish to set this to False to allow multiple REPLICATION requests (up to the above replication_concurrency setting) per device.
= falseIf defined, tells server how to handle replication verbs in requests. When set to True (or 1), only replication verbs will be accepted. When set to False, replication verbs will be rejected. When undefined, server will accept any verb in the request.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLog level
= object-serverLabel to use when logging
= trueWhether or not to log requests
= 0If > 0, Minimum time in seconds for a PUT or DELETE request to complete
= noNo help text available for this option.
= 0Size of the per-disk thread pool used for performing disk I/O. The default of 0 means to not use a per-disk thread pool. It is recommended to keep this value small, as large values can result in high read latencies due to large queue depths. A good starting point is 4 threads per disk.
= egg:swift#objectEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-server-filter-healthcheck.xml b/doc/common/tables/swift-object-server-filter-healthcheck.xml deleted file mode 100644 index b271457208..0000000000 --- a/doc/common/tables/swift-object-server-filter-healthcheck.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-healthcheck] in object-server.conf -
Configuration option = Default valueDescription
= No help text available for this option.
= egg:swift#healthcheckEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-server-filter-recon.xml b/doc/common/tables/swift-object-server-filter-recon.xml deleted file mode 100644 index d3d3a69317..0000000000 --- a/doc/common/tables/swift-object-server-filter-recon.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-recon] in object-server.conf -
Configuration option = Default valueDescription
= /var/cache/swiftDirectory where stats for a few items will be stored
= /var/lockNo help text available for this option.
= egg:swift#reconEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-server-filter-xprofile.xml b/doc/common/tables/swift-object-server-filter-xprofile.xml deleted file mode 100644 index 82eb65124b..0000000000 --- a/doc/common/tables/swift-object-server-filter-xprofile.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-xprofile] in object-server.conf -
Configuration option = Default valueDescription
= 5.0No help text available for this option.
= falseNo help text available for this option.
= falseNo help text available for this option.
= /tmp/log/swift/profile/default.profileNo help text available for this option.
= /__profile__No help text available for this option.
= eventlet.green.profileNo help text available for this option.
= falseNo help text available for this option.
= egg:swift#xprofileEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-object-server-object-auditor.xml b/doc/common/tables/swift-object-server-object-auditor.xml deleted file mode 100644 index bfbe83bfc4..0000000000 --- a/doc/common/tables/swift-object-server-object-auditor.xml +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object-auditor] in object-server.conf -
Configuration option = Default valueDescription
= 10000000Maximum bytes audited per second. Should be tuned according to individual system specs. 0 is unlimited.
= 1Number of replication workers to spawn
= 65536Size of chunks to read/write to disk
= 20Maximum files audited per second. Should be tuned according to individual system specs. 0 is unlimited.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= object-auditorLabel used when logging
= 3600Frequency of status logs in seconds.
= No help text available for this option.
= /var/cache/swiftDirectory where stats for a few items will be stored
= 50Maximum zero byte files audited per second.
-
diff --git a/doc/common/tables/swift-object-server-object-reconstructor.xml b/doc/common/tables/swift-object-server-object-reconstructor.xml deleted file mode 100644 index df5e3cc4fe..0000000000 --- a/doc/common/tables/swift-object-server-object-reconstructor.xml +++ /dev/null @@ -1,85 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object-reconstructor] in object-server.conf -
Configuration option = Default valueDescription
= 1Number of replication workers to spawn
= onWhether or not to run replication as a daemon
= FalseIf set to True, partitions that are not supposed to be on the node will be replicated first. The default setting should not be changed, except for extreme situations.
= 60Maximum duration for an HTTP request
= 30Minimum time for a pass to take
= 1800Attempts to kill all workers if nothing replicates for lockup_timeout seconds
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= object-reconstructorLabel used when logging
= 10Request timeout to external services
= 604800Time elapsed in seconds before an object can be reclaimed
= /var/cache/swiftDirectory where stats for a few items will be stored
= 15How often (in seconds) to check the ring
= 30Time in seconds to wait between replication passes
= 300Interval in seconds between logging replication statistics
-
diff --git a/doc/common/tables/swift-object-server-object-replicator.xml b/doc/common/tables/swift-object-server-object-replicator.xml deleted file mode 100644 index 0bfd289fa9..0000000000 --- a/doc/common/tables/swift-object-server-object-replicator.xml +++ /dev/null @@ -1,117 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object-replicator] in object-server.conf -
Configuration option = Default valueDescription
= 1Number of replication workers to spawn
= onWhether or not to run replication as a daemon
= autoBy default, handoff partitions are removed when they have been successfully replicated to all the canonical nodes. If set to an integer n, the partition is removed once it has been successfully replicated to n nodes. The default setting should not be changed, except for extreme situations.
= FalseIf set to True, partitions that are not supposed to be on the node will be replicated first. The default setting should not be changed, except for extreme situations.
= 60Maximum duration for an HTTP request
= 30Minimum time for a pass to take
= 1800Attempts to kill all workers if nothing replicates for lockup_timeout seconds
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= object-replicatorLabel used when logging
= <whatever's in the DEFAULT section or 10>Request timeout to external services
= 604800Time elapsed in seconds before an object can be reclaimed
= /var/cache/swiftDirectory where stats for a few items will be stored
= 15How often (in seconds) to check the ring
= 0No help text available for this option.
= noNo help text available for this option.
= 0No help text available for this option.
= 30Passed to rsync for a max duration (seconds) of an I/O op
= {replication_ip}::objectNo help text available for this option.
= 900Max duration (seconds) of a partition rsync
= 30Time in seconds to wait between replication passes
= 300Interval in seconds between logging replication statistics
= rsyncNo help text available for this option.
-
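An illustrative [object-replicator] sketch for object-server.conf, with names assumed from the upstream sample; the rsync options only apply when sync_method = rsync::

    [object-replicator]
    # concurrency = 1
    # daemonize = on
    # interval = 30
    # lockup_timeout = 1800
    # reclaim_age = 604800
    # sync_method = rsync
    # rsync_timeout = 900         # max duration of a single partition rsync
    # rsync_io_timeout = 30       # passed to rsync as the max duration of an I/O op
    # rsync_module = {replication_ip}::object
    # handoffs_first = False
    # handoff_delete = auto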
diff --git a/doc/common/tables/swift-object-server-object-updater.xml b/doc/common/tables/swift-object-server-object-updater.xml deleted file mode 100644 index fc52016f9c..0000000000 --- a/doc/common/tables/swift-object-server-object-updater.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object-updater] in object-server.conf -
Configuration option = Default valueDescription
= 1Number of replication workers to spawn
= 300Minimum time for a pass to take
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLogging level
= object-updaterLabel used when logging
= <whatever's in the DEFAULT section or 10>Request timeout to external services
= /var/cache/swiftDirectory where stats for a few items will be stored
= 0.01Time in seconds to wait between objects
-
diff --git a/doc/common/tables/swift-object-server-pipeline-main.xml b/doc/common/tables/swift-object-server-pipeline-main.xml deleted file mode 100644 index 640b577f0a..0000000000 --- a/doc/common/tables/swift-object-server-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in object-server.conf -
Configuration option = Default valueDescription
= healthcheck recon object-serverNo help text available for this option.
-
diff --git a/doc/common/tables/swift-proxy-server-DEFAULT.xml b/doc/common/tables/swift-proxy-server-DEFAULT.xml deleted file mode 100644 index 0b206c7aed..0000000000 --- a/doc/common/tables/swift-proxy-server-DEFAULT.xml +++ /dev/null @@ -1,166 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [DEFAULT] in proxy-server.conf -
Configuration option = Default valueDescription
= secret_admin_keyKey to use for admin calls that are HMAC signed. Default is empty, which will disable admin calls to /info.
= 4096Maximum number of allowed pending TCP connections
= 0.0.0.0IP Address for server to bind to
= 8080Port for server to bind to
= 30Seconds to attempt bind before giving up
= /etc/swift/proxy.crtPath to the ssl .crt. This should be enabled for testing purposes only.
= 60Timeout to read one chunk from a client
= This is a list of hosts that are included with any CORS request by default and returned with the Access-Control-Allow-Origin header in addition to what the container has set.
= swift.valid_api_versions, container_quotas, tempurlNo help text available for this option.
= falseIf true, turn on debug logging for eventlet
= expiring_objectsNo help text available for this option.
= 86400No help text available for this option.
= trueEnables exposing configuration settings via HTTP GET /info.
= /etc/swift/proxy.keyPath to the ssl .key. This should be enabled for testing purposes only.
= /dev/logLocation where syslog sends the logs to
= Comma-separated list of functions to call to setup custom log handlers.
= LOG_LOCAL0Syslog log facility
= falseNo help text available for this option.
= INFOLogging level
= 0Caps the length of log lines to the value given; no limit if set to 0, the default.
= swiftLabel used when logging
= 1.0Defines the probability of sending a sample for any given event or timing measurement.
= localhostIf not set, the StatsD feature is disabled.
= Value will be prepended to every metric sent to the StatsD server.
= 8125Port value for the StatsD server.
= 1.0Not recommended to set this to a value less than 1.0. If the frequency of logging is too high, tune log_statsd_default_sample_rate instead.
= If not set, the UDP receiver for syslog is disabled.
= 514Port value for UDP receiver, if enabled.
= 1024Maximum number of clients one worker can process simultaneously. Lowering the number of clients handled per worker, and raising the number of workers, can lessen the impact that a CPU-intensive or blocking request has on other requests served by the same worker. If the maximum number of clients is set to one, then a given worker will not perform another call while processing, allowing other workers a chance to process it.
= TrueNo help text available for this option.
= /etc/swiftSwift configuration directory
= No help text available for this option.
= swiftUser to run as
= autoNumber of workers to spawn; "auto" defaults to the number of CPU cores. By increasing the number of workers to a much higher value, one can reduce the impact of slow file system operations in one request from negatively impacting other requests.
-
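To close, a minimal proxy-server.conf [DEFAULT] sketch covering the /info and SSL rows above; the option names are assumptions from the upstream sample, and SSL here is intended for testing only::

    [DEFAULT]
    bind_ip = 0.0.0.0
    bind_port = 8080
    # workers = auto
    # expose_info = true
    # admin_key = secret_admin_key    # enables HMAC-signed admin calls to /info
    # cert_file = /etc/swift/proxy.crt
    # key_file = /etc/swift/proxy.key
    # cors_allow_origin =
    # client_timeout = 60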
diff --git a/doc/common/tables/swift-proxy-server-app-proxy-server.xml b/doc/common/tables/swift-proxy-server-app-proxy-server.xml deleted file mode 100644 index eef05ce424..0000000000 --- a/doc/common/tables/swift-proxy-server-app-proxy-server.xml +++ /dev/null @@ -1,149 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [app-proxy-server] in proxy-server.conf -
Configuration option = Default valueDescription
= falseIf set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created.
= falseWhether account PUTs and DELETEs are even callable
= .Prefix to use when automatically creating accounts
= 65536Chunk size to read from clients
= 0.5Connection timeout to external services
= No help text available for this option.
= 60Time in seconds that must elapse since the last error for a node to be considered no longer error limited
= 10Error count to consider a node error limited
= trueNo help text available for this option.
= 0If set to a positive value, trying to create a container when the account already has at least this maximum containers will result in a 403 Forbidden. Note: This is a soft limit, meaning a user might exceed the cap for recheck_account_existence before the 403s kick in.
= Comma-separated list of account names that ignore the max_containers_per_account cap.
= 86400No help text available for this option.
= 10Request timeout to external services
= 65536Chunk size to read from object servers
= trueSet object_post_as_copy = false to turn on fast posts where only the metadata changes are stored anew and the original data file is kept in place. This makes for quicker posts; but since the container metadata isn't updated in this mode, features like container sync won't be able to sync posts.
= 0.5No help text available for this option.
= 10No help text available for this option.
= r1z1=100, r1z2=200, r2=300No help text available for this option.
= 60Cache timeout in seconds to send memcached for account existence
= 60Cache timeout in seconds to send memcached for container existence
= node_timeout Request timeout to external services for requests that, on failure, can be recovered from; for example, object GET.
= 2 * replicas Set to the number of nodes to contact for a normal request. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= INFOLog level
= proxy-serverLabel to use when logging
= shuffleNo help text available for this option.
= x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control These are the headers whose values will only be shown to swift_owners. The exact definition of a swift_owner is up to the auth system in use, but it usually indicates administrative responsibilities.
= 300No help text available for this option.
= egg:swift#proxyEntry point of paste.deploy in the server
= r1, r2This setting lets you trade data distribution for throughput. It makes the proxy server prefer local back-end servers for object PUT requests over non-local ones. Note that only object PUT requests are affected by the write_affinity setting; POST, GET, HEAD, DELETE, OPTIONS, and account/container PUT requests are not affected. The format is r<N> for region N or r<N>z<M> for region N, zone M. If this is set, then when handling an object PUT request, some number (see the write_affinity_node_count setting) of local backend servers will be tried before any nonlocal ones. Example: try to write to regions 1 and 2 before writing to any other nodes: write_affinity = r1, r2
= 2 * replicasThis setting is only useful in conjunction with write_affinity; it governs how many local object servers will be tried before falling back to non-local ones. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request: write_affinity_node_count = 2 * replicas
-
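A sketch of the corresponding [app:proxy-server] section, again with option names assumed from the upstream sample and illustrative values, showing how the affinity and timeout options above fit together:

    [app:proxy-server]
    use = egg:swift#proxy
    account_autocreate = false
    conn_timeout = 0.5
    node_timeout = 10
    recoverable_node_timeout = node_timeout
    sorting_method = shuffle
    read_affinity = r1z1=100, r1z2=200, r2=300
    write_affinity = r1, r2
    write_affinity_node_count = 2 * replicas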
diff --git a/doc/common/tables/swift-proxy-server-filter-account-quotas.xml b/doc/common/tables/swift-proxy-server-filter-account-quotas.xml deleted file mode 100644 index 5b1dffeb2d..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-account-quotas.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-account-quotas] in proxy-server.conf -
Configuration option = Default valueDescription
= egg:swift#account_quotasEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-authtoken.xml b/doc/common/tables/swift-proxy-server-filter-authtoken.xml deleted file mode 100644 index 89d3e5c6fd..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-authtoken.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-authtoken] in proxy-server.conf -
Configuration option = Default valueDescription
= passwordNo help text available for this option.
= serviceNo help text available for this option.
= swiftNo help text available for this option.
= http://keystonehost:5000/No help text available for this option.
= swift.cacheNo help text available for this option.
= FalseNo help text available for this option.
= http://keystonehost:35357/No help text available for this option.
= FalseNo help text available for this option.
-
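The [filter-authtoken] options above correspond to the keystonemiddleware settings in proxy-server.conf; a minimal sketch, assuming the standard keystonemiddleware filter factory and placeholder credentials:

    [filter:authtoken]
    paste.filter_factory = keystonemiddleware.auth_token:filter_factory
    auth_uri = http://keystonehost:5000/
    auth_url = http://keystonehost:35357/
    auth_plugin = password
    project_name = service
    username = swift
    password = password
    cache = swift.cache
    delay_auth_decision = False
    include_service_catalog = False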
diff --git a/doc/common/tables/swift-proxy-server-filter-bulk.xml b/doc/common/tables/swift-proxy-server-filter-bulk.xml deleted file mode 100644 index 526e0088d7..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-bulk.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-bulk] in proxy-server.conf -
Configuration option = Default valueDescription
= 0No help text available for this option.
= 10000No help text available for this option.
= 10000No help text available for this option.
= 1000No help text available for this option.
= 1000No help text available for this option.
= egg:swift#bulkEntry point of paste.deploy in the server
= 10No help text available for this option.
-
diff --git a/doc/common/tables/swift-proxy-server-filter-cache.xml b/doc/common/tables/swift-proxy-server-filter-cache.xml deleted file mode 100644 index b213c3e1d2..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-cache.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-cache] in proxy-server.conf -
Configuration option = Default valueDescription
= 2Max number of connections to each memcached server per worker services
= 2Sets how memcache values are serialized and deserialized
= 127.0.0.1:11211Comma-separated list of memcached servers ip:port services
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= cacheLabel to use when logging
= egg:swift#memcacheEntry point of paste.deploy in the server
-
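A minimal [filter:cache] section matching the defaults above (option names assumed from the upstream sample):

    [filter:cache]
    use = egg:swift#memcache
    memcache_servers = 127.0.0.1:11211
    memcache_serialization_support = 2
    memcache_max_connections = 2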
diff --git a/doc/common/tables/swift-proxy-server-filter-catch_errors.xml b/doc/common/tables/swift-proxy-server-filter-catch_errors.xml deleted file mode 100644 index e77e810fbc..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-catch_errors.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-catch_errors] in proxy-server.conf -
Configuration option = Default valueDescription
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= catch_errorsLabel to use when logging
= egg:swift#catch_errorsEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-cname_lookup.xml b/doc/common/tables/swift-proxy-server-filter-cname_lookup.xml deleted file mode 100644 index 8872fc022a..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-cname_lookup.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-cname_lookup] in proxy-server.conf -
Configuration option = Default valueDescription
= 1Because CNAMES can be recursive, specifies the number of levels through which to search.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= cname_lookupLabel to use when logging
= example.com Domain that matches your cloud. Multiple domains can be specified using a comma-separated list.
= egg:swift#cname_lookupEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-container-quotas.xml b/doc/common/tables/swift-proxy-server-filter-container-quotas.xml deleted file mode 100644 index 740f35bb94..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-container-quotas.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-container-quotas] in proxy-server.conf -
Configuration option = Default valueDescription
= egg:swift#container_quotasEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-container_sync.xml b/doc/common/tables/swift-proxy-server-filter-container_sync.xml deleted file mode 100644 index 127ef3315a..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-container_sync.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-container_sync] in proxy-server.conf -
Configuration option = Default valueDescription
= trueNo help text available for this option.
= //REALM/CLUSTERNo help text available for this option.
= egg:swift#container_syncEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-dlo.xml b/doc/common/tables/swift-proxy-server-filter-dlo.xml deleted file mode 100644 index 465c7884b9..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-dlo.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-dlo] in proxy-server.conf -
Configuration option = Default valueDescription
= 86400No help text available for this option.
= 10Rate limit the download of large object segments after this segment is downloaded.
= 1 Rate limit large object downloads at this rate.
= egg:swift#dloEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-domain_remap.xml b/doc/common/tables/swift-proxy-server-filter-domain_remap.xml deleted file mode 100644 index 6b22971bb8..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-domain_remap.xml +++ /dev/null @@ -1,62 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-domain_remap] in proxy-server.conf -
Configuration option = Default valueDescription
= No help text available for this option.
= v1Root path
= AUTHReseller prefix
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= domain_remapLabel to use when logging
= example.com Domain that matches your cloud. Multiple domains can be specified using a comma-separated list.
= egg:swift#domain_remapEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-formpost.xml b/doc/common/tables/swift-proxy-server-filter-formpost.xml deleted file mode 100644 index c75aa77435..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-formpost.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-formpost] in proxy-server.conf -
Configuration option = Default valueDescription
= egg:swift#formpostEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-gatekeeper.xml b/doc/common/tables/swift-proxy-server-filter-gatekeeper.xml deleted file mode 100644 index eba26a3eab..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-gatekeeper.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-gatekeeper] in proxy-server.conf -
Configuration option = Default valueDescription
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= gatekeeperLabel to use when logging
= egg:swift#gatekeeperEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-healthcheck.xml b/doc/common/tables/swift-proxy-server-filter-healthcheck.xml deleted file mode 100644 index adfab22ebe..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-healthcheck.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-healthcheck] in proxy-server.conf -
Configuration option = Default valueDescription
= No help text available for this option.
= egg:swift#healthcheckEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-keystoneauth.xml b/doc/common/tables/swift-proxy-server-filter-keystoneauth.xml deleted file mode 100644 index 3463e7dcfb..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-keystoneauth.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-keystoneauth] in proxy-server.conf -
Configuration option = Default valueDescription
= true The backwards compatible behavior can be disabled by setting this option to False.
= true This option allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl and formpost. If you know you are not going to use such middleware and you want a bit of extra security, you can set this to False.
= default Name of the default domain. It is identified by its UUID, which by default has the value "default".
= false If this option is set to True, a user whose username is the same as the project name and who has any role in the project is given access rights elevated to be the same as if the user had one of the operator_roles. Note that the condition compares names rather than UUIDs. This option is deprecated. It is False by default.
= admin, swiftoperator Operator role defines the users who are allowed to manage a tenant and create containers or give ACL to others. This parameter may be prefixed with an appropriate prefix.
= ResellerAdmin The reseller admin role gives the ability to create and delete accounts.
= AUTH The naming scope for the auth service. Swift storage accounts and auth tokens will begin with this prefix.
= When present, this option requires that the X-Service-Token header supplies a token from a user who has a role listed in service_roles. This parameter may be prefixed with an appropriate prefix.
= egg:swift#keystoneauthEntry point of paste.deploy in the server
-
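A sketch of [filter:keystoneauth] with the defaults listed above; option names are assumed from the upstream sample:

    [filter:keystoneauth]
    use = egg:swift#keystoneauth
    operator_roles = admin, swiftoperator
    reseller_admin_role = ResellerAdmin
    reseller_prefix = AUTH
    allow_overrides = true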
diff --git a/doc/common/tables/swift-proxy-server-filter-list-endpoints.xml b/doc/common/tables/swift-proxy-server-filter-list-endpoints.xml deleted file mode 100644 index 601b05bc77..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-list-endpoints.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-list-endpoints] in proxy-server.conf -
Configuration option = Default valueDescription
= /endpoints/No help text available for this option.
= egg:swift#list_endpointsEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-name_check.xml b/doc/common/tables/swift-proxy-server-filter-name_check.xml deleted file mode 100644 index 6d05ee478e..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-name_check.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-name_check] in proxy-server.conf -
Configuration option = Default valueDescription
= '"`<>Characters that are not allowed in a name
= /\./|/\.\./|/\.$|/\.\.$Substrings to forbid, using regular expression syntax
= 255Maximum length of a name
= egg:swift#name_checkEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-proxy-logging.xml b/doc/common/tables/swift-proxy-server-filter-proxy-logging.xml deleted file mode 100644 index c3f17d82a8..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-proxy-logging.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-proxy-logging] in proxy-server.conf -
Configuration option = Default valueDescription
= /dev/logNo help text available for this option.
= LOG_LOCAL0No help text available for this option.
= falseNo help text available for this option.
= If access_log_headers is True and access_log_headers_only is set, only these headers are logged. Multiple headers can be defined as a comma-separated list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
= INFONo help text available for this option.
= swiftNo help text available for this option.
= 1.0No help text available for this option.
= localhostNo help text available for this option.
= No help text available for this option.
= 8125No help text available for this option.
= 1.0No help text available for this option.
= No help text available for this option.
= 514No help text available for this option.
= GET,HEAD,POST,PUT,DELETE,COPY,OPTIONSNo help text available for this option.
= True.No help text available for this option.
= 16 The X-Auth-Token is sensitive data. If revealed to an unauthorised person, they can make requests against an account until the token expires. Set reveal_sensitive_prefix to the number of characters of the token that are logged. For example, reveal_sensitive_prefix = 12 logs only the first 12 characters of the token. Or, set to 0 to completely remove the token.
= egg:swift#proxy_loggingEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-ratelimit.xml b/doc/common/tables/swift-proxy-server-filter-ratelimit.xml deleted file mode 100644 index 303287fa04..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-ratelimit.xml +++ /dev/null @@ -1,101 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-ratelimit] in proxy-server.conf -
Configuration option = Default valueDescription
= c,d Comma-separated list of account names that will not be allowed. Returns a 497 response.
= 0If set, will limit PUT and DELETE requests to /account_name/container_name. Number is in requests per second.
= a,bComma separated lists of account names that will not be rate limited.
= 1000Represents how accurate the proxy servers' system clocks are with each other. 1000 means that all the proxies' clock are accurate to each other within 1 millisecond. No ratelimit should be higher than the clock accuracy.
= 100No help text available for this option.
= 50No help text available for this option.
= 20No help text available for this option.
= 100No help text available for this option.
= 50No help text available for this option.
= 20No help text available for this option.
= 0To allow visibility into rate limiting set this value > 0 and all sleeps greater than the number will be logged.
= 60App will immediately return a 498 response if the necessary sleep time ever exceeds the given max_sleep_time_seconds.
= 5Number of seconds the rate counter can drop and be allowed to catch up (at a faster than listed rate). A larger number will result in larger spikes in rate but better average accuracy.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= ratelimitLabel to use when logging
= egg:swift#ratelimitEntry point of paste.deploy in the server
= rNo help text available for this option.
-
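A sketch of [filter:ratelimit] combining the options above (names assumed from the upstream sample, values illustrative); the container_ratelimit_<size> entries limit PUT/DELETE/POST requests for containers of at least that size:

    [filter:ratelimit]
    use = egg:swift#ratelimit
    clock_accuracy = 1000
    max_sleep_time_seconds = 60
    rate_buffer_seconds = 5
    account_ratelimit = 0
    account_whitelist = a,b
    account_blacklist = c,d
    container_ratelimit_0 = 100
    container_ratelimit_10 = 50
    container_ratelimit_50 = 20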
diff --git a/doc/common/tables/swift-proxy-server-filter-slo.xml b/doc/common/tables/swift-proxy-server-filter-slo.xml deleted file mode 100644 index 925b8b4086..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-slo.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-slo] in proxy-server.conf -
Configuration option = Default valueDescription
= 86400No help text available for this option.
= 1000No help text available for this option.
= 2097152No help text available for this option.
= 1048576No help text available for this option.
= 10Rate limit the download of large object segments after this segment is downloaded.
= 0 Rate limit large object downloads at this rate.
= egg:swift#sloEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-staticweb.xml b/doc/common/tables/swift-proxy-server-filter-staticweb.xml deleted file mode 100644 index 11411df2e5..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-staticweb.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-staticweb] in proxy-server.conf -
Configuration option = Default valueDescription
= egg:swift#staticwebEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-tempauth.xml b/doc/common/tables/swift-proxy-server-filter-tempauth.xml deleted file mode 100644 index 6ca029951a..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-tempauth.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-tempauth] in proxy-server.conf -
Configuration option = Default valueDescription
= true This option allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl and formpost. If you know you are not going to use such middleware and you want a bit of extra security, you can set this to False.
= /auth/The HTTP request path prefix for the auth service. Swift itself reserves anything beginning with the letter `v`.
= No help text available for this option.
= AUTH The naming scope for the auth service. Swift storage accounts and auth tokens will begin with this prefix.
= /dev/logLocation where syslog sends the logs to
= LOG_LOCAL0Syslog log facility
= falseIf True, log headers in each request
= INFOLog level
= tempauthLabel to use when logging
= defaultScheme to return with storage urls: http, https, or default (chooses based on what the server is running as) This can be useful with an SSL load balancer in front of a non-SSL server.
= 86400The number of seconds a token is valid.
= egg:swift#tempauthEntry point of paste.deploy in the server
= admin .admin .reseller_adminNo help text available for this option.
= testing2 .adminNo help text available for this option.
= testing5 serviceNo help text available for this option.
= testing .adminNo help text available for this option.
= testing3No help text available for this option.
-
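The user_* entries above follow the tempauth syntax user_<account>_<user> = <key> [group ...]; a minimal sketch with illustrative accounts:

    [filter:tempauth]
    use = egg:swift#tempauth
    reseller_prefix = AUTH
    auth_prefix = /auth/
    token_life = 86400
    user_admin_admin = admin .admin .reseller_admin
    user_test_tester = testing .admin
    user_test2_tester2 = testing2 .admin
    user_test_tester3 = testing3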
diff --git a/doc/common/tables/swift-proxy-server-filter-tempurl.xml b/doc/common/tables/swift-proxy-server-filter-tempurl.xml deleted file mode 100644 index 9e843ea77c..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-tempurl.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-tempurl] in proxy-server.conf -
Configuration option = Default valueDescription
= Headers allowed as exceptions to incoming_remove_headers. Simply a whitespace-delimited list of header names; names can optionally end with '*' to indicate a prefix match.
= x-timestamp Headers to remove from incoming requests. Simply a whitespace-delimited list of header names; names can optionally end with '*' to indicate a prefix match.
= GET HEAD PUT POST DELETE HTTP methods allowed with Temporary URLs
= x-object-meta-public-* Headers allowed as exceptions to outgoing_remove_headers. Simply a whitespace-delimited list of header names; names can optionally end with '*' to indicate a prefix match.
= x-object-meta-* Headers to remove from outgoing responses. Simply a whitespace-delimited list of header names; names can optionally end with '*' to indicate a prefix match.
= egg:swift#tempurlEntry point of paste.deploy in the server
-
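A sketch of [filter:tempurl] with the header lists shown above (option names assumed from the upstream sample):

    [filter:tempurl]
    use = egg:swift#tempurl
    methods = GET HEAD PUT POST DELETE
    incoming_remove_headers = x-timestamp
    incoming_allow_headers =
    outgoing_remove_headers = x-object-meta-*
    outgoing_allow_headers = x-object-meta-public-*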
diff --git a/doc/common/tables/swift-proxy-server-filter-versioned_writes.xml b/doc/common/tables/swift-proxy-server-filter-versioned_writes.xml deleted file mode 100644 index f83f9e1772..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-versioned_writes.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-versioned_writes] in proxy-server.conf -
Configuration option = Default valueDescription
= falseNo help text available for this option.
= egg:swift#versioned_writesEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-filter-xprofile.xml b/doc/common/tables/swift-proxy-server-filter-xprofile.xml deleted file mode 100644 index fa2a972f1c..0000000000 --- a/doc/common/tables/swift-proxy-server-filter-xprofile.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [filter-xprofile] in proxy-server.conf -
Configuration option = Default valueDescription
= 5.0No help text available for this option.
= falseNo help text available for this option.
= falseNo help text available for this option.
= /tmp/log/swift/profile/default.profileNo help text available for this option.
= /__profile__No help text available for this option.
= eventlet.green.profileNo help text available for this option.
= falseNo help text available for this option.
= egg:swift#xprofileEntry point of paste.deploy in the server
-
diff --git a/doc/common/tables/swift-proxy-server-pipeline-main.xml b/doc/common/tables/swift-proxy-server-pipeline-main.xml deleted file mode 100644 index 4fe77ec934..0000000000 --- a/doc/common/tables/swift-proxy-server-pipeline-main.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [pipeline-main] in proxy-server.conf -
Configuration option = Default valueDescription
= catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-serverNo help text available for this option.
-
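For reference, the default pipeline above is declared in proxy-server.conf like this (ordering matters: catch_errors and gatekeeper first, proxy-server last):

    [pipeline:main]
    pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server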
diff --git a/doc/common/tables/swift-rsyncd-account.xml b/doc/common/tables/swift-rsyncd-account.xml deleted file mode 100644 index f1ef0c0193..0000000000 --- a/doc/common/tables/swift-rsyncd-account.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [account] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/account.lockNo help text available for this option.
= 2No help text available for this option.
= /srv/nodeNo help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-container.xml b/doc/common/tables/swift-rsyncd-container.xml deleted file mode 100644 index bb12c42f84..0000000000 --- a/doc/common/tables/swift-rsyncd-container.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [container] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/container.lockNo help text available for this option.
= 4No help text available for this option.
= /srv/nodeNo help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object.xml b/doc/common/tables/swift-rsyncd-object.xml deleted file mode 100644 index 8de24ce514..0000000000 --- a/doc/common/tables/swift-rsyncd-object.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object.lockNo help text available for this option.
= 8No help text available for this option.
= /srv/nodeNo help text available for this option.
= falseNo help text available for this option.
= {replication_ip}::object_{device}No help text available for this option.
-
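A minimal rsyncd.conf [object] module matching the defaults above; note that rsync module options use spaces in their names:

    [object]
    max connections = 8
    path = /srv/node
    read only = false
    lock file = /var/lock/object.lock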
diff --git a/doc/common/tables/swift-rsyncd-object6010.xml b/doc/common/tables/swift-rsyncd-object6010.xml deleted file mode 100644 index 0f2d0b477b..0000000000 --- a/doc/common/tables/swift-rsyncd-object6010.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object6010] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object6010.lockNo help text available for this option.
= 25No help text available for this option.
= /srv/1/node/No help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object6020.xml b/doc/common/tables/swift-rsyncd-object6020.xml deleted file mode 100644 index 4eb6d14797..0000000000 --- a/doc/common/tables/swift-rsyncd-object6020.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object6020] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object6020.lockNo help text available for this option.
= 25No help text available for this option.
= /srv/2/node/No help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object6030.xml b/doc/common/tables/swift-rsyncd-object6030.xml deleted file mode 100644 index 8ea838d3b2..0000000000 --- a/doc/common/tables/swift-rsyncd-object6030.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object6030] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object6030.lockNo help text available for this option.
= 25No help text available for this option.
= /srv/3/node/No help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object6040.xml b/doc/common/tables/swift-rsyncd-object6040.xml deleted file mode 100644 index c21b4ed31d..0000000000 --- a/doc/common/tables/swift-rsyncd-object6040.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object6040] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object6040.lockNo help text available for this option.
= 25No help text available for this option.
= /srv/4/node/No help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object_sda.xml b/doc/common/tables/swift-rsyncd-object_sda.xml deleted file mode 100644 index 525d8ebecc..0000000000 --- a/doc/common/tables/swift-rsyncd-object_sda.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object_sda] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object_sda.lockNo help text available for this option.
= 4No help text available for this option.
= /srv/nodeNo help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object_sdb.xml b/doc/common/tables/swift-rsyncd-object_sdb.xml deleted file mode 100644 index f1eed5bd63..0000000000 --- a/doc/common/tables/swift-rsyncd-object_sdb.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object_sdb] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object_sdb.lockNo help text available for this option.
= 4No help text available for this option.
= /srv/nodeNo help text available for this option.
= falseNo help text available for this option.
-
diff --git a/doc/common/tables/swift-rsyncd-object_sdc.xml b/doc/common/tables/swift-rsyncd-object_sdc.xml deleted file mode 100644 index 0d7a1323ff..0000000000 --- a/doc/common/tables/swift-rsyncd-object_sdc.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [object_sdc] in rsyncd.conf -
Configuration option = Default valueDescription
= /var/lock/object_sdc.lockNo help text available for this option.
= 4No help text available for this option.
= /srv/nodeNo help text available for this option.
= falseNo help text available for this option.
= {replication_ip}::object{replication_port}No help text available for this option.
-
diff --git a/doc/common/tables/swift-swift-storage-policy-0.xml b/doc/common/tables/swift-swift-storage-policy-0.xml deleted file mode 100644 index 071bcb31fe..0000000000 --- a/doc/common/tables/swift-swift-storage-policy-0.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [storage-policy-0] in swift.conf -
Configuration option = Default valueDescription
= yes If set to yes, this policy is used by default for containers created without an explicit storage policy.
= Policy-0No help text available for this option.
= replicationNo help text available for this option.
-
diff --git a/doc/common/tables/swift-swift-storage-policy-1.xml b/doc/common/tables/swift-swift-storage-policy-1.xml deleted file mode 100644 index b8924f1466..0000000000 --- a/doc/common/tables/swift-swift-storage-policy-1.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [storage-policy-1] in swift.conf -
Configuration option = Default valueDescription
= silverNo help text available for this option.
= replicationNo help text available for this option.
-
diff --git a/doc/common/tables/swift-swift-storage-policy-2.xml b/doc/common/tables/swift-swift-storage-policy-2.xml deleted file mode 100644 index 117d46d228..0000000000 --- a/doc/common/tables/swift-swift-storage-policy-2.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [storage-policy-2] in swift.conf -
Configuration option = Default valueDescription
= 10No help text available for this option.
= 4No help text available for this option.
= 1048576No help text available for this option.
= jerasure_rs_vandNo help text available for this option.
= deepfreeze10-4No help text available for this option.
= erasure_codingNo help text available for this option.
-
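Taken together, the three tables above map to [storage-policy:N] sections in swift.conf; a sketch assuming the upstream option names (the name column was lost in this table dump), with the erasure-coding values from the [storage-policy-2] table:

    [storage-policy:0]
    name = Policy-0
    policy_type = replication
    default = yes

    [storage-policy:1]
    name = silver
    policy_type = replication

    [storage-policy:2]
    name = deepfreeze10-4
    policy_type = erasure_coding
    ec_type = jerasure_rs_vand
    ec_num_data_fragments = 10
    ec_num_parity_fragments = 4
    ec_object_segment_size = 1048576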
diff --git a/doc/common/tables/swift-swift-swift-constraints.xml b/doc/common/tables/swift-swift-swift-constraints.xml deleted file mode 100644 index 972f5685f2..0000000000 --- a/doc/common/tables/swift-swift-swift-constraints.xml +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [swift-constraints] in swift.conf -
Configuration option = Default valueDescription
= 10000The default (and maximum) number of items returned for an account listing request.
= 10000The default (and maximum) number of items returned for a container listing request.
= 0By default the maximum number of allowed headers depends on the number of max allowed metadata settings plus a default value of 32 for regular http headers. If for some reason this is not enough (custom middleware for example) it can be increased with the extra_header_count constraint.
= 256The maximum number of bytes in the utf8 encoding of an account name.
= 256The maximum number of bytes in the utf8 encoding of a container name.
= 5368709122 The largest normal object that can be saved in the cluster. This is also the limit on the size of each segment of a large object when using the large object manifest support. This value is set in bytes. Setting it lower than 1 MiB will cause some tests to fail. It is STRONGLY recommended to leave this value at the default (5 * 2**30 + 2).
= 8192 The max number of bytes in the utf8 encoding of each header. Using 8192 as default because eventlet uses 8192 as the maximum size of a header line. You may need to increase this value when using identity v3 API tokens including more than 7 catalog entries. See also include_service_catalog in proxy-server.conf-sample (documented in overview_auth.rst).
= 90 The max number of metadata keys that can be stored on a single account, container, or object.
= 128 The max number of bytes in the utf8 encoding of the name portion of a metadata header.
= 4096 The max number of bytes in the utf8 encoding of the metadata (keys + values).
= 256 The max number of bytes in the utf8 encoding of a metadata value.
= 1024 The max number of bytes in the utf8 encoding of an object name.
= v0,v1,v2No help text available for this option.
-
diff --git a/doc/common/tables/swift-swift-swift-hash.xml b/doc/common/tables/swift-swift-swift-hash.xml deleted file mode 100644 index 46c0275d07..0000000000 --- a/doc/common/tables/swift-swift-swift-hash.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
Description of configuration options for - [swift-hash] in swift.conf -
Configuration option = Default valueDescription
= changeme A prefix used by hash_path to offer a bit more security when generating hashes for paths. It is added to all paths; if someone knows this prefix, it is easier for them to guess the hash a path will end up with. New installations are advised to set this parameter to a random secret, which would not be disclosed outside the organization. The same secret needs to be used by all swift servers of the same cluster. Existing installations should set this parameter to an empty string.
= changeme A suffix used by hash_path to offer a bit more security when generating hashes for paths. It is added to all paths; if someone knows this suffix, it is easier for them to guess the hash a path will end up with. New installations are advised to set this parameter to a random secret, which would not be disclosed outside the organization. The same secret needs to be used by all swift servers of the same cluster. Existing installations should set this parameter to an empty string.
-
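A minimal [swift-hash] section; the option names (swift_hash_path_prefix and swift_hash_path_suffix) are assumed from the upstream swift.conf, and changeme must be replaced with site-specific random secrets before deployment:

    [swift-hash]
    swift_hash_path_prefix = changeme
    swift_hash_path_suffix = changeme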
diff --git a/doc/common/tables/trove-amqp.xml b/doc/common/tables/trove-amqp.xml deleted file mode 100644 index 68c090b2ea..0000000000 --- a/doc/common/tables/trove-amqp.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of AMQP configuration options
Configuration option = Default valueDescription
[DEFAULT]
= trove.conductor.manager.Manager(StrOpt) Qualified class name to use for conductor manager.
= trove-conductor(StrOpt) Message queue name the Conductor will listen on.
= openstack(StrOpt) The default exchange under which topics are scoped. May be overridden by an exchange name specified in the transport_url option.
= [](MultiStrOpt) The Drivers(s) to handle sending notifications. Possible values are messaging, messagingv2, routing, log, test, noop
= {'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'percona': 'fd1723f5-68d2-409c-994f-a4a197892a17', 'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'pxc': '75a628c3-f81b-4ffb-b10a-4087c26bc854', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed', 'mariadb': '7a4f82cc-10d2-4bc6-aadc-d9aacc2a3cb5', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae'}(DictOpt) Unique ID to tag notification events.
= notifications(ListOpt) AMQP topic used for OpenStack notifications.
= None(StrOpt) A URL representing the messaging driver to use and its full configuration. If not set, we fall back to the rpc_backend option and driver specific configuration.
-
diff --git a/doc/common/tables/trove-api.xml b/doc/common/tables/trove-api.xml deleted file mode 100644 index aad4ae68ef..0000000000 --- a/doc/common/tables/trove-api.xml +++ /dev/null @@ -1,140 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of API configuration options
Configuration option = Default valueDescription
[DEFAULT]
= admin(ListOpt) Roles to add to an admin user.
= api-paste.ini(StrOpt) File name for the paste.deploy config for trove-api.
= 0.0.0.0(StrOpt) IP address the API server will listen on.
= 8779(IntOpt) Port the API server will listen on.
= None(StrOpt) Exclude IP addresses that match this regular expression.
= trove.db.sqlalchemy.api(StrOpt) API Implementation for Trove database access.
= True(BoolOpt) Require user hostnames to be valid IP addresses.
= 200(IntOpt) Maximum number of HTTP 'DELETE' requests (per minute).
= 200(IntOpt) Maximum number of HTTP 'GET' requests (per minute).
= 200(IntOpt) Maximum number of management HTTP 'POST' requests (per minute).
= 200(IntOpt) Maximum number of HTTP 'POST' requests (per minute).
= 200(IntOpt) Maximum number of HTTP 'PUT' requests (per minute).
= /etc/trove/conf.d(StrOpt) Path to folder on the Guest where config files will be injected during instance creation.
= 20(IntOpt) Page size for listing instances.
= 16384(IntOpt) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs).
= RegionOne(StrOpt) Region name of this node. Used when searching catalog.
= LOCAL_DEV(StrOpt) The region this service is located.
= 600(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.
= None(IntOpt) Number of workers for the API service. The default will be the number of CPUs available.
= http://0.0.0.0:5000/v2.0(StrOpt) Trove authentication URL.
= None(IntOpt) Number of workers for the Conductor service. The default will be the number of CPUs available.
= SecGroup(StrOpt) Prefix to use when creating Security Groups.
= 0.0.0.0/0(StrOpt) CIDR to use when creating Security Group Rules.
= True(BoolOpt) Whether Trove should add Security Groups on create.
= 20(IntOpt) Page size for listing users.
-
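A sketch of the corresponding trove.conf [DEFAULT] entries for the API service; option names such as bind_host, bind_port, api_paste_config, trove_api_workers, and trove_auth_url are assumed from the Trove sample configuration, with illustrative values:

    [DEFAULT]
    bind_host = 0.0.0.0
    bind_port = 8779
    api_paste_config = api-paste.ini
    trove_api_workers = 4
    trove_auth_url = http://0.0.0.0:5000/v2.0
    os_region_name = RegionOne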
diff --git a/doc/common/tables/trove-auth_token.xml b/doc/common/tables/trove-auth_token.xml deleted file mode 100644 index c374bb31af..0000000000 --- a/doc/common/tables/trove-auth_token.xml +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of authorization token configuration options
Configuration option = Default valueDescription
[keystone_authtoken]
= None(StrOpt) Service user password.
= admin(StrOpt) Service tenant name.
= None(StrOpt) This option is deprecated and may be removed in a future release. Single shared secret with the Keystone configuration used for bootstrapping a Keystone installation, or otherwise bypassing the normal authentication process. This option should not be used, use `admin_user` and `admin_password` instead.
= None(StrOpt) Service username.
= (StrOpt) Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
= 127.0.0.1(StrOpt) Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
= None(StrOpt) Name of the plugin to load
= 35357(IntOpt) Port of the admin Identity API endpoint. Deprecated, use identity_uri.
= https(StrOpt) Protocol of the admin Identity API endpoint (http or https). Deprecated, use identity_uri.
= None(StrOpt) Config Section from which to load plugin specific options
= None(StrOpt) Complete public Identity API endpoint.
= None(StrOpt) API version of the admin Identity API endpoint.
= None(StrOpt) Env key for the swift cache.
= None(StrOpt) A PEM encoded Certificate Authority to use when verifying HTTPs connections. Defaults to system CAs.
= None(StrOpt) Required if identity server requires client certificate
= False(BoolOpt) If true, the revocation list will be checked for cached tokens. This requires that PKI tokens are configured on the identity server.
= False(BoolOpt) Do not handle authorization requests within the middleware, but delegate the authorization decision to downstream WSGI components.
= permissive(StrOpt) Used to control the use and type of token binding. Can be set to: "disabled" to not check token binding. "permissive" (default) to validate binding information if the bind type is of a form known to the server and ignore it if not. "strict" like "permissive" but if the bind type is unknown the token will be rejected. "required" any form of token binding is needed to be allowed. Finally the name of a binding method that must be present in tokens.
= md5(ListOpt) Hash algorithms to use for hashing PKI tokens. This may be a single algorithm or multiple. The algorithms are those supported by Python standard hashlib.new(). The hashes will be tried in the order given, so put the preferred one first for performance. The result of the first hash will be stored in the cache. This will typically be set to multiple values only while migrating from a less secure algorithm to a more secure one. Once all the old tokens are expired this option should be set to a single value for better performance.
= None(IntOpt) Request timeout value for communicating with Identity API server.
= 3(IntOpt) How many times are we trying to reconnect when communicating with Identity API Server.
= None(StrOpt) Complete admin Identity API endpoint. This should specify the unversioned root endpoint e.g. https://localhost:35357/
= True(BoolOpt) (Optional) Indicate whether to set the X-Service-Catalog header. If False, middleware will not ask for service catalog on token validation and will not set the X-Service-Catalog header.
= False(BoolOpt) Verify HTTPS connections.
= None(StrOpt) Required if identity server requires client certificate
= 10(IntOpt) (Optional) Number of seconds that an operation will wait to get a memcached client connection from the pool.
= 300(IntOpt) (Optional) Number of seconds memcached server is considered dead before it is tried again.
= 10(IntOpt) (Optional) Maximum total number of open connections to every memcached server.
= 3(IntOpt) (Optional) Socket timeout in seconds for communicating with a memcached server.
= 60(IntOpt) (Optional) Number of seconds a connection to memcached is held unused in the pool before it is closed.
= None(StrOpt) (Optional, mandatory if memcache_security_strategy is defined) This string is used for key derivation.
= None(StrOpt) (Optional) If defined, indicate whether token data should be authenticated or authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated in the cache. If the value is not one of these options or empty, auth_token will raise an exception on initialization.
= False(BoolOpt) (Optional) Use the advanced (eventlet safe) memcached client pool. The advanced pool will only work under python 2.x.
= None(StrOpt) The region in which the identity server can be found.
= 10(IntOpt) Determines the frequency at which the list of revoked tokens is retrieved from the Identity service (in seconds). A high number of revocation events combined with a low cache duration may significantly reduce performance.
= None(StrOpt) Directory used to cache files related to PKI tokens.
= 300(IntOpt) In order to prevent excessive effort spent validating tokens, the middleware caches previously-seen tokens for a configurable duration (in seconds). Set to -1 to disable caching completely.
-
diff --git a/doc/common/tables/trove-backup.xml b/doc/common/tables/trove-backup.xml deleted file mode 100644 index cdafe34ee1..0000000000 --- a/doc/common/tables/trove-backup.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of backup configuration options
Configuration option = Default valueDescription
[DEFAULT]
= default_aes_cbc_key(StrOpt) Default OpenSSL aes_cbc key.
= 65536(IntOpt) Chunk size (in bytes) to stream to the Swift container. This should be in multiples of 128 bytes, since this is the size of an md5 digest block allowing the process to update the file checksum during streaming. See: http://stackoverflow.com/questions/1131220/
= trove.guestagent.backup.backup_types.InnoBackupEx(StrOpt) Runner to use for backups.
= {}(DictOpt) Additional options to be passed to the backup runner.
= 2147483648(IntOpt) Maximum size (in bytes) of each segment of the backup file.
= database_backups(StrOpt) Swift container to put backups in.
= True(BoolOpt) Compress backups using gzip.
= True(BoolOpt) Encrypt backups using OpenSSL.
= False(BoolOpt) Send backup files over snet.
= 20(IntOpt) Page size for listing backups.
-
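A sketch of the backup-related trove.conf [DEFAULT] entries described above; option names (backup_swift_container, backup_chunk_size, and so on) are assumed from the Trove sample configuration:

    [DEFAULT]
    backup_swift_container = database_backups
    backup_chunk_size = 65536
    backup_segment_max_size = 2147483648
    backup_use_gzip_compression = True
    backup_use_openssl_encryption = True
    backup_use_snet = False
    backups_page_size = 20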
diff --git a/doc/common/tables/trove-ca.xml b/doc/common/tables/trove-ca.xml deleted file mode 100644 index a531d389cb..0000000000 --- a/doc/common/tables/trove-ca.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of CA and SSL configuration options
Configuration option = Default valueDescription
[ssl]
= None(StrOpt) CA certificate file to use to verify connecting clients
= None(StrOpt) Certificate file to use when starting the server securely
= None(StrOpt) Private key file to use when starting the server securely
-
diff --git a/doc/common/tables/trove-clients.xml b/doc/common/tables/trove-clients.xml deleted file mode 100644 index 505c99899d..0000000000 --- a/doc/common/tables/trove-clients.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of clients configuration options
Configuration option = Default valueDescription
[DEFAULT]
= trove.common.remote.cinder_client(StrOpt) Client to send Cinder calls to.
= trove.common.remote.dns_client(StrOpt) Client to send DNS calls to.
= trove.common.remote.guest_client(StrOpt) Client to send Guest Agent calls to.
= trove.common.remote.heat_client(StrOpt) Client to send Heat calls to.
= trove.common.remote.neutron_client(StrOpt) Client to send Neutron calls to.
= trove.common.remote.nova_client(StrOpt) Client to send Nova calls to.
= trove.common.remote.swift_client(StrOpt) Client to send Swift calls to.
-
diff --git a/doc/common/tables/trove-cluster.xml b/doc/common/tables/trove-cluster.xml deleted file mode 100644 index faf7a2ff69..0000000000 --- a/doc/common/tables/trove-cluster.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of cluster configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 180(IntOpt) Maximum time (in seconds) to wait for a cluster delete.
= 36000(IntOpt) Maximum time (in seconds) to wait for a cluster to become active.
= 20(IntOpt) Page size for listing clusters.
-
diff --git a/doc/common/tables/trove-common.xml b/doc/common/tables/trove-common.xml deleted file mode 100644 index c9b212b61e..0000000000 --- a/doc/common/tables/trove-common.xml +++ /dev/null @@ -1,107 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of common configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 20(IntOpt) Page size for listing configurations.
= 20(IntOpt) Page size for listing databases.
= None(StrOpt) The default datastore id or name to use if one is not provided by the user. If the default value is None, the field becomes required in the instance create request.
= (ListOpt) List of IDs for management networks which should be attached to the instance regardless of what NICs are specified in the create API call.
= 36(IntOpt) Character length of generated passwords.
= 64(IntOpt) Size of executor thread pool.
= json(ListOpt) Filetype endings not to be reattached to an ID by the utils method correct_id_with_req.
= 0.0.0.0(StrOpt) Host to listen for RPC messages.
= None(ListOpt) Memcached servers or None for in process cache.
= /usr/lib/python/site-packages/trove(StrOpt) Directory where the Trove python module is installed.
= None(StrOpt) Set path to pydevd library, used if pydevd is not found in python sys.path.
= taskmanager(StrOpt) Message queue name the Taskmanager will listen to.
= /etc/trove/templates/(StrOpt) Path which leads to datastore templates.
= 120(IntOpt) Maximum time (in seconds) to wait for a service to become alive.
= 900(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
[keystone_authtoken]
= None(ListOpt) Optionally specify a list of memcached server(s) to use for caching. If left undefined, tokens will instead be cached in-process.
-
diff --git a/doc/common/tables/trove-compute.xml b/doc/common/tables/trove-compute.xml deleted file mode 100644 index 452014d89e..0000000000 --- a/doc/common/tables/trove-compute.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Compute configuration options
Configuration option = Default valueDescription
[DEFAULT]
= None(StrOpt) List IP addresses that match this regular expression.
= publicURL(StrOpt) Service endpoint type to use when searching catalog.
= compute(StrOpt) Service type to use when searching catalog.
= None(StrOpt) URL without the tenant segment.
= ALL(ListOpt) Permissions to grant to the 'root' user.
= True(BoolOpt) Assign the 'root' user GRANT permissions.
-
diff --git a/doc/common/tables/trove-database.xml b/doc/common/tables/trove-database.xml deleted file mode 100644 index a548db504d..0000000000 --- a/doc/common/tables/trove-database.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of database configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Allow insecure logging while executing queries through SQLAlchemy.
[database]
= sqlite:///trove_test.sqlite(StrOpt) SQL Connection.
= 3600(IntOpt) No help text available for this option.
= False(BoolOpt) No help text available for this option.
-
diff --git a/doc/common/tables/trove-db_cassandra.xml b/doc/common/tables/trove-db_cassandra.xml deleted file mode 100644 index 3bbaf56e35..0000000000 --- a/doc/common/tables/trove-db_cassandra.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Cassandra database configuration options
Configuration option = Default valueDescription
[cassandra]
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= None(StrOpt) Namespace to load backup strategies from.
= None(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= /var/lib/cassandra(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= None(StrOpt) Default strategy for replication.
= None(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for cassandra.
= 7000, 7001, 9042, 9160(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_couchbase.xml b/doc/common/tables/trove-db_couchbase.xml deleted file mode 100644 index 11b9b7e918..0000000000 --- a/doc/common/tables/trove-db_couchbase.xml +++ /dev/null @@ -1,139 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Couchbase database configuration options
Configuration option = Default valueDescription
[couchbase]
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.experimental.couchbase_impl(StrOpt) Namespace to load backup strategies from.
= CbBackup(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= /var/lib/couchbase(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= None(StrOpt) Default strategy for replication.
= trove.guestagent.strategies.restore.experimental.couchbase_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for couchbase.
= True(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= 8091, 8092, 4369, 11209-11211, 21100-21199(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
[couchdb]
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= None(StrOpt) Namespace to load backup strategies from.
= None(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= /var/lib/couchdb(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= None(StrOpt) Default strategy for replication.
= None(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for couchdb.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the "password" field.
= 5984(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_db2.xml b/doc/common/tables/trove-db_db2.xml deleted file mode 100644 index a2b5435ee1..0000000000 --- a/doc/common/tables/trove-db_db2.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of DB2 database configuration options
Configuration option = Default valueDescription
[db2]
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= None(StrOpt) Namespace to load backup strategies from.
= None(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= PUBLIC, DB2INST1(ListOpt) No help text available for this option.
= /home/db2inst1/db2inst1(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= None(StrOpt) Default strategy for replication.
= None(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for db2.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= 50000(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_mariadb.xml b/doc/common/tables/trove-db_mariadb.xml deleted file mode 100644 index 9fff4a848c..0000000000 --- a/doc/common/tables/trove-db_mariadb.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of MariaDB database configuration options
Configuration option = Default valueDescription
[mariadb]
= {'InnoBackupEx': 'InnoBackupExIncremental'}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.mysql_impl(StrOpt) Namespace to load backup strategies from.
= InnoBackupEx(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= /var/lib/mysql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= trove.guestagent.strategies.replication.mysql_binlog(StrOpt) Namespace to load replication strategies from.
= MysqlBinlogReplication(StrOpt) Default strategy for replication.
= trove.guestagent.strategies.restore.mysql_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for mysql.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= 3306(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= 400(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_mongodb.xml b/doc/common/tables/trove-db_mongodb.xml deleted file mode 100644 index 7b6c4cba61..0000000000 --- a/doc/common/tables/trove-db_mongodb.xml +++ /dev/null @@ -1,128 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of MongoDB database configuration options
Configuration option = Default valueDescription
[mongodb]
= 300(IntOpt) Maximum time to wait (in seconds) for a replica set initialization process to complete.
= trove.common.strategies.cluster.experimental.mongodb.api.MongoDbAPIStrategy(StrOpt) Class that implements datastore-specific API logic.
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.experimental.mongo_impl(StrOpt) Namespace to load backup strategies from.
= MongoDump(StrOpt) Default strategy to perform backups.
= True(BoolOpt) Enable clusters to be created and managed.
= 27019(IntOpt) Port for instances running as config servers.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= trove.common.strategies.cluster.experimental.mongodb.guestagent.MongoDbGuestAgentStrategy(StrOpt) Class that implements datastore-specific Guest Agent API logic.
= admin, local, config(ListOpt) Databases to exclude when listing databases.
= admin.os_admin, admin.root(ListOpt) Users to exclude when listing users.
= 27017(IntOpt) Port for mongod and mongos instances.
= /var/lib/mongodb(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= 3(IntOpt) The number of config servers to create per cluster.
= 1(IntOpt) The number of query routers (mongos) to create per cluster.
= None(StrOpt) Default strategy for replication.
= trove.guestagent.strategies.restore.experimental.mongo_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for mongodb.
= trove.common.strategies.cluster.experimental.mongodb.taskmanager.MongoDbTaskManagerStrategy(StrOpt) Class that implements datastore-specific task manager logic.
= 2500, 27017(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_mysql.xml b/doc/common/tables/trove-db_mysql.xml deleted file mode 100644 index 5de07008a4..0000000000 --- a/doc/common/tables/trove-db_mysql.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of MySQL database configuration options
Configuration option = Default valueDescription
[mysql]
= {'InnoBackupEx': 'InnoBackupExIncremental'}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.mysql_impl(StrOpt) Namespace to load backup strategies from.
= InnoBackupEx(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= /var/lib/mysql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= trove.guestagent.strategies.replication.mysql_gtid(StrOpt) Namespace to load replication strategies from.
= MysqlGTIDReplication(StrOpt) Default strategy for replication.
= trove.guestagent.strategies.restore.mysql_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for mysql.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= 3306(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= 400(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
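A minimal sketch of how these settings appear in the [mysql] section of trove.conf follows. The option names used here (backup_strategy, mount_point, root_on_create, tcp_ports, usage_timeout, volume_support) are assumed, since the table rows above list only defaults and descriptions:
[mysql]
# Assumed option names; values match the defaults in the table above
backup_strategy = InnoBackupEx
mount_point = /var/lib/mysql
root_on_create = False
tcp_ports = 3306
usage_timeout = 400
volume_support = True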
diff --git a/doc/common/tables/trove-db_percona.xml b/doc/common/tables/trove-db_percona.xml deleted file mode 100644 index 019bc61503..0000000000 --- a/doc/common/tables/trove-db_percona.xml +++ /dev/null @@ -1,104 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Percona database configuration options
Configuration option = Default valueDescription
[percona]
= {'InnoBackupEx': 'InnoBackupExIncremental'}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.mysql_impl(StrOpt) Namespace to load backup strategies from.
= InnoBackupEx(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= /var/lib/mysql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= trove.guestagent.strategies.replication.mysql_gtid(StrOpt) Namespace to load replication strategies from.
= NETOU7897NNLOU(StrOpt) Password for replication slave user.
= MysqlGTIDReplication(StrOpt) Default strategy for replication.
= slave_user(StrOpt) Userid for replication slave.
= trove.guestagent.strategies.restore.mysql_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for percona.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= 3306(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= 450(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_postgresql.xml b/doc/common/tables/trove-db_postgresql.xml deleted file mode 100644 index 8283b6788c..0000000000 --- a/doc/common/tables/trove-db_postgresql.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of PostgreSQL database configuration options
Configuration option = Default valueDescription
[postgresql]
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.experimental.postgresql_impl(StrOpt) Namespace to load backup strategies from.
= PgDump(StrOpt) Default strategy to perform backups.
= /dev/vdb(StrOpt) No help text available for this option.
= postgres(ListOpt) No help text available for this option.
= os_admin, postgres, root(ListOpt) No help text available for this option.
= /var/lib/postgresql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= trove.guestagent.strategies.restore.experimental.postgresql_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for postgresql.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= 5432(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_pxc.xml b/doc/common/tables/trove-db_pxc.xml deleted file mode 100644 index 526e365c49..0000000000 --- a/doc/common/tables/trove-db_pxc.xml +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Percona XtraDB Cluster database configuration options
Configuration option = Default valueDescription
[pxc]
= trove.common.strategies.cluster.experimental.pxc.api.PXCAPIStrategy(StrOpt) Class that implements datastore-specific API logic.
= {'InnoBackupEx': 'InnoBackupExIncremental'}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.mysql_impl(StrOpt) Namespace to load backup strategies from.
= InnoBackupEx(StrOpt) Default strategy to perform backups.
= True(BoolOpt) Enable clusters to be created and managed.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= trove.common.strategies.cluster.experimental.pxc.guestagent.PXCGuestAgentStrategy(StrOpt) Class that implements datastore-specific Guest Agent API logic.
= os_admin, root, clusterrepuser(ListOpt) Users to exclude when listing users.
= 3(IntOpt) Minimum number of members in PXC cluster.
= /var/lib/mysql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= trove.guestagent.strategies.replication.mysql_gtid(StrOpt) Namespace to load replication strategies from.
= MysqlGTIDReplication(StrOpt) Default strategy for replication.
= slave_user(StrOpt) Userid for replication slave.
= trove.guestagent.strategies.restore.mysql_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for pxc.
= False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
= trove.common.strategies.cluster.experimental.pxc.taskmanager.PXCTaskManagerStrategy(StrOpt) Class that implements datastore-specific task manager logic.
= 3306, 4444, 4567, 4568(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= 450(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_redis.xml b/doc/common/tables/trove-db_redis.xml deleted file mode 100644 index a37a4d3344..0000000000 --- a/doc/common/tables/trove-db_redis.xml +++ /dev/null @@ -1,104 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis database configuration options
Configuration option = Default valueDescription
[redis]
= trove.common.strategies.cluster.experimental.redis.api.RedisAPIStrategy(StrOpt) Class that implements datastore-specific API logic.
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= trove.guestagent.strategies.backup.experimental.redis_impl(StrOpt) Namespace to load backup strategies from.
= RedisBackup(StrOpt) Default strategy to perform backups.
= True(BoolOpt) Enable clusters to be created and managed.
= None(StrOpt) Device path for volume if volume support is enabled.
= trove.common.strategies.cluster.experimental.redis.guestagent.RedisGuestAgentStrategy(StrOpt) Class that implements datastore-specific Guest Agent API logic.
= /var/lib/redis(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= trove.guestagent.strategies.replication.experimental.redis_sync(StrOpt) Namespace to load replication strategies from.
= RedisSyncReplication(StrOpt) Default strategy for replication.
= trove.guestagent.strategies.restore.experimental.redis_impl(StrOpt) Namespace to load restore strategies from.
= trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for redis.
= trove.common.strategies.cluster.experimental.redis.taskmanager.RedisTaskManagerStrategy(StrOpt) Class that implements datastore-specific task manager logic.
= 6379, 16379(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-db_vertica.xml b/doc/common/tables/trove-db_vertica.xml deleted file mode 100644 index cb30d3dbc4..0000000000 --- a/doc/common/tables/trove-db_vertica.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Vertica database configuration options
Configuration option = Default valueDescription
[vertica]
= trove.common.strategies.cluster.experimental.vertica.api.VerticaAPIStrategy(StrOpt) Class that implements datastore-specific API logic.
= {}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
= None(StrOpt) Namespace to load backup strategies from.
= None(StrOpt) Default strategy to perform backups.
= 3(IntOpt) Number of members in Vertica cluster.
= True(BoolOpt) Enable clusters to be created and managed.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= trove.common.strategies.cluster.experimental.vertica.guestagent.VerticaGuestAgentStrategy(StrOpt) Class that implements datastore-specific Guest Agent API logic.
= /var/lib/vertica(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
= 2048(IntOpt) Size (MB) to be set as readahead_size for the data volume.
= None(StrOpt) Default strategy for replication.
= None(StrOpt) Namespace to load restore strategies from.
= trove.extensions.vertica.service.VerticaRootController(StrOpt) Root controller implementation for Vertica.
= trove.common.strategies.cluster.experimental.vertica.taskmanager.VerticaTaskManagerStrategy(StrOpt) Class that implements datastore-specific task manager logic.
= 5433, 5434, 22, 5444, 5450, 4803(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= 5433, 4803, 4804, 6453(ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
-
diff --git a/doc/common/tables/trove-debug.xml b/doc/common/tables/trove-debug.xml deleted file mode 100644 index e336c53439..0000000000 --- a/doc/common/tables/trove-debug.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 4096(IntOpt) Number of backlog requests to configure the socket with
= disabled(StrOpt) Enable or disable pydev remote debugging. If the value is 'auto', the service tries to connect to the remote debugger server and, on error, continues running with debugging disabled.
= None(StrOpt) Pydev debug server host (localhost by default).
= None(IntOpt) Pydev debug server port (5678 by default).
[profiler]
= False(BoolOpt) If False fully disable profiling feature.
= True(BoolOpt) If False doesn't trace SQL requests.
-
diff --git a/doc/common/tables/trove-dns.xml b/doc/common/tables/trove-dns.xml deleted file mode 100644 index 3a7c7d2e6e..0000000000 --- a/doc/common/tables/trove-dns.xml +++ /dev/null @@ -1,104 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of DNS configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) Tenant ID for DNSaaS.
= (StrOpt) Authentication URL for DNSaaS.
= (StrOpt) Domain ID used for adding DNS entries.
= (StrOpt) Domain name used for adding DNS entries.
= trove.dns.driver.DnsDriver(StrOpt) Driver for DNSaaS.
= 0.0.0.0(StrOpt) Endpoint URL for DNSaaS.
= (StrOpt) Hostname used for adding DNS entries.
= trove.dns.driver.DnsInstanceEntryFactory(StrOpt) Factory for adding DNS entries.
= (StrOpt) Management URL for DNSaaS.
= (StrOpt) Passkey for DNSaaS.
= (StrOpt) Region name for DNSaaS.
= (StrOpt) Service Type for DNSaaS.
= 120(IntOpt) Maximum time (in seconds) to wait for a DNS entry add.
= 300(IntOpt) Time (in seconds) before a refresh of DNS information occurs.
= (StrOpt) Username for DNSaaS.
= False(BoolOpt) Whether Trove should add DNS entries on create (using Designate DNSaaS).
-
diff --git a/doc/common/tables/trove-guestagent.xml b/doc/common/tables/trove-guestagent.xml deleted file mode 100644 index ec4da23093..0000000000 --- a/doc/common/tables/trove-guestagent.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of guest agent configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 60(IntOpt) Maximum time (in seconds) to wait for Guest Agent 'slow' requests (such as restarting the database).
= 5(IntOpt) Maximum time (in seconds) to wait for Guest Agent 'quick' requests (such as retrieving a list of users or databases).
= 60(IntOpt) Time (in seconds) after which a guest is considered unreachable.
= 10(IntOpt) Maximum time (in seconds) for the Guest Agent to reply to a heartbeat request.
= 36000(IntOpt) Maximum time (in seconds) to wait for taking a Guest Agent replication snapshot.
= /etc/trove/trove-guestagent.conf(StrOpt) Path to the Guest Agent config file to be injected during instance creation.
= None(StrOpt) ID of the Guest Instance.
= guest_info.conf(StrOpt) The guest info filename found in the injected config location. If a full path is specified then it will be used as the path to the guest info file
= mysql, information_schema, performance_schema(ListOpt) Databases to exclude when listing databases.
= os_admin, root(ListOpt) Users to exclude when listing users.
= defaults,noatime(StrOpt) Options to use when mounting a volume.
= trove.guestagent.strategies.storage.swift(StrOpt) Namespace to load the default storage strategy from.
= SwiftStorage(StrOpt) Default strategy to store backups.
= 5(IntOpt) Time to sleep during the check for an active Guest.
-
diff --git a/doc/common/tables/trove-heat.xml b/doc/common/tables/trove-heat.xml deleted file mode 100644 index 8c8438fbca..0000000000 --- a/doc/common/tables/trove-heat.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Orchestration module configuration options
Configuration option = Default valueDescription
[DEFAULT]
= publicURL(StrOpt) Service endpoint type to use when searching catalog.
= orchestration(StrOpt) Service type to use when searching catalog.
= 60(IntOpt) Maximum time (in seconds) to wait for a Heat request to complete.
= None(StrOpt) URL without the tenant segment.
-
diff --git a/doc/common/tables/trove-logging.xml b/doc/common/tables/trove-logging.xml deleted file mode 100644 index 7e84bd5856..0000000000 --- a/doc/common/tables/trove-logging.xml +++ /dev/null @@ -1,132 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of logging configuration options
Configuration option = Default valueDescription
[DEFAULT]
= False(BoolOpt) Print debugging output (set logging level to DEBUG instead of default INFO level).
= amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN(ListOpt) List of logger=LEVEL pairs. This option is ignored if log_config_append is set.
= False(BoolOpt) Enables or disables fatal status of deprecations.
= -m 5(StrOpt) Options to use when formatting a volume.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance that is passed with the log message.
= "[instance: %(uuid)s] "(StrOpt) The format for an instance UUID that is passed with the log message.
= None(StrOpt) The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log_format).
= %Y-%m-%d %H:%M:%S(StrOpt) Format string for %%(asctime)s in log records. Default: %(default)s . This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) The base directory used for relative --log-file paths. This option is ignored if log_config_append is set.
= None(StrOpt) (Optional) Name of log file to output to. If no default is set, logging will go to stdout. This option is ignored if log_config_append is set.
= None(StrOpt) DEPRECATED. A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. This option is deprecated. Please use logging_context_format_string and logging_default_format_string instead. This option is ignored if log_config_append is set.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s(StrOpt) Format string to use for log messages with context.
= %(funcName)s %(pathname)s:%(lineno)d(StrOpt) Data to append to log format when level is DEBUG.
= %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s(StrOpt) Format string to use for log messages without context.
= %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s(StrOpt) Prefix each line of exception output with this format.
= ^private$(StrOpt) Regular expression to match Trove network labels.
= False(BoolOpt) Enables or disables publication of error events.
= LOG_USER(StrOpt) Syslog facility to receive log lines. This option is ignored if log_config_append is set.
= True(BoolOpt) Log output to standard error. This option is ignored if log_config_append is set.
= False(BoolOpt) Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set.
= True(BoolOpt) (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The format without the APP-NAME is deprecated in Kilo, and will be removed in Mitaka, along with this option. This option is ignored if log_config_append is set.
= True(BoolOpt) If set to false, will disable INFO logging level, making WARNING the default.
= False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
-
diff --git a/doc/common/tables/trove-network.xml b/doc/common/tables/trove-network.xml deleted file mode 100644 index 0bce18db80..0000000000 --- a/doc/common/tables/trove-network.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of network configuration options
Configuration option = Default valueDescription
[DEFAULT]
= trove.network.nova.NovaNetwork(StrOpt) Describes the actual network manager used for the management of network attributes (security groups, floating IPs, etc.).
= publicURL(StrOpt) Service endpoint type to use when searching catalog.
= network(StrOpt) Service type to use when searching catalog.
= None(StrOpt) URL without the tenant segment.
-
diff --git a/doc/common/tables/trove-nova.xml b/doc/common/tables/trove-nova.xml deleted file mode 100644 index 4cca0b83ba..0000000000 --- a/doc/common/tables/trove-nova.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of nova configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) Admin password used to connect to Nova.
= (StrOpt) Admin tenant ID used to connect to Nova.
= (StrOpt) Admin tenant name used to connect to Nova.
= (StrOpt) Admin username used to connect to Nova.
-
diff --git a/doc/common/tables/trove-qpid.xml b/doc/common/tables/trove-qpid.xml deleted file mode 100644 index 75316bc970..0000000000 --- a/doc/common/tables/trove-qpid.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Qpid configuration options
Configuration option = Default valueDescription
[oslo_messaging_qpid]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= 60(IntOpt) Seconds between connection keepalive heartbeats.
= localhost(StrOpt) Qpid broker hostname.
= $qpid_hostname:$qpid_port(ListOpt) Qpid HA cluster host:port pairs.
= (StrOpt) Password for Qpid connection.
= 5672(IntOpt) Qpid broker port.
= tcp(StrOpt) Transport to use, either 'tcp' or 'ssl'.
= 1(IntOpt) The number of prefetched messages held by receiver.
= (StrOpt) Space separated list of SASL mechanisms to use for auth.
= True(BoolOpt) Whether to disable the Nagle algorithm.
= 1(IntOpt) The qpid topology version to use. Version 1 is what was originally used by impl_qpid. Version 2 includes some backwards-incompatible changes that allow broker federation to work. Users should update to version 2 when they are able to take everything down, as it requires a clean break.
= (StrOpt) Username for Qpid connection.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
diff --git a/doc/common/tables/trove-quota.xml b/doc/common/tables/trove-quota.xml deleted file mode 100644 index 3069d84ad0..0000000000 --- a/doc/common/tables/trove-quota.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of quota configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 5(IntOpt) Default maximum volume size (in GB) for an instance.
= 50(IntOpt) Default maximum number of backups created by a tenant.
= 5(IntOpt) Default maximum number of instances per tenant.
= 20(IntOpt) Default maximum volume capacity (in GB) spanning across all Trove volumes per tenant.
= trove.quota.quota.DbQuotaDriver(StrOpt) Default driver to use for quota checks.
-
diff --git a/doc/common/tables/trove-rabbitmq.xml b/doc/common/tables/trove-rabbitmq.xml deleted file mode 100644 index 5c08534dfd..0000000000 --- a/doc/common/tables/trove-rabbitmq.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RabbitMQ configuration options
Configuration option = Default valueDescription
[oslo_messaging_rabbit]
= False(BoolOpt) Auto-delete queues in AMQP.
= False(BoolOpt) Use durable queues in AMQP.
= False(BoolOpt) Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
= 2(IntOpt) How many times during the heartbeat_timeout_threshold the heartbeat is checked.
= 60(IntOpt) Number of seconds after which the Rabbit broker is considered down if the heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
= 1.0(FloatOpt) How long to wait before reconnecting in response to an AMQP consumer cancel notification.
= 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
= (StrOpt) SSL certification authority file (valid only if SSL enabled).
= (StrOpt) SSL cert file (valid only if SSL enabled).
= (StrOpt) SSL key file (valid only if SSL enabled).
= (StrOpt) SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some distributions.
= False(BoolOpt) Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you must wipe the RabbitMQ database.
= localhost(StrOpt) The RabbitMQ broker address where a single node is used.
= $rabbit_host:$rabbit_port(ListOpt) RabbitMQ HA cluster host:port pairs.
= AMQPLAIN(StrOpt) The RabbitMQ login method.
= 0(IntOpt) Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry count).
= guest(StrOpt) The RabbitMQ password.
= 5672(IntOpt) The RabbitMQ broker port where a single node is used.
= 2(IntOpt) How long to backoff for between retries when connecting to RabbitMQ.
= 1(IntOpt) How frequently to retry connecting with RabbitMQ.
= False(BoolOpt) Connect over SSL for RabbitMQ.
= guest(StrOpt) The RabbitMQ userid.
= /(StrOpt) The RabbitMQ virtual host.
= False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other side has finished sending the payload. We are going to remove it in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
-
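As an illustration, these settings go in the [oslo_messaging_rabbit] section of trove.conf. The snippet below is a sketch that assumes the standard oslo.messaging option names (rabbit_host, rabbit_port, rabbit_userid, rabbit_password, rabbit_use_ssl), which the rows above do not show; replace the credentials with your own:
[oslo_messaging_rabbit]
# Assumed option names and placeholder credentials
rabbit_host = controller
rabbit_port = 5672
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
rabbit_use_ssl = False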
diff --git a/doc/common/tables/trove-redis.xml b/doc/common/tables/trove-redis.xml deleted file mode 100644 index 7bff9730b9..0000000000 --- a/doc/common/tables/trove-redis.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of Redis configuration options
Configuration option = Default valueDescription
[DEFAULT]
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
[matchmaker_redis]
= 127.0.0.1(StrOpt) Host to locate redis.
= (StrOpt) Password for Redis server (optional).
= 6379(IntOpt) Use this port to connect to redis host.
-
diff --git a/doc/common/tables/trove-rpc.xml b/doc/common/tables/trove-rpc.xml deleted file mode 100644 index 52dc2d2cec..0000000000 --- a/doc/common/tables/trove-rpc.xml +++ /dev/null @@ -1,146 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of RPC configuration options
Configuration option = Default valueDescription
[DEFAULT]
= 3(IntOpt) Number of times to check if a volume exists.
= 30(IntOpt) The interval (in seconds) which periodic tasks are run.
= rabbit(StrOpt) The messaging driver to use, defaults to rabbit. Other drivers include qpid and zmq.
= 30(IntOpt) Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
= 30(IntOpt) Size of RPC connection pool.
= 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
= 60(IntOpt) Seconds to wait for a response from a call.
[oslo_concurrency]
= False(BoolOpt) Enables or disables inter-process locks.
= None(StrOpt) Directory to use for lock files. For security, the specified directory should only be writable by the user running the processes that need locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, a lock path must be set.
[oslo_messaging_amqp]
= False(BoolOpt) Accept clients using either SSL or plain TCP
= broadcast(StrOpt) address prefix used when broadcasting to all servers
= None(StrOpt) Name for the AMQP container
= unicast(StrOpt) address prefix when sending to any server in group
= 0(IntOpt) Timeout for inactive connections (in seconds)
= (StrOpt) Password for message broker authentication
= (StrOpt) Path to directory that contains the SASL configuration
= (StrOpt) Name of configuration file (without .conf suffix)
= (StrOpt) Space separated list of acceptable SASL mechanisms
= exclusive(StrOpt) address prefix used when sending to a specific server
= (StrOpt) CA certificate PEM file to verify server certificate
= (StrOpt) Identifying certificate PEM file to present to clients
= (StrOpt) Private key PEM file used to sign cert_file certificate
= None(StrOpt) Password for decrypting ssl_key_file (if encrypted)
= False(BoolOpt) Debug: dump AMQP frames to stdout
= (StrOpt) User name for message broker authentication
-
diff --git a/doc/common/tables/trove-swift.xml b/doc/common/tables/trove-swift.xml deleted file mode 100644 index d9eecfe302..0000000000 --- a/doc/common/tables/trove-swift.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of swift configuration options
Configuration option = Default valueDescription
[DEFAULT]
= publicURL(StrOpt) Service endpoint type to use when searching catalog.
= object-store(StrOpt) Service type to use when searching catalog.
= None(StrOpt) URL ending in AUTH_.
-
diff --git a/doc/common/tables/trove-taskmanager.xml b/doc/common/tables/trove-taskmanager.xml deleted file mode 100644 index 7cf88c51f7..0000000000 --- a/doc/common/tables/trove-taskmanager.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of taskmanager configuration options
Configuration option = Default valueDescription
[DEFAULT]
= /etc/trove/cloudinit(StrOpt) Path to folder with cloudinit scripts.
= None(StrOpt) Manager class in the Guest Agent, set up by the Taskmanager on instance provision.
= {}(DictOpt) Extension for default datastore managers. Allows the use of custom managers for each of the datastores supported by Trove.
= 3600(IntOpt) Seconds to wait between pushing events.
= None(StrOpt) Transformer for exists notifications.
= 120(IntOpt) Maximum time (in seconds) to wait for a server reboot.
= 600(IntOpt) Maximum time (in seconds) to wait for a server resize.
= 36000(IntOpt) Maximum time (in seconds) to wait for a Guest instance restored from a backup to become active.
= 600(IntOpt) Maximum time (in seconds) to wait for a server resize revert.
= 60(IntOpt) Maximum time (in seconds) to wait for a server delete.
= 180(IntOpt) Maximum time (in seconds) to wait for a state change.
= True(BoolOpt) Set the service and instance task statuses to ERROR when an instance fails to become active within the configured usage_timeout.
= 5(IntOpt) Time to sleep during the check for an active Guest.
= False(BoolOpt) Use Heat for provisioning.
= False(BoolOpt) Use config drive for file injection when booting instance.
= False(BoolOpt) Whether to provision a Cinder volume for the Nova instance.
= True(BoolOpt) Enable verification of Swift checksum before starting restore. Makes sure the checksum of original backup matches the checksum of the Swift backup file.
-
diff --git a/doc/common/tables/trove-upgrades.xml b/doc/common/tables/trove-upgrades.xml deleted file mode 100644 index 882d469c00..0000000000 --- a/doc/common/tables/trove-upgrades.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of upgrades configuration options
Configuration option = Default valueDescription
[upgrade_levels]
= icehouse(StrOpt) Set a version cap for messages sent to conductor services
= icehouse(StrOpt) Set a version cap for messages sent to guestagent services
= icehouse(StrOpt) Set a version cap for messages sent to taskmanager services
-
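For example, during a rolling upgrade the version caps can be pinned in the [upgrade_levels] section of trove.conf. The option names conductor, guestagent, and taskmanager are assumed to match the rows above:
[upgrade_levels]
# Cap RPC message versions until every service has been upgraded
conductor = icehouse
guestagent = icehouse
taskmanager = icehouse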
diff --git a/doc/common/tables/trove-volume.xml b/doc/common/tables/trove-volume.xml deleted file mode 100644 index 9327bee4a9..0000000000 --- a/doc/common/tables/trove-volume.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of volume configuration options
Configuration option = Default valueDescription
[DEFAULT]
= vdb(StrOpt) Block device to map onto the created instance.
= publicURL(StrOpt) Service endpoint type to use when searching catalog.
= volumev2(StrOpt) Service type to use when searching catalog.
= None(StrOpt) URL without the tenant segment.
= None(StrOpt) Volume type to use when provisioning a Cinder volume.
= /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
= True(BoolOpt) Whether to provision a Cinder volume for datadir.
= 120(IntOpt) Maximum time (in seconds) to wait for a volume format.
= ext3(StrOpt) File system type used to format a volume.
= 60(IntOpt) Maximum time (in seconds) to wait for a volume attach.
-
diff --git a/doc/common/tables/trove-zeromq.xml b/doc/common/tables/trove-zeromq.xml deleted file mode 100644 index 13a87f3725..0000000000 --- a/doc/common/tables/trove-zeromq.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Description of ZeroMQ configuration options
Configuration option = Default valueDescription
[DEFAULT]
= True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
= *(StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. The "host" option should point or resolve to this address.
= eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet".
= 1(IntOpt) Number of ZeroMQ contexts, defaults to 1.
= localhost(StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP address. Must match "host" option, if running Nova.
= /var/run/openstack(StrOpt) Directory for holding IPC sockets.
= redis(StrOpt) MatchMaker driver.
= None(IntOpt) Maximum number of ingress messages to locally buffer per topic. Default is unlimited.
= True(BoolOpt) Shows whether zmq-messaging uses broker or not.
-
diff --git a/doc/config-reference/app_firewalls-ports.xml b/doc/config-reference/app_firewalls-ports.xml deleted file mode 100644 index 06bf2f0e74..0000000000 --- a/doc/config-reference/app_firewalls-ports.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - Firewalls and default ports - On some deployments, such as ones where restrictive - firewalls are in place, you might need to manually configure a - firewall to permit OpenStack service traffic. - To manually configure a firewall, you must permit traffic - through the ports that each OpenStack service uses. This table - lists the default ports that each OpenStack service - uses: - - To function properly, some OpenStack components depend on - other, non-OpenStack services. For example, the OpenStack - dashboard uses HTTP for non-secure communication. In this - case, you must configure the firewall to allow traffic to and - from HTTP. - This table lists the ports that other OpenStack components - use: - - On some deployments, the default port used by a service - may fall within the defined local port range of a host. To - check a host's local port range: - $ sysctl -a | grep ip_local_port_range - If a service's default port falls within this range, run - the following program to check if the port has already been - assigned to another application: - $ lsof -i :PORT - Configure the service to use a different port if the - default port is already being used by another application. - diff --git a/doc/config-reference/app_policy_json.xml b/doc/config-reference/app_policy_json.xml deleted file mode 100644 index f757cd50f9..0000000000 --- a/doc/config-reference/app_policy_json.xml +++ /dev/null @@ -1,266 +0,0 @@ - - - - - - The policy.json file - - Each OpenStack service, Identity, Compute, Networking and so on, has its own - role-based access policies. They determine which user can access which objects in - which way, and are defined in the service's policy.json - file. - - Whenever an API call to an OpenStack service is made, the service's policy - engine uses the appropriate policy definitions to determine if the call can be - accepted. Any changes to policy.json are effective immediately, - which allows - new policies to be implemented while the service is running. - - A policy.json file is a text file in JSON (Javascript - Object Notation) format. Each policy is defined by a one-line statement in the form - "<target>" : "<rule>". - - - The policy target, also named "action", represents an API call like "start an instance" or - "attach a volume". - - - Action names are usually qualified. - Example: OpenStack Compute features API calls to list instances, - volumes and networks. In /etc/nova/policy.json, these APIs are - represented by compute:get_all, volume:get_all - and network:get_all, respectively. - - The mapping between API calls and actions is not generally documented. - - The policy rule determines under which circumstances the API call - is permitted. Usually this involves the user who makes the - call (hereafter named the "API user") and often the object on which the - API call operates. A typical rule checks if the API user is the object's owner. - - Modifying the policy - - While recipes for editing policy.json files - are found on blogs, modifying the policy can have unexpected side effects and is - not encouraged. - - - -
- Examples - - A simple rule might look like this: - - "compute:get_all" : "" - - The target is "compute:get_all", the "list all instances" API - of the Compute service. The rule is an empty string meaning "always". This policy - allows anybody to list instances. - - You can also decline permission to use an API: - - "compute:shelve": "!" - - The exclamation mark stands for "never" or "nobody", which effectively disables - the Compute API "shelve an instance". - - Many APIs can only be called by admin users. This can be expressed by the rule - "role:admin". - The following policy ensures that only administrators can create new users in the - Identity database: - - "identity:create_user" : "role:admin" - - - You can limit APIs to any role. For example, the Orchestration service defines - a role named heat_stack_user. Whoever has this role isn't allowed to create stacks: - - "stacks:create": "not role:heat_stack_user" - - This rule makes use of the boolean operator not. - More complex rules can be built using operators and, or - and parentheses. - - You can define aliases for rules: - - "deny_stack_user": "not role:heat_stack_user" - - The policy engine - understands that "deny_stack_user" is not an API and consequently - interprets it as an alias. The stack creation policy above can then be written as: - - "stacks:create": "rule:deny_stack_user" - - This is taken verbatim from /etc/heat/policy.json. - - Rules can compare API attributes to object attributes. For example: - - "compute:start" : "user_id:%(user_id)s" - - states that only the owner of an instance can start it up. The user_id - string - before the colon is an API attribute, namely the user ID of the API user. - It is compared with the user ID of the object (in this case, an instance); more - precisely, it is compared with the user_id field of that object - in the database. If the two values are equal, permission is - granted. - - An admin user always has permission to call APIs. - This is how /etc/keystone/policy.json makes this policy explicit: - - "admin_required": "role:admin or is_admin:1", -"owner" : "user_id:%(user_id)s", -"admin_or_owner": "rule:admin_required or rule:owner", -"identity:change_password": "rule:admin_or_owner" - - The first line defines an alias for "user is an admin user". - The is_admin flag is only used when setting - up the Identity service for the first time. It indicates that the user - has admin privileges granted by the service token - (--os-token - parameter of the keystone command line client). - - - The second line creates an alias for "user owns the object" by comparing - the API's user ID with the object's user ID. - - - Line 3 defines a third alias admin_or_owner, combining - the two first aliases with the Boolean operator or. - - - - Line 4 sets up the policy that - a password can only be modified by its owner or an admin user. - - - - As a final example, let's examine a more complex rule: - - "identity:ec2_delete_credential": "rule:admin_required or - (rule:owner and user_id:%(target.credential.user_id)s)" - - - This rule determines who can use the Identity API "delete EC2 credential". - Here, boolean operators and parentheses combine three simpler rules. - admin_required and owner are the same aliases as in the - previous example. user_id:%(target.credential.user_id)s compares the - API user with the user ID of the credential object associated with the target. - - -
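Putting the alias examples together, a minimal policy.json fragment might look like the following sketch (the exact set of targets depends on the service):
{
    "admin_required": "role:admin or is_admin:1",
    "owner": "user_id:%(user_id)s",
    "admin_or_owner": "rule:admin_required or rule:owner",
    "compute:start": "rule:admin_or_owner",
    "compute:shelve": "!"
}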
- -
- Syntax - - A policy.json file consists of policies and aliases of the - form target:rule or alias:definition, separated by commas - and enclosed in curly braces: - - { - "alias 1" : "definition 1", - "alias 2" : "definition 2", - ... - "target 1" : "rule 1", - "target 2" : "rule 2", - .... -} - - - - Targets are APIs and are written "service:API" or simply - "API". For example, "compute:create" or - "add_image". - - Rules determine whether the API call is allowed. - Rules can be: - - - always true. The action is always permitted. This can be written as - "" (empty string), [], or "@". - always false. The action is never permitted. Written as "!". - a special check - a comparison of two values - boolean expressions based on simpler rules - - - - Special checks are - - - <role>:<role name>, a test whether the API credentials - contain this role. - <rule>:<rule name>, - the definition of an alias. - http:<target URL>, - which delegates the check to a remote server. - The API is authorized when the server returns True. - - - - Developers can define additional special checks. - - Two values are compared in the following way: - - "value1 : value2" - - - Possible values are - - - constants: Strings, numbers, true, false - API attributes - target object attributes - the flag is_admin - - - - API attributes can be project_id, user_id or - domain_id. - - Target object attributes are fields from the object description in the database. - For example, in the case of the "compute:start" API, the object is the - instance to be started. The policy for starting instances could use the - %(project_id)s attribute, that is, the project that owns the instance. - The trailing s indicates this is a string. - - is_admin indicates that administrative privileges - are granted via the admin token mechanism (the --os-token - option of the keystone command). - The admin token allows initialisation of the identity database before the admin - role exists. - - The alias construct exists for convenience. An alias is a short name for a - complex or hard to understand rule. - It is defined in the same way as a policy: - - alias name : alias definition - - Once an alias is defined, use the rule keyword to refer to it in a policy rule. -
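As an illustration of the special checks, the following rules combine a role check, an alias reference, and delegation to a remote policy server. The targets and the policy.example.com URL are placeholders, not actual policies shipped by any service:
"identity:create_user": "role:admin",
"stacks:create": "rule:deny_stack_user",
"volumes:extend": "http://policy.example.com/check"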
- -
- Older syntax
- You may encounter older policy.json files that feature a different syntax,
- where JSON arrays are used instead of boolean operators.
- For example, the EC2 credentials rule above would have been written as
- follows:
- "identity:ec2_delete_credential": [ [ "rule:admin_required" ],
-    [ "rule:owner", "user_id:%(target.credential.user_id)s" ] ]
- The rule is an array of arrays. The innermost arrays are or'ed together,
- whereas elements inside the innermost arrays are and'ed.
- While the old syntax is still supported, we recommend using the newer, more
- intuitive syntax.
- -
diff --git a/doc/config-reference/bk-config-ref.xml b/doc/config-reference/bk-config-ref.xml deleted file mode 100644 index 35435b3237..0000000000 --- a/doc/config-reference/bk-config-ref.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - OpenStack Configuration Reference - - - OpenStack Configuration Reference - - - - - - - - OpenStack Foundation - - - - 2013 - 2014 - 2015 - OpenStack Foundation - - OpenStack - mitaka - - - - Copyright details are filled in by the - template. - - - - This document is for system administrators who want - to look up configuration options. It contains lists of - configuration options available with OpenStack and - uses auto-generation to generate options and the - descriptions from the code for each project. It - includes sample configuration files. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/config-reference/block-storage/backup/ceph-backup-driver.xml b/doc/config-reference/block-storage/backup/ceph-backup-driver.xml deleted file mode 100644 index e4738890f8..0000000000 --- a/doc/config-reference/block-storage/backup/ceph-backup-driver.xml +++ /dev/null @@ -1,58 +0,0 @@ -
- Ceph backup driver - The Ceph backup driver backs up volumes of any type to a - Ceph back-end store. The driver can also detect - whether the volume to be backed up is a Ceph RBD - volume, and if so, it tries to perform incremental and - differential backups. - For source Ceph RBD volumes, you can perform backups - within the same Ceph pool (not recommended). You can also perform - backups between different Ceph pools and between - different Ceph clusters. - At the time of writing, differential backup support in - Ceph/librbd was quite new. This driver attempts a - differential backup in the first instance. If the - differential backup fails, the driver falls back to - full backup/copy. - If incremental backups are used, multiple backups of the - same volume are stored as snapshots so that minimal - space is consumed in the backup store. It takes far - less time to restore a volume than to take a full - copy. - - Block Storage enables you to: - - - Restore to a new volume, which - is the default and recommended - action. - - - Restore to the original volume - from which the backup was taken. - The restore action takes a full - copy because this is the safest - action. - - - - To enable the Ceph backup driver, include the following - option in the cinder.conf - file: - backup_driver = cinder.backup.drivers.ceph - The following configuration options are available for - the Ceph backup driver. - - This example shows the default options for the Ceph - backup driver. - backup_ceph_conf=/etc/ceph/ceph.conf -backup_ceph_user = cinder -backup_ceph_chunk_size = 134217728 -backup_ceph_pool = backups -backup_ceph_stripe_unit = 0 -backup_ceph_stripe_count = 0 -
diff --git a/doc/config-reference/block-storage/backup/nfs-backup-driver.xml b/doc/config-reference/block-storage/backup/nfs-backup-driver.xml deleted file mode 100644 index 40d31ac27b..0000000000 --- a/doc/config-reference/block-storage/backup/nfs-backup-driver.xml +++ /dev/null @@ -1,15 +0,0 @@ -
- NFS backup driver - The backup driver for the NFS back end backs up volumes of any type - to an NFS exported backup repository. - To enable the NFS backup driver, include the following - option in the [DEFAULT] section of the cinder.conf - file: - backup_driver = cinder.backup.drivers.nfs - The following configuration options are available for - the NFS back-end backup driver. - -
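- As a rough sketch only (the option names backup_share, backup_mount_options,
- and backup_container are assumptions to verify against your release, and the
- share path is a placeholder), an NFS backup configuration might look like:
- backup_driver = cinder.backup.drivers.nfs
-backup_share = nfs-host:/export/cinder-backups
-backup_mount_options = vers=4
-backup_container = volumebackups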
diff --git a/doc/config-reference/block-storage/backup/swift-backup-driver.xml b/doc/config-reference/block-storage/backup/swift-backup-driver.xml deleted file mode 100644 index 7c7f91523a..0000000000 --- a/doc/config-reference/block-storage/backup/swift-backup-driver.xml +++ /dev/null @@ -1,36 +0,0 @@ -
- Swift backup driver - The backup driver for the swift back end performs a volume - backup to an object storage system. - To enable the swift backup driver, include the following - option in the cinder.conf - file: - backup_driver = cinder.backup.drivers.swift - The following configuration options are available for - the Swift back-end backup driver. - - - To enable the swift backup driver for 1.0 or 2.0 authentication version, - specify 1 or 2 correspondingly. For example: - backup_swift_auth_version = 2 - In addition, the 2.0 authentication system requires - backup_swift_tenant setting: - backup_swift_tenant = <None> - - This example shows the default options for the Swift - back-end backup driver. - backup_swift_url = http://localhost:8080/v1/AUTH_ -backup_swift_auth = per_user -backup_swift_auth_version = 1 -backup_swift_user = <None> -backup_swift_key = <None> -backup_swift_container = volumebackups -backup_swift_object_size = 52428800 -backup_swift_retry_attempts = 3 -backup_swift_retry_backoff = 2 -backup_compression_algorithm = zlib -
diff --git a/doc/config-reference/block-storage/backup/tsm-backup-driver.xml b/doc/config-reference/block-storage/backup/tsm-backup-driver.xml deleted file mode 100644 index 52ea8827d4..0000000000 --- a/doc/config-reference/block-storage/backup/tsm-backup-driver.xml +++ /dev/null @@ -1,29 +0,0 @@ -
- IBM Tivoli Storage Manager backup driver - The IBM Tivoli Storage Manager (TSM) backup driver enables - performing volume backups to a TSM server. - - The TSM client should be installed and configured on the - machine running the cinder-backup - service. - See the IBM Tivoli Storage Manager - Backup-Archive Client Installation and User's Guide for - details on installing the TSM client. - - To enable the IBM TSM backup driver, include the following option - in cinder.conf: - backup_driver = cinder.backup.drivers.tsm - The following configuration options are available for the - TSM backup driver. - - This example shows the default options for the TSM backup - driver. - backup_tsm_volume_prefix = backup -backup_tsm_password = password -backup_tsm_compression = True -
- diff --git a/doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml b/doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml deleted file mode 100644 index b86a022df8..0000000000 --- a/doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml +++ /dev/null @@ -1,262 +0,0 @@ - -
- Blockbridge EPS - - -
- Introduction - Blockbridge is software that transforms commodity infrastructure into - secure multi-tenant storage that operates as a programmable service. It - provides automatic encryption, secure deletion, quality of service, - replication, and programmable security capabilities on your choice of - hardware. Blockbridge uses micro-segmentation to provide isolation that - allows you to concurrently operate OpenStack, Docker, and bare-metal - workflows on shared resources. When used with OpenStack, isolated management - domains are dynamically created on a per-project basis. All volumes and - clones, within and between projects, are automatically cryptographically - isolated and implement secure deletion. - -
- -
- Architecture reference - -
- Blockbridge architecture - - - - - -
- -
- Control paths - The Blockbridge driver is packaged with the core distribution of - OpenStack. Operationally, it executes in the context of the Block Storage - service. The driver communicates with an OpenStack-specific API provided - by the Blockbridge EPS platform. Blockbridge optionally communicates with - OpenStack Identity, Compute, and Block Storage services. - -
- -
- Block storage API - Blockbridge is API driven software-defined storage. The system - implements a native HTTP API that is tailored to the specific needs of - OpenStack. Each Block Storage service operation maps to a single back-end - API request that provides ACID semantics. The API is specifically designed - to reduce, if not eliminate, the possibility of inconsistencies between - the Block Storage service and external storage infrastructure in the event - of hardware, software or data center failure. - -
- -
- Extended management - OpenStack users may utilize Blockbridge interfaces to manage - replication, auditing, statistics, and performance information on a - per-project and per-volume basis. In addition, they can manage low-level - data security functions including verification of data authenticity and - encryption key delegation. Native integration with the Identity Service - allows tenants to use a single set of credentials. Integration with Block - storage and Compute services provides dynamic metadata mapping when using - Blockbridge management APIs and tools. - -
- -
- Attribute-based provisioning - Blockbridge organizes resources using descriptive identifiers called - attributes. Attributes are assigned by administrators - of the infrastructure. They are used to describe the characteristics of - storage in an application-friendly way. Applications construct queries - that describe storage provisioning constraints and the Blockbridge storage - stack assembles the resources as described. - - Any given instance of a Blockbridge volume driver specifies a - query for resources. For example, a query could - specify '+ssd +10.0.0.0 +6nines -production iops.reserve=1000 - capacity.reserve=30%'. This query is satisfied by selecting SSD - resources, accessible on the 10.0.0.0 network, with high resiliency, for - non-production workloads, with guaranteed IOPS of 1000 and a storage - reservation for 30% of the volume capacity specified at create - time. Queries and parameters are completely administrator defined: they - reflect the layout, resource, and organizational goals of a specific - deployment. - -
-
- -
- Supported operations - - - Create, delete, clone, attach, and detach volumes - - - Create and delete volume snapshots - - - Create a volume from a snapshot - - - Copy an image to a volume - - - Copy a volume to an image - - - Extend a volume - - - Get volume statistics - - -
- -
- Supported protocols - Blockbridge provides iSCSI access to storage. A unique iSCSI data - fabric is programmatically assembled when a volume is attached to an - instance. A fabric is disassembled when a volume is detached from an - instance. Each volume is an isolated SCSI device that supports persistent - reservations. -
- -
- Configuration steps -
- Create an authentication token - Whenever possible, avoid using password-based authentication. Even - if you have created a role-restricted administrative user via Blockbridge, - token-based authentication is preferred. You can generate persistent - authentication tokens using the Blockbridge command-line tool as follows: - - $ bb -H bb-mn authorization create --notes "OpenStack" --restrict none -Authenticating to https://bb-mn/api - -Enter user or access token: system -Password for system: -Authenticated; token expires in 3599 seconds. - -== Authorization: ATH4762894C40626410 -notes OpenStack -serial ATH4762894C40626410 -account system (ACT0762594C40626440) -user system (USR1B62094C40626440) -enabled yes -created at 2015-10-24 22:08:48 +0000 -access type online -token suffix xaKUy3gw -restrict none - -== Access Token -access token 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw - -*** Remember to record your access token! -
- -
- Create volume type - Before configuring and enabling the Blockbridge volume driver, - register an OpenStack volume type and associate it with a - volume_backend_name. In this example, a volume type, - 'Production', is associated with the volume_backend_name - 'blockbridge_prod': $ cinder type-create Production -$ cinder type-key Production volume_backend_name=blockbridge_prod - -
- -
- Specify volume driver - Configure the Blockbridge volume driver in - /etc/cinder/cinder.conf. Your - volume_backend_name must match the value specified in the - cinder type-key command in the previous step. - volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver -volume_backend_name = blockbridge_prod - -
- -
- Specify API endpoint and authentication - Configure the API endpoint and authentication. The following - example uses an authentication token. You must create your own as - described above. - blockbridge_api_host = [ip or dns of management cluster] -blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw - -
- -
- Specify resource query - By default, a single pool is configured (implied) with a default - resource query of '+openstack'. Within Blockbridge, - datastore resources that advertise the 'openstack' attribute will be - selected to fulfill OpenStack provisioning requests. If you prefer a more - specific query, define a custom pool configuration. - blockbridge_pools = Production: +production +qos iops.reserve=5000 - - Pools support storage systems that offer multiple classes of - service. You may wish to configure multiple pools to implement more - sophisticated scheduling capabilities. - -
-
- -
- Configuration options - -
- -
- Configuration example - cinder.conf example file - [Default] -enabled_backends = bb_devel bb_prod - -[bb_prod] -volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver -volume_backend_name = blockbridge_prod -blockbridge_api_host = [ip or dns of management cluster] -blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw -blockbridge_pools = Production: +production +qos iops.reserve=5000 - -[bb_devel] -volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver -volume_backend_name = blockbridge_devel -blockbridge_api_host = [ip or dns of management cluster] -blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw -blockbridge_pools = Development: +development -
- -
- Multiple volume types - Volume types are exposed to tenants, - pools are not. To offer multiple classes of storage to - OpenStack tenants, you should define multiple volume types. Simply repeat - the process above for each desired type. Be sure to specify a unique - volume_backend_name and pool configuration for each type. The - - cinder.conf example included with this - documentation illustrates configuration of multiple types. - -
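- For instance, reusing the hypothetical blockbridge_devel back end from the
- configuration example above, a second volume type could be defined as follows
- (the type name is illustrative only):
- $ cinder type-create Development
-$ cinder type-key Development set volume_backend_name=blockbridge_devel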
- -
- Testing resources - Blockbridge is freely available for testing purposes and deploys in - seconds as a Docker container. This is the same container used to run - continuous integration for OpenStack. For more information visit www.blockbridge.io. - -
- -
diff --git a/doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml b/doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml deleted file mode 100644 index 13ed6f3b09..0000000000 --- a/doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml +++ /dev/null @@ -1,122 +0,0 @@ -
- Ceph RADOS Block Device (RBD) - If you use KVM or QEMU as your hypervisor, you can configure - the Compute service to use - Ceph RADOS block devices (RBD) for volumes. - Ceph is a massively scalable, open source, distributed storage system. It is comprised of - an object store, block store, and a POSIX-compliant distributed file system. The platform - can auto-scale to the exabyte level and beyond. It runs on commodity hardware, is - self-healing and self-managing, and has no single point of failure. Ceph is in the Linux - kernel and is integrated with the OpenStack cloud operating system. Due to its open-source - nature, you can install and use this portable storage platform in public or private clouds.
- Ceph architecture - - - - - -
-
- - RADOS - Ceph is based on RADOS: Reliable Autonomic Distributed Object - Store. RADOS distributes objects across the storage cluster and - replicates objects for fault tolerance. RADOS contains the following major - components: - - - Object Storage Device (OSD) Daemon. The storage daemon - for the RADOS service, which interacts with the OSD (physical or logical storage - unit for your data). - You must run this daemon on each server in your cluster. For each OSD, you can - have an associated hard drive disk. For performance purposes, pool your hard - drive disk with raid arrays, logical volume management (LVM), or B-tree file - system (Btrfs) pooling. By default, the following pools - are created: data, metadata, and RBD. - - - Meta-Data Server (MDS). - Stores metadata. MDSs build a POSIX file system on - top of objects for Ceph clients. However, if you - do not use the Ceph file system, you do not need a - metadata server. - - - Monitor (MON). A lightweight daemon that handles all - communications with external applications and clients. It also provides a - consensus for distributed decision making in a Ceph/RADOS cluster. For instance, - when you mount a Ceph shared on a client, you point to the address of a MON - server. It checks the state and the consistency of the data. In an ideal setup, - you must run at least three ceph-mon daemons on separate - servers. - - - Ceph developers recommend XFS for production deployments, Btrfs - for testing, development, and any non-critical deployments. Btrfs has the correct - feature set and roadmap to serve Ceph in the long-term, but XFS and ext4 provide the - necessary stability for today’s deployments. - - If using Btrfs, ensure that you use the correct version - (see Ceph - Dependencies). - For more information about usable file systems, see ceph.com/ceph-storage/file-system/. - - - - Ways to store, use, and expose data - To store and access your data, you can use the following - storage systems: - - - RADOS. Use as an object, - default storage mechanism. - - - RBD. Use as a block device. - The Linux kernel RBD (RADOS block device) driver - allows striping a Linux block device over multiple - distributed object store data objects. It is - compatible with the KVM RBD image. - - - CephFS. Use as a file, - POSIX-compliant file system. - - - Ceph exposes RADOS; you can access it through the following interfaces: - - - RADOS Gateway. OpenStack Object Storage and Amazon-S3 - compatible RESTful interface (see RADOS_Gateway). - - - librados, and its related C/C++ bindings. - - - RBD and QEMU-RBD. Linux - kernel and QEMU block devices that stripe data - across multiple objects. - - - - - Driver options - The following table contains the configuration options - supported by the Ceph RADOS Block Device driver. - - Deprecation notice - The option has been - deprecated and replaced by . - - - -
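- The generated option table is not reproduced here. As a hedged illustration only
- (the pool, user, and secret UUID below are placeholders, and defaults may differ
- between releases), a typical cinder.conf back end for the RBD driver might look like:
- [ceph]
-volume_driver = cinder.volume.drivers.rbd.RBDDriver
-rbd_pool = volumes
-rbd_ceph_conf = /etc/ceph/ceph.conf
-rbd_user = cinder
-rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
-rbd_flatten_volume_from_snapshot = false
-rbd_max_clone_depth = 5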
diff --git a/doc/config-reference/block-storage/drivers/datera-volume-driver.xml b/doc/config-reference/block-storage/drivers/datera-volume-driver.xml deleted file mode 100644 index 9f9f5796dd..0000000000 --- a/doc/config-reference/block-storage/drivers/datera-volume-driver.xml +++ /dev/null @@ -1,21 +0,0 @@ - -
- Datera volume driver - - - - Set the following in your cinder.conf to - use the Datera volume driver: - - volume_driver = cinder.volume.drivers.datera.DateraDriver - - Use the following options to configure the volume driver: - - -
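- The option table is omitted here. As a rough sketch only, and because the Datera
- driver of this era builds on the generic SAN driver, the san_* option names below
- are assumptions that should be verified against your release; the values are
- placeholders:
- volume_driver = cinder.volume.drivers.datera.DateraDriver
-san_ip = <IP address of the Datera management interface>
-san_login = <Datera account name>
-san_password = <Datera account password>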
diff --git a/doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml b/doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml deleted file mode 100644 index 24b2ff1302..0000000000 --- a/doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml +++ /dev/null @@ -1,181 +0,0 @@ - -
- Dell EqualLogic volume driver - The Dell EqualLogic volume driver interacts with configured - EqualLogic arrays and supports various operations. - - Supported operations - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Clone a volume. - - - The OpenStack Block Storage service supports: - - Multiple instances of Dell EqualLogic Groups or Dell - EqualLogic Group Storage Pools and multiple pools on a single array. - - Multiple instances of Dell EqualLogic Groups or Dell - EqualLogic Group Storage Pools or multiple pools on a single array. - - - The Dell EqualLogic volume driver's ability to access the EqualLogic - Group is dependent upon the generic block storage driver's SSH settings - in the /etc/cinder/cinder.conf file (see for reference). - - The following sample /etc/cinder/cinder.conf - configuration lists the relevant settings for a typical Block Storage - service using a single Dell EqualLogic Group: - Default (single-instance) configuration -[DEFAULT] -#Required settings - -volume_driver = cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver -san_ip = IP_EQLX -san_login = SAN_UNAME -san_password = SAN_PW -eqlx_group_name = EQLX_GROUP -eqlx_pool = EQLX_POOL - -#Optional settings - -san_thin_provision = true|false -eqlx_use_chap = true|false -eqlx_chap_login = EQLX_UNAME -eqlx_chap_password = EQLX_PW -eqlx_cli_max_retries = 5 -san_ssh_port = 22 -ssh_conn_timeout = 30 -san_private_key = SAN_KEY_PATH -ssh_min_pool_conn = 1 -ssh_max_pool_conn = 5 - - In this example, replace the following variables accordingly: - - - IP_EQLX - - The IP address used to reach the Dell EqualLogic Group through - SSH. This field has no default value. - - - - SAN_UNAME - - The user name to login to the Group manager via SSH at - the san_ip. Default user name is grpadmin. - - - - SAN_PW - - The corresponding password of SAN_UNAME. - Not used when san_private_key is set. Default - password is password. - - - - EQLX_GROUP - - The group to be used for a pool where the Block Storage service - will create volumes and snapshots. Default group is group-0. - - - - EQLX_POOL - - The pool where the Block Storage service will create volumes - and snapshots. Default pool is default. This - option cannot be used for multiple pools utilized by the Block - Storage service on a single Dell EqualLogic Group. - - - - EQLX_UNAME - - The CHAP login account for each - volume in a pool, if eqlx_use_chap is set - to true. Default account name is chapadmin. - - - - EQLX_PW - - The corresponding password of EQLX_UNAME. - The default password is randomly generated in hexadecimal, so you - must set this password manually. - - - - SAN_KEY_PATH (optional) - - The filename of the private key used - for SSH authentication. This provides password-less login to the - EqualLogic Group. Not used when san_password - is set. There is no default value. - - - - In addition, enable thin provisioning for SAN volumes using the - default san_thin_provision = true - setting. 
- Multi back-end Dell EqualLogic configuration
- The following example shows the typical configuration for a Block
- Storage service that uses two Dell EqualLogic back ends:
- enabled_backends = backend1,backend2
-san_ssh_port = 22
-ssh_conn_timeout = 30
-san_thin_provision = true
-
-[backend1]
-volume_driver = cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver
-volume_backend_name = backend1
-san_ip = IP_EQLX1
-san_login = SAN_UNAME
-san_password = SAN_PW
-eqlx_group_name = EQLX_GROUP
-eqlx_pool = EQLX_POOL
-
-[backend2]
-volume_driver = cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver
-volume_backend_name = backend2
-san_ip = IP_EQLX2
-san_login = SAN_UNAME
-san_password = SAN_PW
-eqlx_group_name = EQLX_GROUP
-eqlx_pool = EQLX_POOL
- In this example:
- Thin provisioning for SAN volumes is enabled
- (san_thin_provision = true). This is
- recommended when setting up Dell EqualLogic back ends.
- Each Dell EqualLogic back-end configuration
- ([backend1] and [backend2])
- has the same required settings as a single back-end configuration,
- with the addition of volume_backend_name.
- The san_ssh_port option is set
- to its default value, 22. This option sets the port used
- for SSH.
- The ssh_conn_timeout option is
- also set to its default value, 30. This option sets the
- timeout in seconds for CLI commands over SSH.
- The IP_EQLX1 and IP_EQLX2
- refer to the IP addresses used to reach the Dell EqualLogic
- Group of backend1 and backend2
- through SSH, respectively.
- For information on configuring multiple back ends, see
- Configure a multiple-storage back end.
diff --git a/doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml b/doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml deleted file mode 100644 index 399e9b4433..0000000000 --- a/doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml +++ /dev/null @@ -1,144 +0,0 @@ - -
- Dell Storage Center Fibre Channel and iSCSI drivers - The Dell Storage Center volume driver interacts with configured - Storage Center arrays. - The Dell Storage Center driver manages Storage Center arrays through - Enterprise Manager. Enterprise Manager connection settings and Storage - Center options are defined in the cinder.conf file. - - - Prerequisite: Dell Enterprise Manager 2015 R1 or later must be used. - - - Supported operations - The Dell Storage Center volume driver provides the following - Cinder volume operations: - - - Create, delete, attach (map), and detach (unmap) volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - - - Extra spec options - Volume type extra specs can be used to select different Storage - Profiles. - Storage Profiles control how Storage Center manages volume data. - For a given volume, the selected Storage Profile dictates which disk - tier accepts initial writes, as well as how data progression moves data - between tiers to balance performance and cost. Predefined Storage - Profiles are the most effective way to manage data in Storage - Center. - By default, if no Storage Profile is specified in the volume extra - specs, the default Storage Profile for the user account configured for - the Block Storage driver is used. The extra spec key - storagetype:storageprofile with the value of the name - of the Storage Profile on the Storage Center can be set to allow to use - Storage Profiles other than the default. - For ease of use from the command line, spaces in Storage Profile - names are ignored. As an example, here is how to define two volume - types using the High Priority and Low - Priority Storage Profiles: - $ cinder type-create "GoldVolumeType" -$ cinder type-key "GoldVolumeType" set storagetype:storageprofile=highpriority -$ cinder type-create "BronzeVolumeType" -$ cinder type-key "BronzeVolumeType" set storagetype:storageprofile=lowpriority - - - iSCSI configuration - Use the following instructions to update the configuration - file for iSCSI: - Sample iSCSI Configuration - default_volume_type = delliscsi -enabled_backends = delliscsi - -[delliscsi] -# Name to give this storage backend -volume_backend_name = delliscsi -# The iSCSI driver to load -volume_driver = cinder.volume.drivers.dell.dell_storagecenter_iscsi.DellStorageCenterISCSIDriver -# IP address of Enterprise Manager -san_ip = 172.23.8.101 -# Enterprise Manager user name -san_login = Admin -# Enterprise Manager password -san_password = secret -# The Storage Center iSCSI IP address -iscsi_ip_address = 192.168.0.20 -# The Storage Center serial number to use -dell_sc_ssn = 64702 - -# ==Optional settings== -# The Enterprise Manager API port -dell_sc_api_port = 3033 -# Server folder to place new server definitions -dell_sc_server_folder = devstacksrv -# Volume folder to place created volumes -dell_sc_volume_folder = devstackvol/Cinder -# The iSCSI IP port -iscsi_port = 3260 - - - - - Fibre Channel configuration - Use the following instructions to update the configuration - file for fibre channel: - Sample FC configuration - default_volume_type = dellfc -enabled_backends = dellfc - -[dellfc] -# Name to give this storage backend -volume_backend_name = dellfc -# The FC driver to load -volume_driver = cinder.volume.drivers.dell.dell_storagecenter_fc.DellStorageCenterFCDriver -# IP address of Enterprise Manager -san_ip = 172.23.8.101 -# 
Enterprise Manager user name -san_login = Admin -# Enterprise Manager password -san_password = secret -# The Storage Center serial number to use -dell_sc_ssn = 64702 - -# Optional settings - -# The Enterprise Manager API port -dell_sc_api_port = 3033 -# Server folder to place new server definitions -dell_sc_server_folder = devstacksrv -# Volume folder to place created volumes -dell_sc_volume_folder = devstackvol/Cinder - - - - - Driver options - The following table contains the configuration options specific to the - Dell Storage Center volume driver. - - -
diff --git a/doc/config-reference/block-storage/drivers/dothill-driver.xml b/doc/config-reference/block-storage/drivers/dothill-driver.xml deleted file mode 100644 index b4ed6bc97e..0000000000 --- a/doc/config-reference/block-storage/drivers/dothill-driver.xml +++ /dev/null @@ -1,271 +0,0 @@ - -
- Dot Hill AssuredSAN Fibre Channel and iSCSI drivers - - The DotHillFCDriver and - DotHillISCSIDriver Cinder drivers allow Dot Hill arrays - to be used for block storage in OpenStack deployments. - - - System requirements - - To use the Dot Hill drivers, the following are required: - - - - Dot Hill AssuredSAN array with: - - - - iSCSI or FC host interfaces - - - - G22x firmware or later - - - - Appropriate licenses for the snapshot and copy volume - features - - - - - - Network connectivity between the OpenStack host and the array - management interfaces - - - - HTTPS or HTTP must be enabled on the array - - - - - - Supported operations - - - - Create, delete, attach, and detach volumes. - - - - Create, list, and delete volume snapshots. - - - - Create a volume from a snapshot. - - - - Copy an image to a volume. - - - - Copy a volume to an image. - - - - Clone a volume. - - - - Extend a volume. - - - - Migrate a volume with back-end assistance. - - - - Retype a volume. - - - - Manage and unmanage a volume. - - - - - - Configuring the array - - - - Verify that the array can be managed via an HTTPS connection. - HTTP can also be used if dothill_api_protocol=http - is placed into the appropriate sections of the - cinder.conf file. - - Confirm that virtual pools A and B are present if you plan to - use virtual pools for OpenStack storage. - - If you plan to use vdisks instead of virtual pools, create or - identify one or more vdisks to be used for OpenStack storage; - typically this will mean creating or setting aside one disk group for - each of the A and B controllers. - - - - Edit the cinder.conf file to define an - storage backend entry for each storage pool on the array that will be - managed by OpenStack. Each entry consists of a unique section name, - surrounded by square brackets, followed by options specified in - key=value format. - - - - The dothill_backend_name value - specifies the name of the storage pool or vdisk on the - array. - - - - The volume_backend_name option value - can be a unique value, if you wish to be able to assign volumes - to a specific storage pool on the array, or a name that's shared - among multiple storage pools to let the volume scheduler choose - where new volumes are allocated. - - - - The rest of the options will be repeated for each storage - pool in a given array: the appropriate Cinder driver name; IP - address or hostname of the array management interface; the - username and password of an array user account with - manage privileges; and the iSCSI IP addresses - for the array if using the iSCSI transport protocol. - - - - In the examples below, two backends are defined, one for pool A - and one for pool B, and a common - volume_backend_name is used so that a single volume - type definition can be used to allocate volumes from both - pools. 
- - - iSCSI example backend entries - - [pool-a] -dothill_backend_name = A -volume_backend_name = dothill-array -volume_driver = cinder.volume.drivers.dothill.dothill_iscsi.DotHillISCSIDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage -dothill_iscsi_ips = 10.2.3.4,10.2.3.5 - -[pool-b] -dothill_backend_name = B -volume_backend_name = dothill-array -volume_driver = cinder.volume.drivers.dothill.dothill_iscsi.DotHillISCSIDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage -dothill_iscsi_ips = 10.2.3.4,10.2.3.5 - - - - Fibre Channel example backend entries - - [pool-a] -dothill_backend_name = A -volume_backend_name = dothill-array -volume_driver = cinder.volume.drivers.dothill.dothill_fc.DotHillFCDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage - -[pool-b] -dothill_backend_name = B -volume_backend_name = dothill-array -volume_driver = cinder.volume.drivers.dothill.dothill_fc.DotHillFCDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage - - - - - If any volume_backend_name value refers to a - vdisk rather than a virtual pool, add an additional statement - dothill_backend_type = linear to that backend - entry. - - - - If HTTPS is not enabled in the array, include - dothill_api_protocol = http in each of the backend - definitions. - - - - If HTTPS is enabled, you can enable certificate verification - with the option dothill_verify_certificate=True. - You may also use the - dothill_verify_certificate_path parameter to - specify the path to a CA_BUNDLE file containing CAs other than those - in the default list. - - - - Modify the [DEFAULT] section of the - cinder.conf file to add an - enabled_backends parameter specifying the backend - entries you added, and a default_volume_type - parameter specifying the name of a volume type that you will create in - the next step. - - - [DEFAULT] section changes - - [DEFAULT] - ... -enabled_backends = pool-a,pool-b -default_volume_type = dothill - ... - - - - - Create a new volume type for each distinct - volume_backend_name value that you added to - cinder.conf. The example below assumes that the same - volume_backend_name=dothill-array option was - specified in all of the entries, and specifies that the volume type - dothill can be used to allocate volumes from any of - them. - Creating a volume type - - $ cinder type-create dothill - - $ cinder type-key dothill set - volume_backend_name=dothill-array - - - - - After modifying cinder.conf, restart the - cinder-volume service. - - - - - - Driver-specific options - - The following table contains the configuration options that are - specific to the Dot Hill drivers. - - - -
diff --git a/doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml b/doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml deleted file mode 100644 index acf163a9d6..0000000000 --- a/doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml +++ /dev/null @@ -1,134 +0,0 @@ - -
- EMC ScaleIO Block Storage driver configuration - - ScaleIO is a software-only solution that uses existing servers' local disks and LAN to create a virtual SAN that - has all of the benefits of external storage but at a fraction of the cost and complexity. Using the driver, - OpenStack Block Storage hosts can connect to a ScaleIO Storage cluster. - This section explains how to configure and connect an OpenStack block storage host to a ScaleIO storage cluster. -
- Support matrix - - - ScaleIO: Version 1.32 - - -
-
- Supported operations - - - Create, delete, clone, attach, and detach volumes - - - Create and delete volume snapshots - - - Create a volume from a snapshot - - - Copy an image to a volume - - - Copy a volume to an image - - - Extend a volume - - - Get volume statistics - - -
-
- ScaleIO Block Storage driver configuration - Edit the cinder.conf file by adding the configuration below under the - [DEFAULT] section of the file in case of a single back end or under a separate section in case - of multiple back ends (for example [ScaleIO]). The configuration file is usually located under the - following path /etc/cinder/cinder.conf. - For a configuration example, refer to the configuration example. -
- ScaleIO driver name - Configure the driver name by adding the following parameter: - volume_driver = cinder.volume.drivers.emc.scaleio.ScaleIODriver -
-
- ScaleIO MDM server IP - The ScaleIO Meta Data Manager monitors and maintains the available resources and permissions. - To retrieve the MDM server IP, use the drv_cfg --query_mdms CLI command. - Configure the MDM server IP by adding the following parameter: - san_ip = ScaleIO MDM IP -
-
- ScaleIO protection domain name - ScaleIO allows multiple protection domains (groups of SDSs that provide backup for each other). - To retrieve the available protection domains, use the scli --query_all and search - for the protection domains section. - Configure the protection domain for newly created volumes by adding the following parameter: - sio_protection_domain_name = ScaleIO Protection Domain -
-
- ScaleIO storage pool name - A ScaleIO storage pool is a set of physical devices in a protection domain. - To retrieve the available storage pools, use the scli --query_all and search - for available storage pools. - Configure the storage pool for newly created volumes by adding the following parameter: - sio_storage_pool_name = ScaleIO Storage Pool -
-
- ScaleIO storage pools - Multiple storage pools and protection domains can be listed for use by the virtual machines. - To retrieve the available storage pools, use the scli --query_all and search - for available storage pools. - Configure the available storage pools by adding the following parameter: - sio_storage_pools = Comma separated list of protection domain:storage pool name -
-
- ScaleIO user credentials - OpenStack Block Storage requires a ScaleIO user with administrative privileges. ScaleIO recommends - creating a dedicated OpenStack user account that holds an administrative user role. - Refer to the ScaleIO User Guide for details on user account management - Configure the user credentials by adding the following parameters: - san_login = ScaleIO username - san_password = ScaleIO password -
-
-
- Multiple back ends - Configuring multiple storage back ends enables you to create several back-end storage solutions that serve the same OpenStack Compute resources. - When a volume is created, the scheduler selects the appropriate back end to handle the request, according to the specified volume type. -
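- As a hedged sketch (section names, back-end names, and the domain and pool values
- are arbitrary placeholders modeled on the single back-end example below), two
- ScaleIO back ends could be declared like this:
- [DEFAULT]
-enabled_backends = scaleio1, scaleio2
-
-[scaleio1]
-volume_driver = cinder.volume.drivers.emc.scaleio.ScaleIODriver
-volume_backend_name = scaleio1
-san_ip = MDM1_IP
-sio_protection_domain_name = Domain1
-sio_storage_pool_name = Pool1
-san_login = SIO_USER
-san_password = SIO_PASSWD
-
-[scaleio2]
-volume_driver = cinder.volume.drivers.emc.scaleio.ScaleIODriver
-volume_backend_name = scaleio2
-san_ip = MDM2_IP
-sio_protection_domain_name = Domain2
-sio_storage_pool_name = Pool2
-san_login = SIO_USER
-san_password = SIO_PASSWD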
-
- Restarting OpenStack Block Storage
- Save the cinder.conf file and restart the Block Storage service by running the following command:
- $ openstack-service restart cinder-volume
-
- Configuration example - cinder.conf example file - You can update the cinder.conf file by editing the necessary parameters as follows: - -[Default] -enabled_backends = scaleio - -[scaleio] -volume_driver = cinder.volume.drivers.emc.scaleio.ScaleIODriver -volume_backend_name = scaleio -san_ip = MDM_IP -sio_protection_domain_name = Default_domain -sio_storage_pool_name = Default_pool -sio_storage_pools = Domain1:Pool1,Domain2:Pool2 -san_login = SIO_USER -san_password = SIO_PASSWD - -
-
- Configuration options - The ScaleIO driver supports these configuration options: - -
-
diff --git a/doc/config-reference/block-storage/drivers/emc-vmax-driver.xml b/doc/config-reference/block-storage/drivers/emc-vmax-driver.xml deleted file mode 100644 index 7149d609e0..0000000000 --- a/doc/config-reference/block-storage/drivers/emc-vmax-driver.xml +++ /dev/null @@ -1,372 +0,0 @@ - -
- EMC VMAX iSCSI and FC drivers
- The EMC VMAX drivers, EMCVMAXISCSIDriver
- and EMCVMAXFCDriver, support the use of EMC
- VMAX storage arrays under OpenStack Block Storage. They both
- provide equivalent functions and differ only in support for
- their respective host attachment methods.
- The drivers perform volume operations by communicating with
- the back-end VMAX storage. They use a CIM client in Python
- called PyWBEM to perform CIM operations over HTTP.
- The EMC CIM Object Manager (ECOM) is packaged with the EMC
- SMI-S provider. It is a CIM server that enables CIM clients to
- perform CIM operations over HTTP by using SMI-S in the
- back-end for VMAX storage operations.
- The EMC SMI-S Provider supports the SNIA Storage Management
- Initiative (SMI), an ANSI standard for storage management. It
- supports the VMAX storage system.
- System requirements - EMC SMI-S Provider V4.6.2.8 and higher is required. You - can download SMI-S from the - EMC's support - web site (login is required). - See the EMC SMI-S Provider - release notes for installation instructions. - EMC storage VMAX Family is supported. -
-
- Supported operations - VMAX drivers support these operations: - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - Retype a volume. - - - Create a volume from a snapshot. - - - VMAX drivers also support the following features: - - - FAST automated storage tiering policy. - - - Dynamic masking view creation. - - - Striped volume creation. - - -
-
- - Set up the VMAX drivers - - To set up the EMC VMAX drivers - - Install the python-pywbem - package for your distribution. See . - - - Download SMI-S from PowerLink and install it. - Add your VMAX arrays to SMI-S. - For information, see and the SMI-S release - notes. - - - Change configuration files. See and . - - - Configure connectivity. For FC driver, - see . - For iSCSI driver, see . - - -
- Install the <package>python-pywbem</package> - package - Install the python-pywbem - package for your distribution, as follows: - - - On Ubuntu: - # apt-get install python-pywbem - - - On openSUSE: - # zypper install python-pywbem - - - On Red Hat Enterprise Linux, CentOS, and Fedora: - # yum install pywbem - - -
-
- Set up SMI-S - You can install SMI-S on a non-OpenStack host. - Supported platforms include different flavors of - Windows, Red Hat, and SUSE Linux. SMI-S can be - installed on a physical server or a VM hosted by - an ESX server. Note that the supported hypervisor - for a VM running SMI-S is ESX only. See the EMC - SMI-S Provider release notes for more information - on supported platforms and installation instructions. - - - You must discover storage arrays on the SMI-S - server before you can use the VMAX drivers. - Follow instructions in the SMI-S release - notes. - - SMI-S is usually installed at - /opt/emc/ECIM/ECOM/bin on - Linux and C:\Program - Files\EMC\ECIM\ECOM\bin on Windows. - After you install and configure SMI-S, go to that - directory and type - TestSmiProvider.exe. - Use addsys in - TestSmiProvider.exe to add an - array. Use dv and examine the - output after the array is added. Make sure that the - arrays are recognized by the SMI-S server before using - the EMC VMAX drivers. -
-
- <filename>cinder.conf</filename> configuration - file - Make the following changes in - /etc/cinder/cinder.conf. - Add the following entries, where - 10.10.61.45 is the IP address - of the VMAX iSCSI target: - enabled_backends = CONF_GROUP_ISCSI, CONF_GROUP_FC -[CONF_GROUP_ISCSI] -iscsi_ip_address = 10.10.61.45 -volume_driver = cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver -cinder_emc_config_file = /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml -volume_backend_name=ISCSI_backend -[CONF_GROUP_FC] -volume_driver = cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver -cinder_emc_config_file = /etc/cinder/cinder_emc_config_CONF_GROUP_FC.xml -volume_backend_name=FC_backend - In this example, two backend configuration groups are - enabled: CONF_GROUP_ISCSI and - CONF_GROUP_FC. Each configuration - group has a section describing unique parameters for - connections, drivers, the volume_backend_name, and the - name of the EMC-specific configuration file containing - additional settings. Note that the file name is in the - format - /etc/cinder/cinder_emc_config_[confGroup].xml. - - Once the cinder.conf and - EMC-specific configuration files have been created, cinder - commands need to be issued in order to create and - associate OpenStack volume types with the declared - volume_backend_names: - $ cinder type-create VMAX_ISCSI -$ cinder type-key VMAX_ISCSI set volume_backend_name=ISCSI_backend -$ cinder type-create VMAX_FC -$ cinder type-key VMAX_FC set volume_backend_name=FC_backend - By issuing these commands, the Block Storage volume type - VMAX_ISCSI is associated with the - ISCSI_backend, and the type VMAX_FC - is associated with the FC_backend. - Restart the - cinder-volume service. -
-
- <filename>cinder_emc_config_CONF_GROUP_ISCSI.xml - </filename> configuration file - Create the - /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml - file. You do not need to restart the service - for this change. - Add the following lines to the XML file: - <?xml version="1.0" encoding="UTF-8" ?> -<EMC> - <EcomServerIp>1.1.1.1</EcomServerIp> - <EcomServerPort>00</EcomServerPort> - <EcomUserName>user1</EcomUserName> - <EcomPassword>password1</EcomPassword> - <PortGroups> - <PortGroup>OS-PORTGROUP1-PG</PortGroup> - <PortGroup>OS-PORTGROUP2-PG</PortGroup> - </PortGroups> - <Array>111111111111</Array> - <Pool>FC_GOLD1</Pool> - <FastPolicy>GOLD1</FastPolicy> -</EMC> - Where: - - - EcomServerIp and - EcomServerPort are the IP - address and port number of the ECOM server which - is packaged with SMI-S. - - - EcomUserName and - EcomPassword are - credentials for the ECOM server. - - - PortGroups supplies the - names of VMAX port groups that have been - pre-configured to expose volumes managed by this - backend. Each supplied port group should have - sufficient number and distribution of ports - (across directors and switches) as to ensure - adequate bandwidth and failure protection for the - volume connections. - PortGroups can contain - one or more port groups of either iSCSI or FC - ports. When a dynamic masking view is created by - the VMAX driver, the port group is chosen - randomly from the - PortGroup list, to evenly - distribute load across the set of groups provided. - Make sure that the - PortGroups set contains - either all FC or all iSCSI port groups (for a - given backend), as appropriate for the configured - driver (iSCSI or FC). - - - The Array tag holds - the unique VMAX array serial number. - - - The Pool tag holds the - unique pool name within a given array. For - backends not using FAST automated tiering, the - pool is a single pool that has been created by - the administrator. For backends exposing FAST - policy automated tiering, the pool is the bind - pool to be used with the FAST policy. - - - The FastPolicy tag - conveys the name of the FAST Policy to be used. - By including this tag, volumes managed by this - backend are treated as under FAST control. - Omitting the FastPolicy - tag means FAST is not enabled on the provided - storage pool. - - -
-
- FC Zoning with VMAX - Zone Manager is recommended when using the VMAX FC - driver, especially for larger configurations where - pre-zoning would be too complex and open-zoning would - raise security concerns. -
-
- iSCSI with VMAX - - - Make sure the iscsi-initiator-utils - package is installed on the host (use - apt-get, zypper, or yum, depending on Linux - flavor). - - - Verify host is able to ping VMAX iSCSI target - ports. - - -
-
-
- VMAX masking view and group naming info - - Masking view names - Masking views are dynamically created by the VMAX FC - and iSCSI drivers using the following naming - conventions: - OS-[shortHostName][poolName]-I-MV (for Masking Views using iSCSI) - OS-[shortHostName][poolName]-F-MV (for Masking Views using FC) - - - Initiator group names - For each host that is attached to VMAX volumes - using the drivers, an initiator group is created - or re-used (per attachment type). All initiators of - the appropriate type known for that host are included - in the group. At each new attach volume operation, - the VMAX driver retrieves the initiators (either - WWNNs or IQNs) from OpenStack and adds or updates the - contents of the Initiator Group as required. Names - are of the following format: - OS-[shortHostName]-I-IG (for iSCSI initiators) - OS-[shortHostName]-F-IG (for Fibre Channel initiators) - Hosts attaching to VMAX storage managed by the - OpenStack environment cannot also be attached to - storage on the same VMAX not being managed by - OpenStack. This is due to limitations on VMAX - Initiator Group membership. - - - - FA port groups - VMAX array FA ports to be used in a new masking view - are chosen from the list provided in the EMC - configuration file. - - - Storage group names - As volumes are attached to a host, they are either - added to an existing storage group (if it exists) or a - new storage group is created and the volume is then - added. Storage groups contain volumes created from a - pool (either single-pool or FAST-controlled), attached - to a single host, over a single connection type (iSCSI - or FC). Names are formed: - OS-[shortHostName][poolName]-I-SG (attached over iSCSI) - OS-[shortHostName][poolName]-F-SG (attached over Fibre Channel) - -
-
- Concatenated or striped volumes - In order to support later expansion of created volumes, - the VMAX Block Storage drivers create concatenated volumes - as the default layout. If later expansion is not required, - users can opt to create striped volumes in order to optimize - I/O performance. - Below is an example of how to create striped volumes. - First, create a volume type. Then define the extra spec for - the volume type storagetype:stripecount - representing the number of meta members in the striped - volume. The example below means that each volume created - under the GoldStriped volume type will - be striped and made up of 4 meta members. - - $ cinder type-create GoldStriped -$ cinder type-key GoldStriped set volume_backend_name=GOLD_BACKEND -$ cinder type-key GoldStriped set storagetype:stripecount=4 -
-
diff --git a/doc/config-reference/block-storage/drivers/emc-vnx-driver.xml b/doc/config-reference/block-storage/drivers/emc-vnx-driver.xml deleted file mode 100644 index f1dd39c6fb..0000000000 --- a/doc/config-reference/block-storage/drivers/emc-vnx-driver.xml +++ /dev/null @@ -1,1421 +0,0 @@ -
- EMC VNX driver
- The EMC VNX driver consists of EMCCLIISCSIDriver
- and EMCCLIFCDriver, and supports both the iSCSI and FC protocols.
- EMCCLIISCSIDriver (VNX iSCSI driver) and
- EMCCLIFCDriver (VNX FC driver) are based on the
- ISCSIDriver and FCDriver
- defined in Block Storage, respectively.
- Overview - The VNX iSCSI driver and VNX FC driver perform the volume - operations by executing Navisphere CLI (NaviSecCLI) - which is a command line interface used for management, diagnostics, and reporting - functions for VNX. -
- System requirements - - - VNX Operational Environment for Block version 5.32 or - higher. - - - VNX Snapshot and Thin Provisioning license should be - activated for VNX. - - - Navisphere CLI v7.32 or higher is installed along with - the driver. - - -
-
- Supported operations - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Clone a volume. - - - Extend a volume. - - - Migrate a volume. - - - Retype a volume. - - - Get volume statistics. - - - Create and delete consistency groups. - - - Create, list, and delete consistency group snapshots. - - - Modify consistency groups. - - - Efficient non-disruptive volume backup. - - -
-
-
- Preparation - This section contains instructions to prepare the Block Storage - nodes to use the EMC VNX driver. You install the Navisphere - CLI, install the driver, ensure you have correct zoning - configurations, and register the driver. -
- Install Navisphere CLI - Navisphere CLI needs to be installed on all Block Storage nodes - within an OpenStack deployment. You need to download different - versions for different platforms. - - - For Ubuntu x64, DEB is available at EMC - OpenStack Github. - - - For all other variants of Linux, Navisphere CLI is available at - Downloads for VNX2 Series or - Downloads for VNX1 Series. - - - After installation, set the security level of Navisphere CLI to low: - $ /opt/Navisphere/bin/naviseccli security -certificate -setLevel low - - -
-
- Check array software
- Make sure you have the following software installed for certain features.
Required software
FeatureSoftware Required
AllThinProvisioning
AllVNXSnapshots
FAST cache supportFASTCache
Create volume with type compressedCompression
Create volume with type deduplicatedDeduplication
- You can check the status of your array software in the
- "Software" page of "Storage System
- Properties". Here is how it looks.
- Installed software on VNX - - - - - -
-
-
- Install EMC VNX driver - Both EMCCLIISCSIDriver and - EMCCLIFCDriver are included in the Block Storage - installer package: - - - emc_vnx_cli.py - - - emc_cli_fc.py (for - ) - - - emc_cli_iscsi.py (for - ) - - -
-
- Network configuration
- For the FC driver, FC zoning must be properly configured between hosts and
- the VNX. Check for
- reference.
- For the iSCSI driver, make sure the VNX iSCSI ports are accessible by
- your hosts. Check for
- reference.
- You can use the initiator_auto_registration=True
- configuration to avoid registering the ports manually. Check
- the details of this configuration in
- for reference.
- If you are trying to set up multipath, refer to
- Multipath Setup in
- .
-
-
- Backend configuration - Make the following changes in - /etc/cinder/cinder.conf file: - - Changes to your configuration won't take - effect until your restart your cinder service. - -
- Minimum configuration
- Here is a sample of a minimum back-end configuration. See the following
- sections for the details of each option. Replace
- EMCCLIFCDriver with
- EMCCLIISCSIDriver if you are using the iSCSI
- driver.
- [DEFAULT]
-enabled_backends = vnx_array1
-
-[vnx_array1]
-san_ip = 10.10.72.41
-san_login = sysadmin
-san_password = sysadmin
-naviseccli_path = /opt/Navisphere/bin/naviseccli
-volume_driver=cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver
-initiator_auto_registration=True
-
- Multi-backend configuration
- Here is a sample of a multi-backend configuration. See the following
- sections for the details of each option. Replace
- EMCCLIFCDriver with
- EMCCLIISCSIDriver if you are using the iSCSI
- driver.
- [DEFAULT]
-enabled_backends=backendA, backendB
-
-[backendA]
-storage_vnx_pool_names = Pool_01_SAS, Pool_02_FLASH
-san_ip = 10.10.72.41
-storage_vnx_security_file_dir = /etc/secfile/array1
-naviseccli_path = /opt/Navisphere/bin/naviseccli
-volume_driver=cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver
-initiator_auto_registration=True
-
-[backendB]
-storage_vnx_pool_names = Pool_02_SAS
-san_ip = 10.10.26.101
-san_login = username
-san_password = password
-naviseccli_path = /opt/Navisphere/bin/naviseccli
-volume_driver=cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver
-initiator_auto_registration=True
- For more details on multi-backends, see the
- OpenStack
- Cloud Administration Guide.
-
- Required configurations -
- IP of the VNX Storage Processors - - Specify the SP A and SP B IP to connect. - - san_ip = <IP of VNX Storage Processor A> -san_secondary_ip = <IP of VNX Storage Processor B> -
-
- VNX login credentials
- There are two ways to specify the credentials.
- Use a plain-text username and password.
- Supply the plain-text username and password as below.
- san_login = <VNX account with administrator role>
-san_password = <password for VNX account>
-storage_vnx_authentication_type = global
- Valid values for
- storage_vnx_authentication_type are:
- global (default), local,
- ldap
- Use a security file
- This approach avoids having a plain-text password in your cinder
- configuration file. Supply a security file as below:
- storage_vnx_security_file_dir=<path to security file>
- Check the Unisphere CLI user guide or
- for how to create a security file.
-
- Path to your Unisphere CLI - - Specify the absolute path to your naviseccli. - - naviseccli_path = /opt/Navisphere/bin/naviseccli -
-
- Driver name - - - For the FC Driver, add the following option: - - - volume_driver=cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver - - - For iSCSI Driver, add following option: - - - volume_driver=cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver -
-
-
- Optional configurations -
- VNX pool names - - Specify the list of pools to be managed, separated by ','. They - should already exist in VNX. - - storage_vnx_pool_names = pool 1, pool 2 - - If this value is not specified, all pools of the array will be - used. - -
-
- Initiator auto registration
-
- When initiator_auto_registration=True, the driver will automatically
- register initiators to all working target ports of the VNX array during
- volume attaching (the driver will skip those initiators that have already
- been registered) if the option io_port_list is not specified in
- cinder.conf.
-
- If the user wants to register the initiators with some specific ports but
- not with the other ports, this functionality should be disabled.
-
- When a comma-separated list is given to io_port_list, the driver will
- only register the initiator to the ports specified in the list, and will
- only return target port(s) that belong to the ports in io_port_list
- instead of all target ports.
-
- Example for FC ports:
-
- io_port_list = a-1,B-3
-
- a or B is the Storage Processor, and the numbers 1 and 3 are the Port IDs.
-
- Example for iSCSI ports:
-
- io_port_list = a-1-0,B-3-0
-
- a or B is the Storage Processor, the first numbers 1 and 3 are the Port
- IDs, and the second number 0 is the Virtual Port ID.
-
- Already registered ports are not deregistered; they are simply bypassed,
- whether or not they appear in io_port_list.
-
- The driver will raise an exception during startup if ports listed in
- io_port_list do not exist on the VNX.
-
-
- Force delete volumes in storage group
-
- Some available volumes may remain in a storage group on the VNX array
- due to OpenStack timeout issues, but the VNX array does not allow the
- user to delete a volume that is still in a storage group. The option
- force_delete_lun_in_storagegroup is introduced to allow the user to
- delete the available volumes in this tricky situation.
-
- When force_delete_lun_in_storagegroup=True is set in the back-end
- section, the driver will move the volumes out of the storage groups and
- then delete them if the user tries to delete volumes that remain in a
- storage group on the VNX array.
-
- The default value of force_delete_lun_in_storagegroup is False.
-
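- A minimal sketch of enabling this option in a back-end section of
- cinder.conf (vnx_array1 is simply the sample back-end name used in the
- minimum configuration above):
-
-[vnx_array1]
-force_delete_lun_in_storagegroup = True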
-
- Over subscription in thin provisioning
-
- Over subscription allows the sum of all volumes' capacity (provisioned
- capacity) to be larger than the pool's total capacity.
-
- max_over_subscription_ratio in the back-end section is the ratio of
- provisioned capacity over total capacity.
-
- The default value of max_over_subscription_ratio is 20.0, which means
- the provisioned capacity can be up to 20 times the total capacity. If
- the value of this ratio is set to 1.0, the provisioned capacity cannot
- exceed the total capacity; any value larger than 1.0 allows the
- provisioned capacity to exceed the total capacity.
-
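- For example, to state the ratio explicitly in a back-end section (the
- value and back-end name are illustrative only):
-
-[vnx_array1]
-max_over_subscription_ratio = 20.0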
-
- Storage group automatic deletion
-
- For volume attaching, the driver maintains a storage group on the VNX
- for each compute node hosting the VM instances that consume VNX Block
- Storage (using the compute node's hostname as the storage group's name).
- All the volumes attached to the VM instances on a compute node will be
- put into that storage group. If destroy_empty_storage_group=True, the
- driver will remove the empty storage group after its last volume is
- detached. For data safety, it is not recommended to set
- destroy_empty_storage_group=True unless the VNX is exclusively managed
- by one Block Storage node, because a consistent lock_path is required
- for operation synchronization for this behavior.
-
-
- Initiator auto deregistration - - Enabling storage group automatic deletion is the precondition of - this function. If - initiator_auto_deregistration=True is set, - the driver will deregister all the initiators of the host after - its storage group is deleted. - -
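- Because storage group automatic deletion is a precondition of initiator
- auto deregistration, the two options are typically enabled together. A
- sketch, reusing the sample back-end name from above:
-
-[vnx_array1]
-destroy_empty_storage_group = True
-initiator_auto_deregistration = True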
-
- FC SAN auto zoning
-
- The EMC VNX FC driver supports FC SAN auto zoning when ZoneManager is
- configured. Set zoning_mode to fabric in the DEFAULT section to enable
- this feature. For ZoneManager configuration, refer to the Block Storage
- official guide.
-
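- A minimal sketch of the relevant setting; note that ZoneManager itself
- needs additional fc-zone-manager configuration that is not shown here:
-
-[DEFAULT]
-zoning_mode = fabric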
-
- Volume number threshold
-
- In VNX, there is a limitation on the number of pool volumes that can be
- created in the system. When the limitation is reached, no more pool
- volumes can be created even if there is remaining capacity in the
- storage pool. In other words, if the scheduler dispatches a volume
- creation request to a back end that has free capacity but has reached
- the volume limitation, the creation fails.
-
- The default value of check_max_pool_luns_threshold is False. When
- check_max_pool_luns_threshold=True, the pool-based back end will check
- the limit and will report 0 free capacity to the scheduler if the limit
- is reached, so the scheduler is able to skip a pool-based back end that
- has run out of pool volume numbers.
-
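- To enable the check in a back-end section (sketch only; back-end name as
- in the earlier sample):
-
-[vnx_array1]
-check_max_pool_luns_threshold = True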
-
- iSCSI initiators
-
- iscsi_initiators is a dictionary of IP addresses of the iSCSI initiator
- ports on the OpenStack Nova/Cinder nodes that connect to the VNX via
- iSCSI. If this option is configured, the driver will leverage this
- information to find an accessible iSCSI target portal for the initiator
- when attaching a volume. Otherwise, the iSCSI target portal will be
- chosen in a relatively random way.
-
- This option is only valid for the iSCSI driver.
-
- Here is an example. VNX will connect host1 with 10.0.0.1 and 10.0.0.2,
- and it will connect host2 with 10.0.0.3.
-
- The key name (like host1 in the example) should be the output of the
- hostname command.
-
- iscsi_initiators = {"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]}
-
-
- Default timeout
-
- Specify the timeout (in minutes) for operations like LUN migration and
- LUN creation. For example, LUN migration is a typical long-running
- operation, which depends on the LUN size and the load of the array. An
- upper bound appropriate for the specific deployment can be set to avoid
- an unnecessarily long wait.
-
- The default value for this option is infinite.
-
- Example:
-
- default_timeout = 10
-
-
- Max LUNs per storage group
-
- max_luns_per_storage_group specifies the maximum number of LUNs in a
- storage group. The default value is 255, which is also the maximum
- value supported by VNX.
-
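- Since the default already matches the VNX maximum, the option normally
- only needs to be set when lowering the limit, for example:
-
-[vnx_array1]
-max_luns_per_storage_group = 255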
-
- Ignore pool full threshold
-
- If ignore_pool_full_threshold is set to True, the driver will force LUN
- creation even if the full threshold of the pool is reached. The default
- is False.
-
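- To force LUN creation past the pool full threshold (sketch only):
-
-[vnx_array1]
-ignore_pool_full_threshold = True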
-
-
-
- Extra spec options - - Extra specs are used in volume types created in cinder as the - preferred property of the volume. - - - The Block storage scheduler will use extra specs to find the suitable back end - for the volume and the Block storage driver will create the volume based on the - properties specified by the extra spec. - - - Use following command to create a volume type: - - $ cinder type-create "demoVolumeType" - - Use following command to update the extra spec of a volume type: - - $ cinder type-key "demoVolumeType" set provisioning:type=thin - - Volume types can also be configured in OpenStack Horizon. - - - In VNX Driver, we defined several extra specs. They are introduced - below: - -
- Provisioning type - - - - Key: provisioning:type - - - - - Possible Values: - - - - - thick - - - - - Volume is fully provisioned. - - - creating a <literal>thick</literal> volume type: - $ cinder type-create "ThickVolumeType" -$ cinder type-key "ThickVolumeType" set provisioning:type=thick thick_provisioning_support='<is> True' - - - - - thin - - - - - Volume is virtually provisioned - - - creating a <literal>thin</literal> volume type: - $ cinder type-create "ThinVolumeType" -$ cinder type-key "ThinVolumeType" set provisioning:type=thin thin_provisioning_support='<is> True' - - - - - deduplicated - - - - - Volume is thin and deduplication is - enabled. The administrator shall go to VNX to configure the - system level deduplication settings. To create a deduplicated - volume, the VNX Deduplication license must be activated on - VNX, and specify - deduplication_support=True to - let Block Storage scheduler find the proper volume back end. - - - creating a <literal>deduplicated</literal> volume type: - $ cinder type-create "DeduplicatedVolumeType" -$ cinder type-key "DeduplicatedVolumeType" set provisioning:type=deduplicated deduplication_support='<is> True' - - - - - compressed - - - - - Volume is thin and compression is enabled. - The administrator shall go to the VNX to configure the system - level compression settings. To create a compressed volume, the - VNX Compression license must be activated on VNX , and use - compression_support=True to - let Block Storage scheduler find a volume back end. VNX does - not support creating snapshots on a compressed volume. - - - creating a <literal>compressed</literal> volume type: - $ cinder type-create "CompressedVolumeType" -$ cinder type-key "CompressedVolumeType" set provisioning:type=compressed compression_support='<is> True' - - - - - Default: thick - - - - - provisioning:type replaces the old spec key - storagetype:provisioning. The latter one will - be obsoleted in the next release. If both provisioning:typeand - storagetype:provisioning are set in the volume - type, the value of provisioning:type will be - used. - -
-
- Storage tiering support
-
- Key: storagetype:tiering
-
- Possible Values:
-
- StartHighThenAuto
-
- Auto
-
- HighestAvailable
-
- LowestAvailable
-
- NoMovement
-
- Default: StartHighThenAuto
-
- VNX supports fully automated storage tiering, which requires the FAST
- license activated on the VNX. The OpenStack administrator can use the
- extra spec key storagetype:tiering to set the tiering policy of a volume
- and use the key fast_support='<is> True' to let the Block Storage
- scheduler find a volume back end which manages a VNX with the FAST
- license activated. The five supported values for the extra spec key
- storagetype:tiering are listed above.
-
- Creating a volume type with a tiering policy:
- $ cinder type-create "ThinVolumeOnAutoTier"
-$ cinder type-key "ThinVolumeOnAutoTier" set provisioning:type=thin storagetype:tiering=Auto fast_support='<is> True'
-
- Tiering policy cannot be applied to a deduplicated volume. The tiering
- policy of a deduplicated LUN aligns with the settings of the pool.
-
-
- FAST cache support - - - - Key: fast_cache_enabled - - - - - Possible Values: - - - - - True - - - - - False - - - - - - - Default: False - - - - - VNX has FAST Cache feature which requires the FAST Cache license - activated on the VNX. Volume will be created on the backend with - FAST cache enabled when True is specified. - -
-
- Snap-copy - - - - Key: copytype:snap - - - - - Possible Values: - - - - - True - - - - - False - - - - - - - Default: False - - - - - The VNX driver supports snap-copy, which extremely accelerates the - process for creating a copied volume. - - - By default, the driver will do full data copy when creating a - volume from a snapshot or cloning a volume, which is - time-consuming especially for large volumes. When the snap-copy is - used, the driver will simply create a snapshot and mount it as a - volume for the 2 kinds of operations which will be instant even - for large volumes. - - - To enable this functionality, the source volume should have - copytype:snap=True in the extra specs of its - volume type. Then the new volume cloned from the source or copied - from the snapshot for the source, will be in fact a snap-copy - instead of a full copy. If a full copy is needed, retype/migration - can be used to convert the snap-copy volume to a full-copy volume - which may be time-consuming. - - $ cinder type-create "SnapCopy" -$ cinder type-key "SnapCopy" set copytype:snap=True - - User can determine whether the volume is a snap-copy volume or not - by showing its metadata. If the 'lun_type' in metadata is 'smp', - the volume is a snap-copy volume. Otherwise, it is a full-copy - volume. - - $ cinder metadata-show <volume> - - Constraints: - - - - - copytype:snap=True is not allowed in the - volume type of a consistency group. - - - - - Clone and snapshot creation are not allowed on a copied volume - created through the snap-copy before it is converted to a full - copy. - - - - - The number of snap-copy volume created from a source volume is - limited to 255 at one point in time. - - - - - The source volume which has snap-copy volume can not be - deleted. - - - -
-
- Pool name - - - - Key: pool_name - - - - - Possible Values: name of the storage pool managed by cinder - - - - - Default: None - - - - - If the user wants to create a volume on a certain storage pool in - a backend that manages multiple pools, a volume type with a extra - spec specified storage pool should be created first, then the user - can use this volume type to create the volume. - - - Creating the volume type: - $ cinder type-create "HighPerf" -$ cinder type-key "HighPerf" set pool_name=Pool_02_SASFLASH volume_backend_name=vnx_41 - -
-
- Obsoleted extra specs in Liberty - - Please avoid using following extra spec keys. - - - - - storagetype:provisioning - - - - - storagetype:pool - - - -
-
-
- Advanced features -
- Read-only volumes - - OpenStack supports read-only volumes. The following command can be - used to set a volume as read-only. - - $ cinder readonly-mode-update <volume> True - - After a volume is marked as read-only, the driver will forward the - information when a hypervisor is attaching the volume and the - hypervisor will make sure the volume is read-only. - -
-
- Efficient non-disruptive volume backup - - The default implementation in Cinder for non-disruptive volume - backup is not efficient since a cloned volume will be created - during backup. - - - The approach of efficient backup is to create a snapshot for the - volume and connect this snapshot (a mount point in VNX) to the - Cinder host for volume backup. This eliminates migration time - involved in volume clone. - - - Constraints: - - - - - Backup creation for a snap-copy volume is not allowed if the - volume status is in-use since snapshot - cannot be taken from this volume. - - - -
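- If your cinder client supports the --force flag for backup creation, a
- backup of an in-use volume can be requested as follows (illustrative
- only):
-
- $ cinder backup-create --force <volume>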
-
-
- Best practice -
- Multipath setup - - Enabling multipath volume access is recommended for robust data - access. The major configuration includes: - - - - - Install multipath-tools, - sysfsutils and sg3-utils - on nodes hosting Nova-Compute and Cinder-Volume services - (Please check the operating system manual for the system - distribution for specific installation steps. For Red Hat - based distributions, they should be - device-mapper-multipath, - sysfsutils and - sg3_utils). - - - - - Specify use_multipath_for_image_xfer=true - in cinder.conf for each FC/iSCSI back end. - - - - - Specify iscsi_use_multipath=True in - libvirt section of - nova.conf. This option is valid for both - iSCSI and FC driver. - - - - - For multipath-tools, here is an EMC recommended sample of - /etc/multipath.conf. - - - user_friendly_names is not specified in the - configuration and thus it will take the default value - no. It is NOT recommended to set it to - yes because it may fail operations such as VM - live migration. - - blacklist { - # Skip the files under /dev that are definitely not FC/iSCSI devices - # Different system may need different customization - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^hd[a-z][0-9]*" - devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]" - - # Skip LUNZ device from VNX - device { - vendor "DGC" - product "LUNZ" - } -} - -defaults { - user_friendly_names no - flush_on_last_del yes -} - -devices { - # Device attributed for EMC CLARiiON and VNX series ALUA - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy group_by_prio - path_selector "round-robin 0" - path_checker emc_clariion - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio alua - failback immediate - } -} - - When multipath is used in OpenStack, multipath faulty devices may - come out in Nova-Compute nodes due to different issues - (Bug - 1336683 is a typical example). - - - A solution to completely avoid faulty devices has not been found - yet. faulty_device_cleanup.py mitigates this - issue when VNX iSCSI storage is used. Cloud administrators can - deploy the script in all Nova-Compute nodes and use a CRON job to - run the script on each Nova-Compute node periodically so that - faulty devices will not stay too long. Please refer to: - - VNX faulty device cleanup for detailed usage and the script. - -
-
-
- Restrictions and limitations -
- iSCSI port cache
-
- The EMC VNX iSCSI driver caches the iSCSI port information. After
- changing the iSCSI port configuration, the user should restart the
- cinder-volume service or wait for the interval configured by
- periodic_interval in cinder.conf before any volume attachment
- operation. Otherwise the attachment may fail because the old iSCSI port
- configuration is used.
-
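- periodic_interval lives in the DEFAULT section of cinder.conf; the value
- below only illustrates the usual default (in seconds):
-
-[DEFAULT]
-periodic_interval = 60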
-
- No extending for volume with snapshots - - VNX does not support extending the thick volume which has a - snapshot. If the user tries to extend a volume which has a - snapshot, the status of the volume would change to - error_extending. - -
-
- Limitations for deploying cinder on a compute node
-
- It is not recommended to deploy the driver on a compute node if
- cinder upload-to-image --force True is used against an in-use volume,
- because cinder upload-to-image --force True will terminate the data
- access of the VM instance to the volume.
-
-
- Storage group with host names in VNX - - When the driver notices that there is no existing storage group - that has the host name as the storage group name, it will create - the storage group and also add the compute node's or Block Storage - nodes' registered initiators into the storage group. - - - If the driver notices that the storage group already exists, it - will assume that the registered initiators have also been put into - it and skip the operations above for better performance. - - - It is recommended that the storage administrator does not create - the storage group manually and instead relies on the driver for - the preparation. If the storage administrator needs to create the - storage group manually for some special requirements, the correct - registered initiators should be put into the storage group as well - (otherwise the following volume attaching operations will fail ). - -
-
- EMC storage-assisted volume migration
-
- The EMC VNX driver supports storage-assisted volume migration. When the
- user starts a migration with
- cinder migrate --force-host-copy False <volume_id> <host>
- or
- cinder migrate <volume_id> <host>,
- cinder will try to leverage the VNX's native volume migration
- functionality.
-
- In the following scenarios, VNX storage-assisted volume migration will
- not be triggered:
-
- Volume migration between back ends with different storage protocols,
- for example, FC and iSCSI.
-
- The volume is to be migrated across arrays.
-
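- The <host> argument uses the usual host@backend#pool form; the host and
- pool names below are purely illustrative:
-
- $ cinder migrate <volume_id> server1@vnx_array1#Pool_01_SAS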
-
-
- Appendix -
- Authenticate by security file
-
- VNX credentials are necessary when the driver connects to the VNX
- system. Credentials in global, local and ldap scopes are supported.
- There are two approaches to provide the credentials:
-
- The recommended one is using a Navisphere CLI security file to provide
- the credentials, which avoids putting plain-text credentials in the
- configuration file. Following are the instructions on how to do this.
-
- 1. Find out the Linux user id of the cinder-volume processes. The steps
- below assume the cinder-volume service is run by the account cinder.
-
- 2. Run su as the root user.
-
- 3. In /etc/passwd, change
- cinder:x:113:120::/var/lib/cinder:/bin/false
- to
- cinder:x:113:120::/var/lib/cinder:/bin/bash
- (this temporary change is to make step 4 work).
-
- 4. Save the credentials on behalf of the cinder user to a security file
- (assuming the array credentials are admin/admin in global scope). In
- the command below, the '-secfilepath' switch is used to specify the
- location to save the security file.
-
- # su -l cinder -c '/opt/Navisphere/bin/naviseccli -AddUserSecurity -user admin -password admin -scope 0 -secfilepath <location>'
-
- 5. Change
- cinder:x:113:120::/var/lib/cinder:/bin/bash
- back to
- cinder:x:113:120::/var/lib/cinder:/bin/false
- in /etc/passwd.
-
- 6. Remove the credential options san_login, san_password and
- storage_vnx_authentication_type from cinder.conf (normally
- /etc/cinder/cinder.conf). Add the option
- storage_vnx_security_file_dir and set its value to the directory path
- of the security file generated in step 4. Omit this option if
- -secfilepath is not used in step 4.
-
- 7. Restart the cinder-volume service to validate the change.
-
-
- Register FC port with VNX - - This configuration is only required when - initiator_auto_registration=False. - - - To access VNX storage, the compute nodes should be registered on - VNX first if initiator auto registration is not enabled. - - - To perform "Copy Image to Volume" and "Copy Volume - to Image" operations, the nodes running the - cinder-volume - service (Block Storage nodes) must be registered with the VNX as - well. - - - The steps mentioned below are for the compute nodes. Please follow - the same steps for the Block Storage nodes also (The steps can be - skipped if initiator auto registration is enabled). - - - - Assume 20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2 - is the WWN of a FC initiator port name of the compute node whose - hostname and IP are myhost1 and - 10.10.61.1. Register - 20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2 - in Unisphere: - - Login to Unisphere, go to - FNM0000000000->Hosts->Initiators. - - Refresh and wait until the initiator - 20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2 - with SP Port A-1 appears. - Click the Register button, - select CLARiiON/VNX and enter the - hostname (which is the output of the linux command - hostname) and IP address: - - - Hostname : myhost1 - - - IP : 10.10.61.1 - - - Click Register - - - - Then host 10.10.61.1 will - appear under Hosts->Host List - as well. - - - Register the wwn with more ports if needed. - -
-
- Register iSCSI port with VNX - - This configuration is only required when - initiator_auto_registration=False. - - - To access VNX storage, the compute nodes should be registered on - VNX first if initiator auto registration is not enabled. - - - To perform "Copy Image to Volume" and "Copy Volume - to Image" operations, the nodes running the cinder-volume - service (Block Storage nodes) must be registered with the VNX as - well. - - - The steps mentioned below are for the compute nodes. Please follow - the same steps for the Block Storage nodes also (The steps can be - skipped if initiator auto registration is enabled). - - - On the compute node with IP address - 10.10.61.1 and hostname myhost1, - execute the following commands (assuming 10.10.61.35 - is the iSCSI target): - - Start the iSCSI initiator service on the node - # /etc/init.d/open-iscsi start - Discover the iSCSI target portals on VNX - # iscsiadm -m discovery -t st -p 10.10.61.35 - Enter /etc/iscsi - # cd /etc/iscsi - Find out the iqn of the node - # more initiatorname.iscsi - - - Login to VNX from the compute node using the - target corresponding to the SPA port: - # iscsiadm -m node -T iqn.1992-04.com.emc:cx.apm01234567890.a0 -p 10.10.61.35 -l - - Assume iqn.1993-08.org.debian:01:1a2b3c4d5f6g - is the initiator name of the compute node. Register - iqn.1993-08.org.debian:01:1a2b3c4d5f6g in - Unisphere: - - Login to Unisphere, go to - FNM0000000000->Hosts->Initiators - . - Refresh and wait until the initiator - iqn.1993-08.org.debian:01:1a2b3c4d5f6g - with SP Port A-8v0 appears. - Click the Register button, - select CLARiiON/VNX and enter the - hostname (which is the output of the linux command - hostname) and IP address: - - - Hostname : myhost1 - - - IP : 10.10.61.1 - - - Click Register - - - - Then host 10.10.61.1 will - appear under Hosts->Host List - as well. - - - Logout iSCSI on the node: - # iscsiadm -m node -u - - Login to VNX from the compute node using the - target corresponding to the SPB port: - # iscsiadm -m node -T iqn.1992-04.com.emc:cx.apm01234567890.b8 -p 10.10.61.36 -l - - In Unisphere register the initiator with the - SPB port. - Logout iSCSI on the node: - # iscsiadm -m node -u - - Register the iqn with more ports if needed. - -
-
-
diff --git a/doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml b/doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml deleted file mode 100644 index b23f03eb91..0000000000 --- a/doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml +++ /dev/null @@ -1,151 +0,0 @@ - -
- EMC XtremIO Block Storage driver configuration
-
- The high performance XtremIO All Flash Array (AFA) offers Block Storage services to OpenStack.
- Using the driver, OpenStack Block Storage hosts can connect to an XtremIO Storage cluster.
- This section explains how to configure and connect an OpenStack Block Storage host to an XtremIO storage cluster.
- Support matrix - - - Xtremapp: Version 3.0 and 4.0 - - -
-
- Supported operations - - - Create, delete, clone, attach, and detach volumes - - - Create and delete volume snapshots - - - Create a volume from a snapshot - - - Copy an image to a volume - - - Copy a volume to an image - - - Extend a volume - - - Manage and unmanage a volume - - - Get volume statistics - - -
-
- XtremIO Block Storage driver configuration - Edit the cinder.conf file by adding the configuration below under the - [DEFAULT] section of the file in case of a single back end or under a separate section in case - of multiple back ends (for example [XTREMIO]). The configuration file is usually located under the - following path /etc/cinder/cinder.conf. - For a configuration example, refer to the configuration example. -
- XtremIO driver name - Configure the driver name by adding the following parameter: - - - For iSCSI volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOIscsiDriver - - - For Fibre Channel volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver - - -
-
- XtremIO management server (XMS) IP - To retrieve the management IP, use the show-xms CLI command. - Configure the management IP by adding the following parameter: - san_ip = XMS Management IP -
-
- XtremIO cluster name
- In XtremIO version 4.0, a single XMS can manage multiple cluster back ends. In such setups, the administrator is required
- to specify the cluster name (in addition to the XMS IP). Each cluster must be defined as a separate back end.
- To retrieve the cluster name, run the show-clusters CLI command.
- Configure the cluster name by adding the following parameter:
- xtremio_cluster_name = Cluster-Name
- When a single cluster is managed in XtremIO version 4.0, the cluster name is not required.
-
- XtremIO user credentials
- OpenStack Block Storage requires an XtremIO XMS user with administrative privileges. XtremIO recommends
- creating a dedicated OpenStack user account that holds an administrative user role.
- Refer to the XtremIO User Guide for details on user account management.
- Create an XMS account using either the XMS GUI or the add-user-account CLI command.
- Configure the user credentials by adding the following parameters:
- san_login = XMS username
- san_password = XMS username password
-
-
- Multiple back ends - Configuring multiple storage back ends enables you to create several back-end storage solutions that serve the same OpenStack Compute resources. - When a volume is created, the scheduler selects the appropriate back end to handle - the request, according to the specified volume type. -
-
- Setting thin provisioning and multipathing parameters
- To support thin provisioning and multipathing in the XtremIO Array, the following
- parameters from the Nova and Cinder configuration files should be modified as follows:
-
- Thin Provisioning
- All XtremIO volumes are thin provisioned. The default value of 20 should be maintained
- for the max_over_subscription_ratio parameter.
- The use_cow_images parameter in the nova.conf file should be
- set to False as follows:
- use_cow_images = false
-
- Multipathing
- The use_multipath_for_image_xfer parameter in the cinder.conf
- file should be set to True as follows:
- use_multipath_for_image_xfer = true
-
-
- Restarting OpenStack Block Storage
- Save the cinder.conf file and restart cinder by running the following command:
- $ openstack-service restart cinder-volume
-
- Configuring CHAP - The XtremIO Block Storage driver supports CHAP initiator authentication. If CHAP initiator authentication - is required, set the CHAP Authentication mode to initiator. - To set the CHAP initiator mode using CLI, run the following CLI command: - $ modify-chap chap-authentication-mode=initiator - The CHAP initiator mode can also be set via the XMS GUI - Refer to XtremIO User Guide for details on CHAP configuration via GUI and CLI. - The CHAP initiator authentication credentials (username and password) are generated - automatically by the Block Storage driver. Therefore, there is no need to configure the initial CHAP - credentials manually in XMS. -
-
- Configuration example - cinder.conf example file - You can update the cinder.conf file by editing the necessary parameters as follows: - -[Default] -enabled_backends = XtremIO - -[XtremIO] -volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver -san_ip = XMS_IP -xtremio_cluster_name = Cluster01 -san_login = XMS_USER -san_password = XMS_PASSWD -volume_backend_name = XtremIOAFA -
-
diff --git a/doc/config-reference/block-storage/drivers/figures/blockbridge/bb-cinder-fig1.png b/doc/config-reference/block-storage/drivers/figures/blockbridge/bb-cinder-fig1.png deleted file mode 100644 index 022d3652a1..0000000000 Binary files a/doc/config-reference/block-storage/drivers/figures/blockbridge/bb-cinder-fig1.png and /dev/null differ diff --git a/doc/config-reference/block-storage/drivers/glusterfs-driver.xml b/doc/config-reference/block-storage/drivers/glusterfs-driver.xml deleted file mode 100644 index 5e4ebe26b7..0000000000 --- a/doc/config-reference/block-storage/drivers/glusterfs-driver.xml +++ /dev/null @@ -1,29 +0,0 @@ -
- GlusterFS driver - GlusterFS is an open-source scalable distributed file system - that is able to grow to petabytes and beyond in size. More - information can be found on Gluster's - homepage. - This driver enables the use of GlusterFS in a similar - fashion as NFS. It supports basic volume operations, including - snapshot/clone. - - You must use a Linux kernel of version 3.4 or greater - (or version 2.6.32 or greater in Red Hat Enterprise Linux/CentOS 6.3+) when - working with Gluster-based volumes. See Bug 1177103 for more information. - - To use Block Storage with GlusterFS, first set the - volume_driver in - cinder.conf: - volume_driver=cinder.volume.drivers.glusterfs.GlusterfsDriver - The following table contains the configuration options - supported by the GlusterFS driver. - -
diff --git a/doc/config-reference/block-storage/drivers/hds-hnas-driver.xml b/doc/config-reference/block-storage/drivers/hds-hnas-driver.xml deleted file mode 100644 index 1bc8c33cd3..0000000000 --- a/doc/config-reference/block-storage/drivers/hds-hnas-driver.xml +++ /dev/null @@ -1,721 +0,0 @@ - -%openstack; -]> -
- HDS HNAS iSCSI and NFS driver - - This OpenStack Block Storage volume driver provides iSCSI and NFS - support for - Hitachi NAS Platform Models 3080, 3090, 4040, 4060, 4080 - and 4100. -
- Supported operations - The NFS and iSCSI drivers support these operations: - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - Get volume statistics. - - - Manage and unmanage a volume. - - -
-
- HNAS storage requirements - - Before using iSCSI and NFS services, use the HNAS configuration and - management GUI (SMU) or SSC CLI to create storage pool(s), file system(s), - and assign an EVS. Make sure that the file system used is not - created as a replication target. Additionally: - - - For NFS: - - - Create NFS exports, choose a path for them (it must - be different from "/") and set the Show - snapshots option to hide and disable - access. - - - Also, in the "Access Configuration" set the option norootsquash - , e.g. so - HNAS cinder driver can change the permissions of its volumes. - - - In order to use the hardware accelerated features of - NFS HNAS, we recommend setting max-nfs-version - to 3. - Refer to HNAS command line reference to see how to - configure this option. - - - - For iSCSI: - - You need to set an iSCSI domain. - - - -
-
- Block storage host requirements - The HNAS driver is supported for Red Hat Enterprise Linux - OpenStack Platform, SUSE OpenStack Cloud, and Ubuntu OpenStack. The - following packages must be installed: - - - nfs-utils for Red Hat Enterprise - Linux OpenStack Platform - - - nfs-client for SUSE OpenStack Cloud - - - - - nfs-common, libc6-i386 - for Ubuntu OpenStack - - - - - If you are not using SSH, you need the HDS SSC to - communicate with an HNAS array using the SSC - commands. This utility package is available - in the RPM package distributed with the hardware - through physical media or it can be manually - copied from the SMU to the Block Storage host. - - - -
-
- Package installation
-
- If you are installing the driver from an RPM or DEB package,
- follow the steps below:
- Install the dependencies:
- In Red Hat:
- # yum install nfs-utils nfs-utils-lib
- Or in Ubuntu:
- # apt-get install nfs-common
- Or in SUSE:
- # zypper install nfs-client
- If you are using Ubuntu 12.04, you also need to install
- libc6-i386:
- # apt-get install libc6-i386
-
- Configure the driver as described in the "Driver
- configuration" section.
- Restart all cinder services (volume, scheduler and
- backup).
-
-
- Driver configuration - The HDS driver supports the concept of differentiated - services (also referred as quality of service) by mapping - volume types to services provided through HNAS. - HNAS supports a variety of storage options and file - system capabilities, which are selected through the definition - of volume types and the use of multiple back ends. The driver - maps up to four volume types into separated exports or file - systems, and can support any number if using multiple back - ends. - The configuration for the driver is read from an - XML-formatted file (one per back end), which you need to create - and set its path in the cinder.conf configuration - file. Below are the configuration needed in - the cinder.conf configuration file - - The configuration file location may differ. - : - - [DEFAULT] -enabled_backends = hnas_iscsi1, hnas_nfs1 - For HNAS iSCSI driver create this section: - [hnas_iscsi1] -volume_driver = cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver -hds_hnas_iscsi_config_file = /path/to/config/hnas_config_file.xml -volume_backend_name = HNAS-ISCSI - For HNAS NFS driver create this section: - [hnas_nfs1] -volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver -hds_hnas_nfs_config_file = /path/to/config/hnas_config_file.xml -volume_backend_name = HNAS-NFS - The XML file has the following format: - - <?xml version = "1.0" encoding = "UTF-8" ?> - <config> - <mgmt_ip0>172.24.44.15</mgmt_ip0> - <hnas_cmd>ssc</hnas_cmd> - <chap_enabled>False</chap_enabled> - <ssh_enabled>False</ssh_enabled> - <cluster_admin_ip0>10.1.1.1</cluster_admin_ip0> - <username>supervisor</username> - <password>supervisor</password> - <svc_0> - <volume_type>default</volume_type> - <iscsi_ip>172.24.44.20</iscsi_ip> - <hdp>fs01-husvm</hdp> - </svc_0> - <svc_1> - <volume_type>platinum</volume_type> - <iscsi_ip>172.24.44.20</iscsi_ip> - <hdp>fs01-platinum</hdp> - </svc_1> - </config> -
-
- HNAS volume driver XML configuration options - An OpenStack Block Storage node using HNAS drivers can have up to - four services. Each service is defined by a svc_n - tag (svc_0, svc_1, - svc_2, or svc_3 - - There is no relative precedence or weight among these - four labels. - , for example). These are the configuration options - available for each service label: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Configuration options for service labels
OptionTypeDefaultDescription
- - Requireddefault - - When a create_volume call with - a certain volume type happens, the volume type will - try to be matched up with this tag. In each - configuration file you must define the - default volume type in the service - labels and, if no volume type is specified, the - default is used. Other labels - are case sensitive and should match exactly. If no - configured volume types match the incoming - requested type, an error occurs in the volume - creation. - -
- - Required only for iSCSI - - An iSCSI IP address dedicated to the service. - -
- - Required - - For iSCSI driver: virtual file system label - associated with the service. - - - For NFS driver: path to the volume - (<ip_address>:/<path>) associated with - the service. - - - Additionally, this entry must be added in the file - used to list available NFS shares. This file is - located, by default, in - /etc/cinder/nfs_shares or you - can specify the location in the - nfs_shares_config option in the - cinder.conf configuration file. - -
- - These are the configuration options available to the - config section of the XML config file: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Configuration options
OptionTypeDefaultDescription
- Required - - Management Port 0 IP address. Should be the IP - address of the "Admin" EVS. - -
- Optionalssc - - Command to communicate to HNAS array. - -
- Optional (iSCSI only)True - - Boolean tag used to enable CHAP authentication - protocol. - -
- Requiredsupervisor - It's always required on HNAS. -
- Requiredsupervisor - Password is always required on HNAS. -
- - - - - Optional - - - (at least one label has to be defined) - - - - Service labels: these four predefined - names help four different sets of - configuration options. Each can specify - HDP and a unique volume type. - -
- - - - - Optional if - is True - - - - The address of HNAS cluster admin. - -
- - - - - Optional - - False - - - Enables SSH authentication between Block Storage - host and the SMU. - -
- - - - - Required if ssh_enabled is - True - - False - - - Path to the SSH private key used to authenticate - in HNAS SMU. The public key must be uploaded to - HNAS SMU using ssh-register-public-key - (this is an SSH subcommand). Note that - copying the public key HNAS using - ssh-copy-id doesn't work - properly as the SMU periodically wipe out those - keys. - -
-
-
- Service labels
-
- The HNAS driver supports differentiated types of service using the service
- labels. It is possible to create up to four types of them, for example
- gold, platinum, silver and ssd.
-
- After creating the services in the XML configuration file, you
- must configure one volume_type per service.
- Each volume_type must have the metadata
- service_label with the same name configured in the
- <volume_type> section of that
- service. If this is not set, OpenStack Block Storage will
- schedule the volume creation to the pool with the largest available
- free space or other criteria configured in volume filters.
-
-$ cinder type-create default
-$ cinder type-key default set service_label=default
-$ cinder type-create platinum
-$ cinder type-key platinum set service_label=platinum
-
-
- Multi-back-end configuration - - If you use multiple back ends and intend to enable the creation of - a volume in a specific back end, you must configure volume types to - set the volume_backend_name option to the - appropriate back end. Then, create volume_type - configurations with the same volume_backend_name - . - - $ cinder type-create 'iscsi' -$ cinder type-key 'iscsi' set volume_backend_name = 'HNAS-ISCSI' -$ cinder type-create 'nfs' -$ cinder type-key 'nfs' set volume_backend_name = 'HNAS-NFS' - - You can deploy multiple OpenStack HNAS drivers instances that each - control a separate HNAS array. Each service (svc_0, svc_1, - svc_2, svc_3) on the instances need to have a volume_type - and service_label metadata associated with it. If no metadata is - associated with a pool, OpenStack Block Storage filtering algorithm - selects the pool with the largest available free space. - -
-
- SSH configuration - - Instead of using SSC on the Block Storage host - and store its credential on the XML configuration file, HNAS driver - supports SSH authentication. To configure that: - - - - - If you don't have a pair of public keys already generated, - create it in the Block Storage host (leave the pass-phrase - empty): - - $ mkdir -p /opt/hds/ssh -$ ssh-keygen -f /opt/hds/ssh/hnaskey - - - - Change the owner of the key to cinder - (or the user the volume service will be run): - - # chown -R cinder.cinder /opt/hds/ssh - - - - Create the directory "ssh_keys" in the SMU server: - - $ ssh [manager|supervisor]@<smu-ip> 'mkdir -p /var/opt/mercury-main/home/[manager|supervisor]/ssh_keys/' - - - - Copy the public key to the "ssh_keys" directory: - - $ scp /opt/hds/ssh/hnaskey.pub [manager|supervisor]@<smu-ip>:/var/opt/mercury-main/home/[manager|supervisor]/ssh_keys/ - - - - Access the SMU server: - - $ ssh [manager|supervisor]@<smu-ip> - - - - Run the command to register the SSH keys: - - $ ssh-register-public-key -u [manager|supervisor] -f ssh_keys/hnaskey.pub - - - - Check the communication with HNAS in the Block Storage host: - - $ ssh -i /opt/hds/ssh/hnaskey [manager|supervisor]@<smu-ip> 'ssc <cluster_admin_ip0> df -a' - - - - <cluster_admin_ip0> is "localhost" for - single node deployments. This should return a list of available - file systems on HNAS. - -
-
- Editing the XML config file:
-
- Set the "username".
-
- Enable SSH by adding the line "<ssh_enabled>
- True</ssh_enabled>" under the
- "<config>" section.
-
- Set the private key path: "<ssh_private_key>
- /opt/hds/ssh/hnaskey</ssh_private_key>" under the
- "<config>" section.
-
- If the HNAS is in a multi-cluster configuration, set
- "<cluster_admin_ip0>" to the cluster
- node admin IP. In a single node HNAS, leave it empty.
-
- Restart cinder services.
-
- Note that copying the public key to HNAS using ssh-copy-id does not work
- properly, as the SMU periodically wipes out those keys.
-
- Manage and unmanage - - The manage and unmanage are two new API extensions that add some new features to the driver. The manage action on an existing volume is very similar to a volume creation. It creates a volume entry on OpenStack Block Storage DB, but instead of creating a new volume in the back end, it only adds a 'link' to an existing volume. Volume name, description, volume_type, metadata and availability_zone are supported as in a normal volume creation. - - - The unmanage action on an existing volume removes the volume from the OpenStack Block Storage DB, but keeps the actual volume in the back-end. From an OpenStack Block Storage perspective the volume would be deleted, but it would still exist for outside use. - - - How to Manage: - On the Dashboard: - For NFS: - - - Under the tab System -> Volumes choose the option [ + Manage Volume ] - - - - Fill the fields Identifier, Host and Volume Type with volume information to be managed: - - - Identifier: ip:/type/volume_name Example: 172.24.44.34:/silver/volume-test - - - Host: host@backend-name#pool_name Example: ubuntu@hnas-nfs#test_silver - - - Volume Name: volume_name Example: volume-test - - - Volume Type: choose a type of volume Example: silver - - - - - - For iSCSI: - - - Under the tab System -> Volumes choose the option [ + Manage Volume ] - - - - Fill the fields Identifier, Host, Volume Name and Volume Type with volume information to be managed: - - - Identifier: filesystem-name/volume-name Example: filesystem-test/volume-test - - - Host: host@backend-name#pool_name Example: ubuntu@hnas-iscsi#test_silver - - - Volume Name: volume_name Example: volume-test - - - Volume Type: choose a type of volume Example: silver - - - - - - By CLI: - $ cinder --os-volume-api-version 2 manage [--source-name <source-name>][--id-type <id-type>] - [--name <name>][--description <description>][--volume-type <volume-type>] - [--availability-zone <availability-zone>][--metadata [<key=value> [<key=value> ...]]][--bootable] - <host> [<key=value> [<key=value> ...]] - Example: - For NFS: - $ cinder --os-volume-api-version 2 manage --name <volume-test> --volume-type <silver> - --source-name <172.24.44.34:/silver/volume-test> <ubuntu@hnas-nfs#test_silver> - For iSCSI: - $ cinder --os-volume-api-version 2 manage --name <volume-test> --volume-type <silver> - --source-name <filesystem-test/volume-test> <ubuntu@hnas-iscsi#test_silver> - - - How to Unmanage: - On Dashboard: - - - Under the tab [ System -> Volumes ] choose a volume - - - On the volume options, choose [ +Unmanage Volume ] - - - Check the data and confirm. - - - By CLI: - $ cinder --os-volume-api-version 2 unmanage <volume> - Example: - $ cinder --os-volume-api-version 2 unmanage <voltest> - -
-
- Additional notes - - - - The get_volume_stats() function always - provides the available capacity based on the - combined sum of all the HDPs that are used in these - services labels. - - - - - After changing the configuration on the storage, - the OpenStack Block Storage driver must be - restarted. - - - - - On Red Hat, if the system is configured to use SELinux, you - need to set for NFS - driver work properly. - - # setsebool -P virt_use_nfs on - - - - It is not possible to manage a volume if there is a slash - ('/') or a colon (':') on the volume name. - - - -
-
- diff --git a/doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml b/doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml deleted file mode 100644 index 2787399bea..0000000000 --- a/doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml +++ /dev/null @@ -1,193 +0,0 @@ - -%openstack; -]> -
- Hitachi storage volume driver - Hitachi storage volume driver provides iSCSI and Fibre Channel - support for Hitachi storages. -
- System requirements - Supported storages: - - - Hitachi Virtual Storage Platform G1000 (VSP G1000) - - - Hitachi Virtual Storage Platform (VSP) - - - Hitachi Unified Storage VM (HUS VM) - - - Hitachi Unified Storage 100 Family (HUS 100 Family) - - - Required software: - - - RAID Manager Ver 01-32-03/01 or later for VSP G1000/VSP/HUS VM - - - Hitachi Storage Navigator Modular 2 (HSNM2) Ver 27.50 or later for HUS 100 Family - - - - HSNM2 needs to be installed under /usr/stonavm. - - Required licenses: - - - Hitachi In-System Replication Software for VSP G1000/VSP/HUS VM - - - (Mandatory) ShadowImage in-system replication for HUS 100 Family - - - (Optional) Copy-on-Write Snapshot for HUS 100 Family - - - Additionally, the pexpect package is required. -
-
- Supported operations - - - Create, delete, attach and detach volumes. - - - Create, list and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy a volume to an image. - - - Copy an image to a volume. - - - Clone a volume. - - - Extend a volume. - - - Get volume statistics. - - -
-
- Configuration - - Set up Hitachi storage - You need to specify settings as described below. For details about each step, see the user's guide of the storage device. Use a storage administrative software such as Storage Navigator to set up the storage device so that LDEVs and host groups can be created and deleted, and LDEVs can be connected to the server and can be asynchronously copied. - - - Create a Dynamic Provisioning pool. - - - Connect the ports at the storage to the Controller node and Compute nodes. - - - For VSP G1000/VSP/HUS VM, set "port security" to "enable" for the ports at the storage. - - - For HUS 100 Family, set "Host Group security"/"iSCSI target security" to "ON" for the ports at the storage. - - - For the ports at the storage, create host groups (iSCSI targets) whose names begin with HBSD- for the Controller node and each Compute node. Then register a WWN (initiator IQN) for each of the Controller node and Compute nodes. - - - For VSP G1000/VSP/HUS VM, perform the following: - - - Create a storage device account belonging to the Administrator User Group. -(To use multiple storage devices, create the same account name for all the target storage devices, and specify the same resource group and permissions.) - - - Create a command device (In-Band), and set user authentication to ON. - - - Register the created command device to the host group for the Controller node. - - - To use the Thin Image function, create a pool for Thin Image. - - - - - For HUS 100 Family, perform the following: - - - Use the command auunitaddauto to register the unit name and controller of the storage device to HSNM2. - - - When connecting via iSCSI, if you are using CHAP certification, specify the same user and password as that used for the storage port. - - - - - - - Set up Hitachi Gigabit Fibre Channel adaptor - Change a parameter of the hfcldd driver and update - the initram file if Hitachi Gigabit Fibre Channel - adaptor is used. - # /opt/hitachi/drivers/hba/hfcmgr -E hfc_rport_lu_scan 1 -# dracut -f initramfs-KERNEL_VERSION.img KERNEL_VERSION -# reboot - - - Set up Hitachi storage volume driver - - - Create directory. - # mkdir /var/lock/hbsd -# chown cinder:cinder /var/lock/hbsd - - - Create "volume type" and "volume key". - This example shows that HUS100_SAMPLE is created - as "volume type" and hus100_backend is registered - as "volume key". - $ cinder type-create HUS100_SAMPLE -$ cinder type-key HUS100_SAMPLE set volume_backend_name=hus100_backend - - Please specify any identical "volume type" name and "volume key". - - To confirm the created "volume type", please - execute the following command: - $ cinder extra-specs-list - - - Edit /etc/cinder/cinder.conf as follows. - If you use Fibre Channel: - volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver - If you use iSCSI: - volume_driver = cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver - Also, set created by cinder type-key - volume_backend_name = hus100_backend - This table shows configuration options for Hitachi storage volume driver. - - - - Restart Block Storage service. - - When the startup is done, - "MSGID0003-I: The storage backend can be used." - is output into /var/log/cinder/volume.log as follows. - - 2014-09-01 10:34:14.169 28734 WARNING cinder.volume.drivers.hitachi. -hbsd_common [req-a0bb70b5-7c3f-422a-a29e-6a55d6508135 None None] -MSGID0003-I: The storage backend can be used. (config_group: -hus100_backend) - - - -
-
diff --git a/doc/config-reference/block-storage/drivers/hp-msa-driver.xml b/doc/config-reference/block-storage/drivers/hp-msa-driver.xml deleted file mode 100644 index 19d689c76b..0000000000 --- a/doc/config-reference/block-storage/drivers/hp-msa-driver.xml +++ /dev/null @@ -1,266 +0,0 @@ - -
- HP MSA Fibre Channel and iSCSI drivers - - The HPMSAFCDriver and - HPMSAISCSIDriver Cinder drivers allow HP MSA 2040 or 1040 arrays - to be used for block storage in OpenStack deployments. - - - System requirements - - To use the HP MSA drivers, the following are required: - - - - HP MSA 2040 or 1040 array with: - - - - iSCSI or FC host interfaces - - - - G22x firmware or later - - - - - - Network connectivity between the OpenStack host and the array - management interfaces - - - - HTTPS or HTTP must be enabled on the array - - - - - - Supported operations - - - - Create, delete, attach, and detach volumes. - - - - Create, list, and delete volume snapshots. - - - - Create a volume from a snapshot. - - - - Copy an image to a volume. - - - - Copy a volume to an image. - - - - Clone a volume. - - - - Extend a volume. - - - - Migrate a volume with back-end assistance. - - - - Retype a volume. - - - - Manage and unmanage a volume. - - - - - - Configuring the array - - - - Verify that the array can be managed via an HTTPS connection. - HTTP can also be used if hpmsa_api_protocol=http - is placed into the appropriate sections of the - cinder.conf file. - - Confirm that virtual pools A and B are present if you plan to - use virtual pools for OpenStack storage. - - If you plan to use vdisks instead of virtual pools, create or - identify one or more vdisks to be used for OpenStack storage; - typically this will mean creating or setting aside one disk group for - each of the A and B controllers. - - - - Edit the cinder.conf file to define an - storage backend entry for each storage pool on the array that will be - managed by OpenStack. Each entry consists of a unique section name, - surrounded by square brackets, followed by options specified in - key=value format. - - - - The hpmsa_backend_name value - specifies the name of the storage pool or vdisk on the - array. - - - - The volume_backend_name option value - can be a unique value, if you wish to be able to assign volumes - to a specific storage pool on the array, or a name that's shared - among multiple storage pools to let the volume scheduler choose - where new volumes are allocated. - - - - The rest of the options will be repeated for each storage - pool in a given array: the appropriate Cinder driver name; IP - address or hostname of the array management interface; the - username and password of an array user account with - manage privileges; and the iSCSI IP addresses - for the array if using the iSCSI transport protocol. - - - - In the examples below, two backends are defined, one for pool A - and one for pool B, and a common - volume_backend_name is used so that a single volume - type definition can be used to allocate volumes from both - pools. 
- - - iSCSI example backend entries - - [pool-a] -hpmsa_backend_name = A -volume_backend_name = hpmsa-array -volume_driver = cinder.volume.drivers.san.hp.hpmsa_iscsi.HPMSAISCSIDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage -hpmsa_iscsi_ips = 10.2.3.4,10.2.3.5 - -[pool-b] -hpmsa_backend_name = B -volume_backend_name = hpmsa-array -volume_driver = cinder.volume.drivers.san.hp.hpmsa_iscsi.HPMSAISCSIDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage -hpmsa_iscsi_ips = 10.2.3.4,10.2.3.5 - - - - Fibre Channel example backend entries - - [pool-a] -hpmsa_backend_name = A -volume_backend_name = hpmsa-array -volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage - -[pool-b] -hpmsa_backend_name = B -volume_backend_name = hpmsa-array -volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage - - - - - If any volume_backend_name value refers to a - vdisk rather than a virtual pool, add an additional statement - hpmsa_backend_type = linear to that backend - entry. - - - - If HTTPS is not enabled in the array, include - hpmsa_api_protocol = http in each of the backend - definitions. - - - - If HTTPS is enabled, you can enable certificate verification - with the option hpmsa_verify_certificate=True. - You may also use the - hpmsa_verify_certificate_path parameter to - specify the path to a CA_BUNDLE file containing CAs other than those - in the default list. - - - - Modify the [DEFAULT] section of the - cinder.conf file to add an - enabled_backends parameter specifying the backend - entries you added, and a default_volume_type - parameter specifying the name of a volume type that you will create in - the next step. - - - [DEFAULT] section changes - - [DEFAULT] - ... -enabled_backends = pool-a,pool-b -default_volume_type = hpmsa - ... - - - - - Create a new volume type for each distinct - volume_backend_name value that you added to - cinder.conf. The example below assumes that the same - volume_backend_name=hpmsa-array option was - specified in all of the entries, and specifies that the volume type - hpmsa can be used to allocate volumes from any of - them. - Creating a volume type - - $ cinder type-create hpmsa - - $ cinder type-key hpmsa set - volume_backend_name=hpmsa-array - - - - - After modifying cinder.conf, restart the - cinder-volume service. - - - - - - Driver-specific options - - The following table contains the configuration options that are - specific to the HP MSA drivers. - - - -
diff --git a/doc/config-reference/block-storage/drivers/hpe-3par-driver.xml b/doc/config-reference/block-storage/drivers/hpe-3par-driver.xml deleted file mode 100644 index 0e1f9be373..0000000000 --- a/doc/config-reference/block-storage/drivers/hpe-3par-driver.xml +++ /dev/null @@ -1,485 +0,0 @@ -
- HPE 3PAR Fibre Channel and iSCSI drivers - The HPE3PARFCDriver and - HPE3PARISCSIDriver drivers, which are - based on the Block Storage service (Cinder) plug-in - architecture, run volume operations by communicating with the - HPE 3PAR storage system over HTTP, HTTPS, and SSH connections. - The HTTP and HTTPS communications use - python-3parclient, which is part of the - Python standard library. - For information about how to manage HPE 3PAR storage systems, - see the HPE 3PAR user documentation. -
- System requirements - To use the HPE 3PAR drivers, install the following - software and components on the HPE 3PAR storage - system: - - - HPE 3PAR Operating System software version 3.1.3 MU1 - or higher. - - - Deduplication provisioning requires SSD disks - and HPE 3PAR Operating System software version - 3.2.1 MU1 or higher. - - - Enabling Flash Cache Policy requires the following: - - - - Array must contain SSD disks. - - - - HPE 3PAR Operating System software version 3.2.1 MU2 - or higher. - - - - python-3parclient version 4.0.0 or newer. - - - - Array must have the Adaptive Flash Cache license - installed. - - - - Flash Cache - must be enabled on the array with the CLI command - createflashcache size, - where size must be in 16 GB increments. For - example, createflashcache 128g - will create 128 GB of Flash Cache for each - node pair in the array. - - - - - - The Dynamic Optimization license is required to - support any feature that results in a volume - changing provisioning type or CPG. This may - apply to the volume migrate, - retype, and - manage commands. - - - - The Virtual Copy License is required to support - any feature that involves volume snapshots. This - applies to the volume snapshot-* - commands. - - - - - - HPE 3PAR Web Services API Server must be enabled - and running. - - - One Common Provisioning Group (CPG). - - - Additionally, you must install the - python-3parclient version 4.0.0 or - newer from the Python Package Index (PyPI) on the - system with the enabled Block Storage service - volume drivers. - - -
-
- Supported operations - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - Migrate a volume with back-end assistance. - - - Retype a volume. - - - Manage and unmanage a volume. - - - Volume type support for both HPE 3PAR drivers includes - the ability to set the following capabilities in the - OpenStack Block Storage API - cinder.api.contrib.types_extra_specs - volume type extra specs extension module: - - - hpe3par:cpg - This setting is ignored as of Kilo. Instead, - use the hpe3par_cpg setting in - cinder.conf to list the valid CPGs for a - backend. CPGs should now be controlled by configuring - separate backends with pools. - - - hpe3par:snap_cpg - - - hpe3par:provisioning - - - hpe3par:persona - - - hpe3par:vvs - - - hpe3par:flash_cache - - - To work with the default filter scheduler, the key - values are case sensitive and scoped with - hpe3par:. - For information about how to set the key-value pairs and - associate them with a volume type, run the following - command: - $ cinder help type-key - - - Volumes that are cloned only support extra specs - keys cpg, snap_cpg, provisioning and vvs. The others - are ignored. In addition the comments section of the - cloned volume in the HPE 3PAR StoreServ storage array - is not populated. - - If volume types are not used or a particular key is not - set for a volume type, the following defaults are - used: - - - hpe3par:cpg - Defaults to the - hpe3par_cpg setting in the - cinder.conf file. - - - hpe3par:snap_cpg - Defaults to - the hpe3par_snap setting in the - cinder.conf file. If - hpe3par_snap is not set, it - defaults to the hpe3par_cpg - setting. - - - hpe3par:provisioning - - Defaults to thin provisioning, the valid values - are thin, full, - and dedup. - - - hpe3par:persona - Defaults to - the 2 - Generic-ALUA persona. The - valid values are, 1 - Generic, - 2 - Generic-ALUA, - 3 - Generic-legacy, - 4 - HPUX-legacy, 5 - - AIX-legacy, 6 - - EGENERA, 7 - - ONTAP-legacy, 8 - - VMware, 9 - - OpenVMS, 10 - HPUX, - and 11 - WindowsServer. - - - hpe3par:flash_cache - - Defaults to false, the valid values - are true - and false. - - - QoS support for both HPE 3PAR drivers includes - the ability to set the following capabilities in the - OpenStack Block Storage API - cinder.api.contrib.qos_specs_manage - qos specs extension module: - - - minBWS - - - maxBWS - - - minIOPS - - - maxIOPS - - - latency - - - priority - - - The qos keys above no longer require to be scoped - but must be created and associated to a volume type. - For information about how to set the key-value pairs and - associate them with a volume type, run the following - commands: - $ cinder help qos-create - $ cinder help qos-key - $ cinder help qos-associate - - - The following keys require that the HPE 3PAR StoreServ - storage array has a Priority Optimization license - installed. - - - hpe3par:vvs - The virtual - volume set name that has been predefined by the - Administrator with Quality of Service (QoS) rules - associated to it. If you specify extra_specs - hpe3par:vvs, the - qos_specs minIOPS, - maxIOPS, minBWS, - and maxBWS settings are - ignored. - - - minBWS - The QoS I/O - issue bandwidth minimum goal in MBs. If not set, the I/O - issue bandwidth rate has no minimum goal. - - - maxBWS - The QoS I/O - issue bandwidth rate limit in MBs. 
If not set, the I/O - issue bandwidth rate has no limit. - - - minIOPS - The QoS I/O - issue count minimum goal. If not set, the I/O issue - count has no minimum goal. - - - maxIOPS - The QoS I/O - issue count rate limit. If not set, the I/O issue - count rate has no limit. - - - latency - The latency - goal in milliseconds. - - - priority - The priority of - the QoS rule over other rules. If not set, the - priority is normal, valid values are low, normal - and high. - - - - Since the Icehouse release, minIOPS and maxIOPS must - be used together to set I/O limits. Similarly, minBWS - and maxBWS must be used together. If only one is set - the other will be set to the same value. - - - The following keys require that the HPE 3PAR StoreServ - storage array has an Adaptive Flash Cache license - installed. - - - hpe3par:flash_cache - The - flash-cache policy, which can be turned on and off - by setting the value to true or - false. - - - -
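For example, the extra spec and QoS keys described above could be combined into a dedicated volume type as follows; the type name, QoS values, and IDs are placeholders chosen for illustration:

$ cinder type-create 3par-flash
$ cinder type-key 3par-flash set hpe3par:provisioning=thin hpe3par:flash_cache=true
$ cinder qos-create 3par-qos minIOPS=500 maxIOPS=1000
$ cinder qos-associate <qos-specs-id> <volume-type-id>

Remember that hpe3par:flash_cache requires the Adaptive Flash Cache license and that the QoS keys require the Priority Optimization license, as noted above.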
-
- Enable the HPE 3PAR Fibre Channel and iSCSI - drivers - The HPE3PARFCDriver and - HPE3PARISCSIDriver are installed - with the OpenStack software. - - - Install the python-3parclient - Python package on the OpenStack Block Storage - system. - # pip install 'python-3parclient>=4.0,<5.0' - - - - Verify that the HPE 3PAR Web Services API server - is enabled and running on the HPE 3PAR storage - system. - - - Log onto the HPE 3PAR storage system with - administrator - access. - $ ssh 3paradm@<HPE 3PAR IP Address> - - - View the current state of the Web - Services API Server. - # showwsapi --Service- -State- -HTTP_State- HTTP_Port -HTTPS_State- HTTPS_Port -Version- -Enabled Active Enabled 8008 Enabled 8080 1.1 - - - If the Web Services API Server is - disabled, start - it. - - # startwsapi - - - - - If the HTTP or HTTPS state is disabled, enable - one of - them. - # setwsapi -http enable - or # setwsapi -https enable - - - To stop the Web Services API Server, use - the stopwsapi command. For other options - run the setwsapi –h - command. - - - - If you are not using an existing CPG, create a - CPG on the HPE 3PAR storage system to be used as - the default location for creating volumes. - - - Make the following changes in the - /etc/cinder/cinder.conf - file. - ## REQUIRED SETTINGS -# 3PAR WS API Server URL -hpe3par_api_url=https://10.10.0.141:8080/api/v1 - -# 3PAR username with the 'edit' role -hpe3par_username=edit3par - -# 3PAR password for the user specified in hpe3par_username -hpe3par_password=3parpass - -# 3PAR CPG to use for volume creation -hpe3par_cpg=OpenStackCPG_RAID5_NL - -# IP address of SAN controller for SSH access to the array -san_ip=10.10.22.241 - -# Username for SAN controller for SSH access to the array -san_login=3paradm - -# Password for SAN controller for SSH access to the array -san_password=3parpass - -# FIBRE CHANNEL(uncomment the next line to enable the FC driver) -# volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver - -# iSCSI (uncomment the next line to enable the iSCSI driver and -# hpe3par_iscsi_ips or iscsi_ip_address) -#volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver - -# iSCSI multiple port configuration -# hpe3par_iscsi_ips=10.10.220.253:3261,10.10.222.234 - -# Still available for single port iSCSI configuration -#iscsi_ip_address=10.10.220.253 - -## OPTIONAL SETTINGS -# Enable HTTP debugging to 3PAR -hpe3par_debug=False - -# Enable CHAP authentication for iSCSI connections. -hpe3par_iscsi_chap_enabled=false - -# The CPG to use for Snapshots for volumes. If empty hpe3par_cpg will be used. -hpe3par_snap_cpg=OpenStackSNAP_CPG - -# Time in hours to retain a snapshot. You can't delete it before this expires. -hpe3par_snapshot_retention=48 - -# Time in hours when a snapshot expires and is deleted. This must be larger than retention. -hpe3par_snapshot_expiration=72 - -# The ratio of oversubscription when thin provisioned volumes are involved. -# Default ratio is 20.0, this means that a provisioned capacity can be 20 times of the total -# physical capacity. -max_over_subscription_ratio=20.0 - -# This flag represents the percentage of reserved back-end capacity. -reserved_percentage=15 - - You can enable only one driver on each - cinder instance unless you enable multiple - back-end support. See the Cinder multiple - back-end support instructions to enable this - feature. - - - You can configure one or more iSCSI - addresses by using the - option. 
- When you configure multiple addresses, the - driver selects the iSCSI port with the fewest - active volumes at attach time. The IP address - might include an IP port by using a colon - (:) to separate the - address from port. If you do not define an IP - port, the default port 3260 is used. Separate - IP addresses with a comma - (,). The - iscsi_ip_address option might be used as an alternative to - hpe3par_iscsi_ips for - single-port iSCSI configuration. - - - - Save the changes to the - cinder.conf file and - restart the cinder-volume service. - - - The HPE 3PAR Fibre Channel and iSCSI drivers are now - enabled on your OpenStack system. If you experience - problems, review the Block Storage service log files for - errors. - The following table contains all the configuration options - supported by the HPE 3PAR Fibre Channel and iSCSI drivers. - 
-
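Where both transport protocols, or several arrays, are needed, the drivers can be combined through the standard Cinder multi-backend mechanism mentioned above. The following cinder.conf sketch reuses the sample values from the previous step and is illustrative only:

[DEFAULT]
enabled_backends = 3par-fc,3par-iscsi

[3par-fc]
volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver
volume_backend_name = 3par-fc
hpe3par_api_url = https://10.10.0.141:8080/api/v1
hpe3par_username = edit3par
hpe3par_password = 3parpass
hpe3par_cpg = OpenStackCPG_RAID5_NL
san_ip = 10.10.22.241
san_login = 3paradm
san_password = 3parpass

[3par-iscsi]
volume_driver = cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver
volume_backend_name = 3par-iscsi
hpe3par_api_url = https://10.10.0.141:8080/api/v1
hpe3par_username = edit3par
hpe3par_password = 3parpass
hpe3par_cpg = OpenStackCPG_RAID5_NL
san_ip = 10.10.22.241
san_login = 3paradm
san_password = 3parpass
hpe3par_iscsi_ips = 10.10.220.253:3261

A volume type whose volume_backend_name matches 3par-fc or 3par-iscsi then selects the transport for new volumes.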
diff --git a/doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml b/doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml deleted file mode 100644 index a320c8759d..0000000000 --- a/doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml +++ /dev/null @@ -1,309 +0,0 @@ -
- HPE LeftHand/StoreVirtual driver - - The - HPELeftHandISCSIDriver - is based on the - Block Storage service (Cinder) plug-in architecture. Volume operations - are run by communicating with the HPE LeftHand/StoreVirtual system over - HTTPS or SSH connections. HTTPS communications use the - python-lefthandclient, - which is available from the Python Package Index (PyPI). - - - The - HPELeftHandISCSIDriver - can be configured - to run using a REST client to communicate with the array. For - performance improvements and new functionality, the python-lefthandclient - must be downloaded, and HPE LeftHand/StoreVirtual Operating System - software version 11.5 or - higher is required on the array. To configure - the driver, see - . - - For information about how to manage HPE LeftHand/StoreVirtual - storage systems, see the HPE LeftHand/StoreVirtual user documentation. -
- HPE LeftHand/StoreVirtual REST driver - This section describes how to configure the HPE - LeftHand/StoreVirtual Cinder driver. - -
- System requirements - To use the HPE LeftHand/StoreVirtual driver, - do the following: - - - - Install LeftHand/StoreVirtual Operating System - software version 11.5 or higher on the HPE - LeftHand/StoreVirtual storage system. - - - - Create a cluster group. - - - - Install the python-lefthandclient version - 2.0.0 from the Python Package Index on the system with the - enabled Block Storage service volume drivers. - - - -
-
- Supported operations - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - Get volume statistics. - - - Migrate a volume with back-end assistance. - - - Retype a volume. - - - Manage and unmanage a volume. - - - When you use back-end assisted volume - migration, both source and destination clusters must be in - the same HPE LeftHand/StoreVirtual management group. - The HPE LeftHand/StoreVirtual array will use native LeftHand - APIs to migrate the volume. The volume cannot be attached - or have snapshots to migrate. - - - Volume type support for the driver includes the ability to set - the following capabilities in the OpenStack Cinder API - cinder.api.contrib.types_extra_specs - volume type extra specs extension module. - - - - - hpelh:provisioning - - - - - hpelh:ao - - - - - hpelh:data_pl - - - - - To work with the default filter scheduler, the key-value pairs - are case-sensitive and scoped with - 'hpelh:'. - For information about how to set the key-value pairs and - associate them with a volume type, run the following - command: - - $ cinder help type-key - - - The following keys require the HPE LeftHand/StoreVirtual - storage array be configured for - - hpelh:ao - The HPE LeftHand/StoreVirtual storage array - must be configured for Adaptive Optimization. - - hpelh:data_pl - The HPE LeftHand/StoreVirtual storage array - must be able to support the Data Protection level - specified by the extra spec. - - - - - - - If volume types are not used or a particular key is not - set for a volume type, the following defaults are used: - - hpelh:provisioning - Defaults to thin provisioning, the valid values are, - thin - and - full - - hpelh:ao - Defaults to true, the valid values are, - true - and - false. - - hpelh:data_pl - Defaults to - r-0, - Network RAID-0 (None), the valid values are, - - r-0, - Network RAID-0 (None) - - - r-5, - Network RAID-5 (Single Parity) - - - r-10-2, - Network RAID-10 (2-Way Mirror) - - - r-10-3, - Network RAID-10 (3-Way Mirror) - - - r-10-4, - Network RAID-10 (4-Way Mirror) - - - r-6, - Network RAID-6 (Dual Parity), - - - - - - - -
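To illustrate the extra specs above, a volume type could be created and tagged as follows; the type name and values are examples only:

$ cinder type-create lefthand-thin
$ cinder type-key lefthand-thin set hpelh:provisioning=thin hpelh:ao=true hpelh:data_pl=r-10-2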
-
- Enable the HPE LeftHand/StoreVirtual iSCSI driver - - - The - HPELeftHandISCSIDriver - is installed with the OpenStack software. - - - - - Install the - python-lefthandclient - Python package on the OpenStack Block Storage system. - -# pip install 'python-lefthandclient>=2.0,<3.0' - - - If you are not using an existing cluster, create a - cluster on the HPE LeftHand storage system to be used as - the cluster for creating volumes. - - - - - Make the following changes in the - /etc/cinder/cinder.conf - file: - - ## REQUIRED SETTINGS - -# LeftHand WS API Server URL -hpelefthand_api_url=https://10.10.0.141:8081/lhos - -# LeftHand Super user username -hpelefthand_username=lhuser - -# LeftHand Super user password -hpelefthand_password=lhpass - -# LeftHand cluster to use for volume creation -hpelefthand_clustername=ClusterLefthand - -# LeftHand iSCSI driver -volume_driver=cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver - -## OPTIONAL SETTINGS - -# Should CHAP authentication be used (default=false) -hpelefthand_iscsi_chap_enabled=false - -# Enable HTTP debugging to LeftHand (default=false) -hpelefthand_debug=false - -# The ratio of oversubscription when thin provisioned volumes are involved. -# Default ratio is 20.0, this means that a provisioned capacity can be 20 times of the total -# physical capacity. -max_over_subscription_ratio=20.0 - -# This flag represents the percentage of reserved back-end capacity. -reserved_percentage=15 - - You can enable only one driver on each - cinder instance unless you enable multiple - back-end support. See the Cinder multiple - back-end support instructions to enable this - feature. - - - If the - hpelefthand_iscsi_chap_enabled option - is set to true, the driver will - associate randomly-generated CHAP secrets with all - hosts on the HPE LeftHand/StoreVirtual system. OpenStack Compute - nodes use these secrets when creating iSCSI connections. - - CHAP secrets are passed from OpenStack Block - Storage to Compute in clear text. This - communication should be secured to - ensure that CHAP secrets are not discovered. - - - - CHAP secrets are added to existing hosts as - well as newly-created ones. If the CHAP option - is enabled, hosts will not be able to access - the storage without the generated secrets. - - - - - - - Save the changes to the cinder.conf - file and restart the - cinder-volume - service. - - - - The HPE LeftHand/StoreVirtual driver is now enabled on your - OpenStack system. If you experience problems, review the Block - Storage service log files for errors. - 
-
-
diff --git a/doc/config-reference/block-storage/drivers/huawei-storage-driver.xml b/doc/config-reference/block-storage/drivers/huawei-storage-driver.xml deleted file mode 100644 index 44a7b3bc47..0000000000 --- a/doc/config-reference/block-storage/drivers/huawei-storage-driver.xml +++ /dev/null @@ -1,1090 +0,0 @@ - -
- Huawei volume driver - The Huawei volume driver provides logical volume and - snapshot functions for virtual machines (VMs) in the OpenStack Block Storage service, and supports - both the iSCSI and Fibre Channel protocols. -
- Version mappings - The following table describes the version mappings among the Block Storage driver, - Huawei storage system, and OpenStack: - - - - - - - - - - - - - - - - - - - - - -
Version mappings among the Block Storage driver and Huawei storage - system
Description (Volume Driver Version)Storage System VersionVolume Driver Version
Create, delete, expand, attach, and detach volumesCreate and - delete a snapshotCopy an image to a volumeCopy a volume - to an imageCreate a volume from a snapshotClone a - volumeQoS - OceanStor T series V1R5 C02/C30 - OceanStor T series V2R2 C00/C20/C30 - OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00 - OceanStor 18500/18800V1R1C00/C20/C30 V3R3C00 - - 1.1.0 - 1.2.0 -
Volume Migration (version 1.2.0 or later) - Auto zoning (version 1.2.0 or later) - SmartTier (version 1.2.0 or later) - SmartCache (version 1.2.0 or later) - Smart Thin/Thick (version 1.2.0 or later) - SmartPartition (version 1.2.0 or later) - OceanStor T series V2R2 C00/C20/C30 - OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00 - OceanStor 18500/18800V1R1C00/C20/C30 - 1.2.0
-
-
- Volume Driver Configuration - This section describes how to configure the Huawei volume driver for iSCSI storage - and FC storage. -
- Configuring the volume driver for iSCSI storage - This section describes how to configure the volume driver for the different iSCSI - storage products. -
- Configuring the volume driver for T series V1 (iSCSI) - - - In /etc/cinder, create a Huawei-customized driver - configuration file. The file format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>T</Product> - <Protocol>iSCSI</Protocol> - <ControllerIP0>x.x.x.x</ControllerIP0> - <ControllerIP1>x.x.x.x</ControllerIP1> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <StripUnitSize>xxx</StripUnitSize> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <Prefetch Type="xxx" Value="xxx" /> - <StoragePool Name="xxx" /> - <StoragePool Name="xxx" /> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.1.5 "Parameters in the - Configuration File". - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, add - the following contents. volume_driver indicates the loaded driver, and - cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_t.HuaweiTISCSIDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart command - to restart the Block Storage service. - - -
-
- Configuring the volume driver for T series V2 (iSCSI) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>TV2</Product> - <Protocol>iSCSI</Protocol> - <RestURL>https://x.x.x.x:8088/deviceManager/rest/</RestURL> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <LUNcopyWaitInterval>xxx</LUNcopyWaitInterval> - <Timeout>432000</Timeout> - <StoragePool>xxx;xxx;xxx</StoragePool> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.1.5 "Parameters in the - Configuration File". - - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_driver.HuaweiTV2ISCSIDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart - command to restart the Block Storage service. - - -
-
- Configuring the volume driver for V3 (iSCSI) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>V3</Product> - <Protocol>iSCSI</Protocol> - <RestURL>https://x.x.x.x:8088/deviceManager/rest/</RestURL> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <LUNcopyWaitInterval>xxx</LUNcopyWaitInterval> - <Timeout>432000</Timeout> - <StoragePool>xxx;xxx;xxx</StoragePool> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.1.5 "Parameters in the - Configuration File". - - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_driver.HuaweiV3ISCSIDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart - command to restart the Block Storage service. - - -
-
- Configuring the volume driver for OceanStor 18000 series (iSCSI) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>18000</Product> - <Protocol>iSCSI</Protocol> - <RestURL>https://x.x.x.x/deviceManager/rest/</RestURL> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <StripUnitSize>xxx</StripUnitSize> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <Prefetch Type="xxx" Value="xxx" /> - <StoragePool Name="xxx" /> - <StoragePool Name="xxx" /> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.1.5 "Parameters in the - Configuration File". - - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver file, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart - command to restart the Block Storage service. - - -
-
- Parameters in the configuration file - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Mandatory parameters
ParameterDefault valueDescriptionApplicable to
- - - - - - Type of a storage product. Possible values are T, 18000 and - V3. - All
- - Type of a connection protocol. The possible value is - iSCSI. - All
-IP address of the primary controller on an OceanStor T series - V100R005 storage device.T series V1
-IP address of the secondary controller on an OceanStor T series - V100R005 storage device.T series V1
-Access address of the REST interface, - https://x.x.x.x/devicemanager/rest/. x.x.x.x indicates the - management IP address. OceanStor 18000 uses the preceding setting, - and V2 and V3 need to add port number 8088, for example, - https://x.x.x.x:8088/deviceManager/rest/.T series V2 V3 18000
- - - - - - User name of a storage administrator. - All
- - - - - - - Password of a storage administrator. - - All -
-Name of a storage pool to be used. If you need to configure multiple - storage pools, separate them by semicolons (;). - All -
-Default IP address of the iSCSI target port that is provided for - computing nodes.All
LinuxOperating system of the Nova compute node's host.All
-IP address of the Nova compute node's host.All
- - Note for the parameters - The value of StoragePool cannot contain Chinese characters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Optional parameters
ParameterDefault valueDescriptionApplicable to
- - - Thin - Type of the LUNs to be created. The value can be Thick or Thin. - All -
- - - - 64 - - Stripe depth of a LUN to be created. The unit is KB. This - parameter is invalid when a thin LUN is created. - T series V1
- - - - 1 - - Cache write type, possible values are: 1 (write back), 2 - (write through), and 3 (mandatory write back). - All
- - - 1 - - Cache mirroring or not, possible values are: 0 (without - mirroring) or 1 (with mirroring). - All
3 - Cache prefetch policy, possible values are: 0 (no prefetch), 1 - (fixed prefetch), 2 (variable prefetch) or 3 (intelligent - prefetch). - T series V1
0 - Cache prefetch value. - - T series V1 -
5After LUN copy is enabled, the plug-in frequently queries the copy - progress. You can set a value to specify the query interval.T series V2 V3 18000
432000Timeout interval for waiting LUN copy of a storage device to - complete. The unit is second.T series V2 V3 18000
- - Name of a compute node initiator. - - All -
- - IP address of the iSCSI port provided for compute - nodes. - - All -
- - IP address of the iSCSI target port that is provided for - computing nodes. - T series V2 V3 18000
-
-
- Configuring iSCSI Multipathing - - - Create a port group on the storage device using the DeviceManager, and add the - service links that require multipathing into the port group. - - - Log in to the storage device using CLI commands, and enable the - multiport discovery switch for multipathing. - developer:/>change iscsi discover_multiport switch=on - - - Add the port group settings in the Huawei-customized driver - configuration file, and configure the port group name needed by an - initiator. - <iSCSI> -<DefaultTargetIP>x.x.x.x</DefaultTargetIP> -<Initiator Name="xxxxxx" TargetPortGroup="xxxx" /> -</iSCSI> - - - Enable the multipathing switch of the OpenStack Nova module. - If the version of OpenStack is Havana or Icehouse, add libvirt_iscsi_use_multipath = True - in [default] of /etc/nova/nova.conf. If the version of OpenStack is Juno, Kilo, or Liberty, add - iscsi_use_multipath = True in [libvirt] of /etc/nova/nova.conf. - - - Run the service nova-compute restart command to restart the - nova-compute service. - - -
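For Juno and later releases, the nova.conf change described in the steps above amounts to the following fragment, shown as an illustrative excerpt of /etc/nova/nova.conf:

[libvirt]
iscsi_use_multipath = True

On Havana and Icehouse, place libvirt_iscsi_use_multipath = True in the [default] section instead.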
-
- Configuring CHAP and ALUA - On a public network, any application server whose IP address resides on the same network - segment as that of the storage system's iSCSI host port can access the storage system and perform - read and write operations in it. This poses risks to the data security of the storage system. To - ensure the storage system access security, you can configure CHAP authentication to control - application servers' access to the storage system. - Configure CHAP and ALUA. - Configure the driver configuration file as follows: - <Initiator ALUA="xxx" CHAPinfo="xxx" Name="xxx" TargetIP="x.x.x.x"/> - ALUA indicates a multipathing mode. 0 indicates that ALUA is disabled. 1 indicates that ALUA - is enabled. - CHAPinfo indicates the user name and password authenticated by CHAP. The format is mmuser; - mm-user@storage. The user name and password are separated by semicolons (;). -
-
-
- Configuring the volume driver (Fibre Channel) - This section describes how to configure the volume driver for the different Fibre - Channel storage products. - For a Fibre Channel network, the sg tool must be installed so that storage resources can be used. -
- Configuring the volume driver for T series V1 (Fibre Channel) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>T</Product> - <Protocol>FC</Protocol> - <ControllerIP0>x.x.x.x</ControllerIP0> - <ControllerIP1>x.x.x.x</ControllerIP1> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <StripUnitSize>xxx</StripUnitSize> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <Prefetch Type="xxx" Value="xxx" /> - <StoragePool Name="xxx" /> - <StoragePool Name="xxx" /> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.2.5 "Parameters in the - Configuration File" - - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver file, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_t.HuaweiTFCDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart command - to restart the Block Storage service. - - -
-
- Configuring the volume driver for T series V2 (Fibre Channel) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>TV2</Product> - <Protocol>FC</Protocol> - <RestURL>https://x.x.x.x:8088/deviceManager/rest/</RestURL> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <LUNcopyWaitInterval>xxx</LUNcopyWaitInterval> - <Timeout>432000</Timeout> - <StoragePool>xxx;xxx;xxx</StoragePool> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.2.5 "Parameters in the - Configuration File" - - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver file, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_driver.HuaweiTV2FCDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart - command to restart the Block Storage service. - - -
-
- Configuring the volume driver for V3 (Fibre Channel) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>V3</Product> - <Protocol>FC</Protocol> - <RestURL>https://x.x.x.x:8088/deviceManager/rest/</RestURL> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <LUNcopyWaitInterval>xxx</LUNcopyWaitInterval> - <Timeout>432000</Timeout> - <StoragePool>xxx;xxx;xxx</StoragePool> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.2.5 "Parameters in the - Configuration File" - - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver file, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_driver.HuaweiV3FCDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart - command to restart the Block Storage service. - - -
-
- Configuring the volume driver for OceanStor 18000 series (Fibre - Channel) - - - In /etc/cinder, create a Huawei-customized driver configuration file. The file - format is XML. - - - Configure parameters in the driver configuration file. - <?xml version='1.0' encoding='UTF-8'?> -<config> - <Storage> - <Product>18000</Product> - <Protocol>FC</Protocol> - <RestURL>https://x.x.x.x/deviceManager/rest/</RestURL> - <UserName>xxxxxxxx</UserName> - <UserPassword>xxxxxxxx</UserPassword> - </Storage> - <LUN> - <LUNType>xxx</LUNType> - <StripUnitSize>xxx</StripUnitSize> - <WriteType>xxx</WriteType> - <MirrorSwitch>xxx</MirrorSwitch> - <Prefetch Type="xxx" Value="xxx" /> - <StoragePool Name="xxx" /> - <StoragePool Name="xxx" /> - </LUN> - <iSCSI> - <DefaultTargetIP>x.x.x.x</DefaultTargetIP> - <Initiator Name="xxxxxxxx" TargetIP="x.x.x.x"/> - </iSCSI> - <Host OSType="Linux" HostIP="x.x.x.x, x.x.x.x"/> -</config> - - - - - For details about the parameters in the configuration - file, see section 1.2.2.5 "Parameters in the - Configuration File" - - - - - - - Configure the cinder.conf file - In the [default] block of /etc/cinder/cinder.conf, - add the following contents. volume_driver indicates the loaded - driver file, and cinder_huawei_conf_file indicates the specified - Huawei-customized configuration file. - volume_driver =cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - - - Run the service cinder-volume restart - command to restart the Block Storage service. - - -
-
- Parameters in the configuration file - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Mandatory parameters
ParameterDefault valueDescriptionApplicable to
- - - - - - Type of a storage product. Possible values are T, 18000 and - V3. - All
- - Type of a connection protocol. The possible value is - FC. - All
-IP address of the primary controller on an OceanStor T series - V100R005 storage device.T series V1
-IP address of the secondary controller on an OceanStor T series - V100R005 storage device.T series V1
-Access address of the REST interface, - https://x.x.x.x/devicemanager/rest/. x.x.x.x indicates the - management IP address. OceanStor 18000 uses the preceding setting, - and V2 and V3 need to add port number 8088, for example, - https://x.x.x.x:8088/deviceManager/rest/.T series V2 V3 18000
- - - - - - User name of a storage administrator. - All
- - - - - - - Password of a storage administrator. - - All -
-Name of a storage pool to be used. If you need to configure multiple - storage pools, separate them by semicolons (;). - All -
-Default IP address of the iSCSI target port that is provided for - computing nodes.All
LinuxOperating system of the Nova compute node's host.All
-IP address of the Nova compute node's host.All
- - Note for the parameters - The value of StoragePool cannot contain Chinese characters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Optional parameters
ParameterDefault valueDescriptionApplicable to
- - - Thin - Type of the LUNs to be created. The value can be Thick or Thin. - All -
- - - - 64 - - Stripe depth of a LUN to be created. The unit is KB. This - parameter is invalid when a thin LUN is created. - T series V1
- - - - 1 - - Cache write type, possible values are: 1 (write back), 2 - (write through), and 3 (mandatory write back). - All
- - - 1 - - Cache mirroring or not, possible values are: 0 (without - mirroring) or 1 (with mirroring). - All
3 - Cache prefetch policy, possible values are: 0 (no prefetch), 1 - (fixed prefetch), 2 (variable prefetch) or 3 (intelligent - prefetch). - T series V1
0 - Cache prefetch value. - - T series V1 -
5After LUN copy is enabled, the plug-in frequently queries the copy - progress. You can set a value to specify the query interval.T series V2 V3 18000
432000Timeout interval for waiting LUN copy of a storage device to - complete. The unit is second.T series V2 V3 18000
-
-
-
- Configuring multi-storage support - Example for configuring multiple storage systems: - enabled_backends = t_fc, 18000_fc -[t_fc] -volume_driver = cinder.volume.drivers.huawei.huawei_t.HuaweiTFCDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf_t_fc.xml -volume_backend_name = HuaweiTFCDriver -[18000_fc] -volume_driver = cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver -cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf_18000_fc.xml -volume_backend_name = Huawei18000FCDriver -
-
-
diff --git a/doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml b/doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml deleted file mode 100644 index d90e81a35b..0000000000 --- a/doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml +++ /dev/null @@ -1,318 +0,0 @@ -
- IBM FlashSystem volume driver - The volume driver for FlashSystem provides OpenStack Block - Storage hosts with access to IBM FlashSystems. -
- Configure FlashSystem - - Configure storage array - The volume driver requires a pre-defined array. Users - need to create an array on FlashSystem before using - the volume driver. An existing array also can be used - and existing data will not be deleted. - - FlashSystem can only create one array, so no - configuration option is needed for the IBM - FlashSystem driver to assign it. - - - - Configure user authentication for the - driver - The driver requires access to the FlashSystem - management interface using SSH. It should be provided with - the FlashSystem management IP using the - san_ip flag, and the management port - should be provided by the san_ssh_port - flag. By default, the port value is configured to be - port 22 (SSH). - - Make sure the compute node running the - cinder-volume - driver has SSH network access to the storage - system. - - Using password authentication, assign a password - to the user on the FlashSystem. See the driver - configuration flags for the user and password in - or - . - - -
-
- IBM FlashSystem FC driver -
- Data Path configuration - Using Fibre Channel (FC), each FlashSystem node - should have at least one WWPN port configured. If the - flashsystem_multipath_enabled - flag is set to True in the Cinder - configuration file, the driver uses all available WWPNs - to attach the volume to the instance (details about the - configuration flags appear in section - "Enable IBM FlashSystem FC driver"). - If the flag is not set, the driver uses the WWPN - associated with the volume's preferred node (if - available); otherwise, it uses the first available WWPN - of the system. The driver obtains the WWPNs directly - from the storage system. You do not need to provide - these WWPNs to the driver. - - Using FC, ensure that the block storage hosts - have FC connectivity to the FlashSystem. -
-
- Enable IBM FlashSystem FC driver - Set the volume driver to the FlashSystem - driver by setting the volume_driver - option in configuration file - cinder.conf as follows: - volume_driver = cinder.volume.drivers.ibm.flashsystem_fc.FlashSystemFCDriver - To enable IBM FlashSystem FC driver, configure the - following options in configuration file - cinder.conf: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
List of configuration flags for IBM FlashSystem FC driver
Flag nameTypeDefaultDescription
san_ipRequiredManagement IP or host name
san_ssh_portOptional22Management port
san_loginRequiredManagement login user name
san_passwordRequiredManagement login password
flashsystem_connection_protocolRequiredConnection protocol should be set to FC
flashsystem_multipath_enabledRequiredEnable multipath for FC connections
flashsystem_multihost_enabled - OptionalTrue - Enable mapping vdisks to multiple hosts - - This option allows the driver to - map a vdisk to more than one host at a - time. This scenario occurs during - migration of a virtual machine with an - attached volume; the volume is - simultaneously mapped to both the source - and destination compute hosts. If your - deployment does not require attaching - vdisks to multiple hosts, setting this - flag to False will - provide added safety. - -
-
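Putting the flags above together, a minimal FC backend definition in cinder.conf might look like the following; the addresses and credentials are placeholders:

volume_driver = cinder.volume.drivers.ibm.flashsystem_fc.FlashSystemFCDriver
san_ip = 192.168.1.10
san_ssh_port = 22
san_login = superuser
san_password = passw0rd
flashsystem_connection_protocol = FC
flashsystem_multipath_enabled = False
flashsystem_multihost_enabled = True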
-
-
- IBM FlashSystem iSCSI driver -
- Network configuration - Using iSCSI, each FlashSystem node - should have at least one iSCSI port configured. - The iSCSI IP addresses of an IBM FlashSystem can be - obtained from the FlashSystem GUI or CLI; - for details, refer to the IBM FlashSystem Redbooks. - - Using iSCSI, ensure that the compute nodes - have iSCSI network access to IBM FlashSystem. -
-
- Enable IBM FlashSystem iSCSI driver - Set the volume driver to the FlashSystem - driver by setting the volume_driver - option in configuration file - cinder.conf as follows: - volume_driver = cinder.volume.drivers.ibm.flashsystem_iscsi.FlashSystemISCSIDriver - To enable IBM FlashSystem iSCSI driver, configure the - following options in configuration file - cinder.conf: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
List of configuration flags for IBM FlashSystem iSCSI driver
Flag nameTypeDefaultDescription
san_ipRequiredManagement IP or host name
san_ssh_portOptional22Management port
san_loginRequiredManagement login user name
san_passwordRequiredManagement login password
flashsystem_connection_protocolRequired - Connection protocol should be set to iSCSI -
flashsystem_multihost_enabled - OptionalTrue - Enable mapping vdisks to multiple hosts - - This option allows the driver to - map a vdisk to more than one host at a - time. This scenario occurs during - migration of a virtual machine with an - attached volume; the volume is - simultaneously mapped to both the source - and destination compute hosts. If your - deployment does not require attaching - vdisks to multiple hosts, setting this - flag to False will - provide added safety. - -
iscsi_ip_addressRequired - Set to one of the iSCSI IP addresses obtained by FlashSystem GUI or CLI - - On cluster of FlashSystem, iSCSI IP address column - is the seventh column IP_address - of the output of lsportip. - -
flashsystem_iscsi_portidRequired - Set to the id of the iscsi_ip_address obtained by FlashSystem GUI or CLI - - On cluster of FlashSystem, port - ID column is the first column id - of the output of lsportip, - not the sixth column port_id. - -
-
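Combining the flags above, a minimal iSCSI backend definition in cinder.conf might look like the following; the addresses, credentials, and port ID are placeholders and must match the output of lsportip on your system:

volume_driver = cinder.volume.drivers.ibm.flashsystem_iscsi.FlashSystemISCSIDriver
san_ip = 192.168.1.10
san_login = superuser
san_password = passw0rd
flashsystem_connection_protocol = iSCSI
flashsystem_multihost_enabled = True
iscsi_ip_address = 192.168.2.10
flashsystem_iscsi_portid = 1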
-
-
- Limitation and known issues - - IBM FlashSystem only works when open_access_enabled=off. - - 
-
- Supported operations - These operations are supported: - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - Get volume statistics. - - -
-
diff --git a/doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml b/doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml deleted file mode 100644 index a38679c7b7..0000000000 --- a/doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml +++ /dev/null @@ -1,185 +0,0 @@ -
- IBM GPFS volume driver - IBM General Parallel File System (GPFS) is a cluster file - system that provides concurrent access to file systems from - multiple nodes. The storage provided by these nodes can be - direct attached, network attached, SAN attached, or a - combination of these methods. GPFS provides many features - beyond common data access, including data replication, policy - based storage management, and space efficient file snapshot - and clone operations. -
- How the GPFS driver works - The GPFS driver enables the use of GPFS in a fashion - similar to that of the NFS driver. With the GPFS driver, instances do not - actually access a storage device at the block level. - Instead, volume backing files are created in a GPFS file - system and mapped to instances, which emulate a block - device. - - - GPFS software must be installed and running on - nodes where Block Storage and Compute - services run in the OpenStack environment. - A GPFS file system must also be created and - mounted on these nodes before starting the - cinder-volume service. The - details of these GPFS specific steps are covered - in GPFS: Concepts, Planning, and Installation Guide - and GPFS: Administration and Programming Reference. - - - - Optionally, the Image Service can be configured to store images on - a GPFS file system. When a Block Storage volume is created from - an image, if both image data and volume data reside in the - same GPFS file system, the data from image file is moved - efficiently to the volume file using copy-on-write - optimization strategy. -
-
- Enable the GPFS driver - To use the Block Storage service with the GPFS driver, first set the - volume_driver in - cinder.conf: - volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver - The following table contains the configuration options - supported by the GPFS driver. - - - The gpfs_images_share_mode - flag is only valid if the Image Service is configured to - use GPFS with the gpfs_images_dir flag. - When the value of this flag is - copy_on_write, the paths - specified by the - gpfs_mount_point_base and - gpfs_images_dir flags must both - reside in the same GPFS file system and in the same - GPFS file set. - -
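A minimal sketch of the corresponding cinder.conf settings, assuming the Image Service also stores images on GPFS, is shown below; the directory paths are examples and must reside in the same GPFS file system and file set when copy_on_write is used:

volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
gpfs_mount_point_base = /gpfs/openstack/cinder/volumes
gpfs_images_dir = /gpfs/openstack/glance/images
gpfs_images_share_mode = copy_on_write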
-
- Volume creation options - It is possible to specify additional volume - configuration options on a per-volume basis by specifying - volume metadata. The volume is created using the specified - options. Changing the metadata after the volume is created - has no effect. The following table lists the volume - creation options supported by the GPFS volume - driver. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Volume Create Options for GPFS Volume - Drive
Metadata Item NameDescription
fstypeSpecifies whether to create a file system or a swap - area on the new volume. If - fstype=swap is - specified, the mkswap command is used to - create a swap area. Otherwise the mkfs command - is passed the specified file system type, for example - ext3, ext4 or ntfs.
fslabelSets the file system label for - the file system specified by fstype option. - This value is only used if fstype is - specified.
data_pool_name - Specifies the GPFS storage pool to which the volume is to be assigned. - Note: The GPFS storage pool must already have been - created. -
replicas - Specifies how many copies of the volume - file to create. Valid values are 1, 2, - and, for GPFS V3.5.0.7 and later, 3. This - value cannot be greater than the value of - the MaxDataReplicas attribute of the file - system. -
dio - Enables or disables the Direct I/O caching - policy for the volume file. Valid values - are yes and no. -
write_affinity_depth - Specifies the allocation policy to be used - for the volume file. Note: This option - only works if allow-write-affinity is - set for the GPFS data pool. -
block_group_factor - Specifies how many blocks are laid out - sequentially in the volume file to behave - as a single large block. Note: This option - only works if allow-write-affinity is - set for the GPFS data pool. -
write_affinity_failure_group - Specifies the range of nodes (in GPFS - shared nothing architecture) where - replicas of blocks in the volume file are - to be written. See GPFS: Administration and - Programming Reference for more - details on this option. -
- - Example: Volume creation options - This example shows the creation of a 50GB volume with an ext4 - file system labeled newfs and direct IO enabled: - $ cinder create --metadata fstype=ext4 fslabel=newfs dio=yes --display-name volume_1 50 - -
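Similarly, pool placement and replication can be requested at creation time; the pool name below is an example and the GPFS storage pool must already exist:

$ cinder create --metadata data_pool_name=silver replicas=2 --display-name volume_2 100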
-
-Operational notes for GPFS driver - - Snapshots and clones - Volume snapshots are implemented using the GPFS file - clone feature. Whenever a new snapshot is created, the - snapshot file is efficiently created as a read-only - clone parent of the volume, and the volume file uses - copy-on-write optimization strategy to minimize data - movement. - Similarly when a new volume is created from a snapshot or from an existing volume, the - same approach is taken. The same approach is also used when a new volume is created - from an Image Service image, if the source image is in raw format, and - gpfs_images_share_mode is set to - copy_on_write. - -
-
diff --git a/doc/config-reference/block-storage/drivers/ibm-sonas-7k-driver.xml b/doc/config-reference/block-storage/drivers/ibm-sonas-7k-driver.xml deleted file mode 100644 index bfb0c918b4..0000000000 --- a/doc/config-reference/block-storage/drivers/ibm-sonas-7k-driver.xml +++ /dev/null @@ -1,16 +0,0 @@ -
- IBM SONAS and Storwize V7000 volume driver - - The IBM Storage Driver for OpenStack is a Block Storage driver - that supports IBM SONAS and Storwize V7000 storage systems over NFS. - - - Set the following in your cinder.conf file, and - use the table of options to configure it. - - volume_driver = cinder.volume.drivers.ibm.ibmnas - -
diff --git a/doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml b/doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml deleted file mode 100644 index b35960b3c4..0000000000 --- a/doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml +++ /dev/null @@ -1,756 +0,0 @@ -
- IBM Storwize family and SVC volume driver - The volume management driver for Storwize family and SAN - Volume Controller (SVC) provides OpenStack Compute instances - with access to IBM Storwize family or SVC storage - systems. -
- Configure the Storwize family and SVC system - - Network configuration - The Storwize family or SVC system must be configured - for iSCSI, Fibre Channel, or both. - If using iSCSI, each Storwize family or SVC node - should have at least one iSCSI IP address. The IBM - Storwize/SVC driver uses an iSCSI IP address - associated with the volume's preferred node (if - available) to attach the volume to the instance, - otherwise it uses the first available iSCSI IP address - of the system. The driver obtains the iSCSI IP address - directly from the storage system; you do not need to - provide these iSCSI IP addresses directly to the - driver. - - If using iSCSI, ensure that the compute nodes - have iSCSI network access to the Storwize family - or SVC system. - - - OpenStack Nova's Grizzly version supports iSCSI - multipath. Once this is configured on the Nova - host (outside the scope of this documentation), - multipath is enabled. - - If using Fibre Channel (FC), each Storwize family or - SVC node should have at least one WWPN port - configured. If the - storwize_svc_multipath_enabled - flag is set to True in the Cinder configuration file, - the driver uses all available WWPNs to attach the - volume to the instance (details about the - configuration flags appear in the next - section). If the flag is not set, the - driver uses the WWPN associated with the volume's - preferred node (if available), otherwise it uses the - first available WWPN of the system. The driver obtains - the WWPNs directly from the storage system; you do not - need to provide these WWPNs directly to the - driver. - - If using FC, ensure that the compute nodes have - FC connectivity to the Storwize family or SVC - system. - - - - iSCSI CHAP authentication - If using iSCSI for data access and the - storwize_svc_iscsi_chap_enabled - is set to True, the driver will - associate randomly-generated CHAP secrets with all - hosts on the Storwize family system. OpenStack compute - nodes use these secrets when creating iSCSI - connections. - CHAP secrets are added to existing hosts as - well as newly-created ones. If the CHAP option - is enabled, hosts will not be able to access - the storage without the generated - secrets. - - - Not all OpenStack Compute drivers support - CHAP authentication. Please check - compatibility before using. - - - CHAP secrets are passed from OpenStack Block - Storage to Compute in clear text. This - communication should be secured to ensure that - CHAP secrets are not discovered. - - - - Configure storage pools - Each instance of the IBM Storwize/SVC driver - allocates all volumes in a single pool. The pool - should be created in advance and be provided to the - driver using the - storwize_svc_volpool_name - configuration flag. Details about the configuration - flags and how to provide the flags to the driver - appear in the next section. - - - Configure user authentication for the - driver - The driver requires access to the Storwize family or - SVC system management interface. The driver - communicates with the management using SSH. The driver - should be provided with the Storwize family or SVC - management IP using the san_ip - flag, and the management port should be provided by - the san_ssh_port flag. By default, - the port value is configured to be port 22 - (SSH). - - - Make sure the compute node running the cinder-volume - management driver has SSH network access to the - storage system. 
- - - To allow the driver to communicate with the Storwize - family or SVC system, you must provide the driver with - a user on the storage system. The driver has two - authentication methods: password-based authentication - and SSH key pair authentication. The user should have - an Administrator role. It is suggested to create a new - user for the management driver. Please consult with - your storage and security administrator regarding the - preferred authentication method and how passwords or - SSH keys should be stored in a secure manner. - - When creating a new user on the Storwize or SVC - system, make sure the user belongs to the - Administrator group or to another group that has - an Administrator role. - - If using password authentication, assign a password - to the user on the Storwize or SVC system. The driver - configuration flags for the user and password are - san_login and - san_password, - respectively. - If you are using the SSH key pair authentication, - create SSH private and public keys using the - instructions below or by any other method. Associate - the public key with the user by uploading the public - key: select the "choose file" option in the Storwize - family or SVC management GUI under "SSH public key". - Alternatively, you may associate the SSH public key - using the command line interface; details can be found - in the Storwize and SVC documentation. The private key - should be provided to the driver using the - san_private_key configuration - flag. - - - Create a SSH key pair with OpenSSH - You can create an SSH key pair using OpenSSH, by - running: - $ ssh-keygen -t rsa - The command prompts for a file to save the key pair. - For example, if you select 'key' as the filename, two - files are created: key and - key.pub. The - key file holds the private SSH - key and key.pub holds the public - SSH key. - The command also prompts for a pass phrase, which - should be empty. - The private key file should be provided to the - driver using the san_private_key - configuration flag. The public key should be uploaded - to the Storwize family or SVC system using the storage - management GUI or command line interface. - - Ensure that Cinder has read permissions on the - private key file. - - -
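Assuming SSH key pair authentication as described above, the relevant cinder.conf entries would resemble the following; the address, user name, key path, and pool name are placeholders:

san_ip = 10.10.10.10
san_ssh_port = 22
san_login = openstack
san_private_key = /etc/cinder/storwize_rsa
storwize_svc_volpool_name = openstack_pool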
-
- Configure the Storwize family and SVC driver - - Enable the Storwize family and SVC driver - Set the volume driver to the Storwize family and SVC - driver by setting the volume_driver - option in cinder.conf as - follows: - volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver - - - Storwize family and SVC driver options in - cinder.conf - The following options specify default values for all - volumes. Some can be over-ridden using volume types, - which are described below. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
List of configuration flags for Storwize - storage and SVC driver
Flag nameTypeDefaultDescription
san_ip - RequiredManagement IP or host name -
san_ssh_port - Optional22Management port
san_login - RequiredManagement login username -
san_password - Required - The authentication requires - either a password - (san_password) - or SSH private key - (san_private_key). - One must be specified. If both are - specified, the driver uses only the - SSH private key. - Management login password -
san_private_key - Required Management login SSH private - key -
- storwize_svc_volpool_name - RequiredDefault pool name for - volumes
- storwize_svc_vol_rsize - Optional2Initial physical allocation - (percentage) - The driver creates - thin-provisioned volumes by - default. The - storwize_svc_vol_rsize - flag defines the initial physical - allocation percentage for - thin-provisioned volumes, or if set - to -1, the - driver creates full allocated - volumes. More details about the - available options are available in - the Storwize family and SVC - documentation. -
- storwize_svc_vol_warning - Optional0 (disabled)Space allocation warning threshold - (percentage)
- storwize_svc_vol_autoexpand - OptionalTrueEnable or disable volume auto expand - - Defines whether - thin-provisioned volumes can be - auto expanded by the storage - system, a value of - True means that - auto expansion is enabled, a value - of False - disables auto expansion. Details - about this option can be found in - the –autoexpand - flag of the Storwize family and SVC - command line interface - mkvdisk - command. -
- storwize_svc_vol_grainsize - Optional256Volume grain size in - KB
- storwize_svc_vol_compression - - OptionalFalseEnable or disable Real-time - Compression - Defines whether Real-time - Compression is used for the volumes - created with OpenStack. Details on - Real-time Compression can be found - in the Storwize family and SVC - documentation. The Storwize or SVC - system must have compression - enabled for this feature to - work. - -
- storwize_svc_vol_easytier - OptionalTrueEnable or disable Easy Tier - - Defines whether Easy Tier is - used for the volumes created with - OpenStack. Details on EasyTier can - be found in the Storwize family and - SVC documentation. The Storwize or - SVC system must have Easy Tier - enabled for this feature to - work. -
- storwize_svc_vol_iogrp - Optional0The I/O group in which to allocate - vdisks
- storwize_svc_flashcopy_timeout - - Optional120FlashCopy timeout threshold - - The driver wait timeout - threshold when creating an - OpenStack snapshot. This is - actually the maximum amount of time - that the driver waits for the - Storwize family or SVC system to - prepare a new FlashCopy mapping. - The driver accepts a maximum wait - time of 600 seconds (10 - minutes). - (seconds)
- storwize_svc_connection_protocol - - OptionaliSCSIConnection protocol to use - (currently supports 'iSCSI' or - 'FC')
- storwize_svc_iscsi_chap_enabled - - OptionalTrueConfigure CHAP authentication for - iSCSI connections
- storwize_svc_multipath_enabled - - OptionalFalseEnable multipath for FC connections - - Multipath for iSCSI - connections requires no - storage-side configuration and is - enabled if the compute host has - multipath configured. -
- storwize_svc_multihost_enabled - - OptionalTrueEnable mapping vdisks to multiple - hosts - This option allows the driver - to map a vdisk to more than one - host at a time. This scenario - occurs during migration of a - virtual machine with an attached - volume; the volume is - simultaneously mapped to both the - source and destination compute - hosts. If your deployment does not - require attaching vdisks to - multiple hosts, setting this flag - to False will provide added - safety. -
- storwize_svc_vol_nofmtdisk - - OptionalFalseEnable or disable fast format - - Defines whether or not - the fast formatting of - thick-provisioned volumes - is disabled at creation. - The default value is - False and - a value of True - means that fast format is - disabled. Details about this - option can be found in the - –nofmtdisk - flag of the Storwize family and SVC - command line interface - mkvdisk - command. -
- -
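Bringing the required flags from the table together, a minimal password-based configuration might look like the following sketch; the host name, credentials, and pool name are placeholders, and the named section only matters if multiple back ends are enabled:
[storwize-svc-1]
volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
san_ip = svc-cluster.example.com
san_login = openstack
san_password = PASSWORD
storwize_svc_volpool_name = openstackpool
storwize_svc_connection_protocol = iSCSI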
- - Placement with volume types - The IBM Storwize/SVC driver exposes capabilities - that can be added to the extra - specs of volume types, and used by the - filter scheduler to determine placement of new - volumes. Make sure to prefix these keys with - capabilities: to indicate that - the scheduler should use them. The following - extra specs are - supported: - - - capabilities:volume_back-end_name - Specify - a specific back-end where the volume should be - created. The back-end name is a concatenation - of the name of the IBM Storwize/SVC storage - system as shown in - lssystem, an underscore, - and the name of the pool (mdisk group). For - example: - capabilities:volume_back-end_name=myV7000_openstackpool - - - capabilities:compression_support - Specify a - back-end according to compression support. A - value of True should be - used to request a back-end that supports - compression, and a value of - False will request a - back-end that does not support compression. If - you do not have constraints on compression - support, do not set this key. Note that - specifying True does not - enable compression; it only requests that the - volume be placed on a back-end that supports - compression. Example syntax: - capabilities:compression_support='<is> True' - - - capabilities:easytier_support - Similar - semantics as the - compression_support - key, but for specifying according to support - of the Easy Tier feature. Example syntax: - capabilities:easytier_support='<is> True' - - - capabilities:storage_protocol - Specifies - the connection protocol used to attach volumes - of this type to instances. Legal values are - iSCSI and - FC. This extra - specs value is used for both - placement and setting the protocol used for - this volume. In the example syntax, note - <in> is used as opposed to <is> - used in the previous examples. - capabilities:storage_protocol='<in> FC' - - - - - Configure per-volume creation options - Volume types can also be used to pass options to the - IBM Storwize/SVC driver, which over-ride the default - values set in the configuration file. Contrary to the - previous examples where the "capabilities" scope was - used to pass parameters to the Cinder scheduler, - options can be passed to the IBM Storwize/SVC driver - with the "drivers" scope. - The following extra specs keys - are supported by the IBM Storwize/SVC driver: - - - rsize - - - warning - - - autoexpand - - - grainsize - - - compression - - - easytier - - - multipath - - - iogrp - - - These keys have the same semantics as their - counterparts in the configuration file. They are set - similarly; for example, rsize=2 or - compression=False. 
 -         -         -             Example: Volume types -             In the following example, we create a volume type to -                 specify a controller that supports iSCSI and -                 compression, to use iSCSI when attaching the volume, -                 and to enable compression: -             $ cinder type-create compressed
$ cinder type-key compressed set capabilities:storage_protocol='<in> iSCSI' capabilities:compression_support='<is> True' drivers:compression=True -             We can then create a 50 GB volume using this -                 type: -             $ cinder create --display-name "compressed volume" --volume-type compressed 50 -  -             Volume types can be used, for example, to provide -                 users with different -                 -                     -                         performance levels (such as allocating -                             entirely on an HDD tier, using Easy Tier for -                             an HDD-SSD mix, or allocating entirely on an -                             SSD tier) -                     -                     -                         resiliency levels (such as allocating -                             volumes in pools with different RAID -                             levels) -                     -                     -                         features (such as enabling or disabling -                             Real-time Compression) -                     -                 -             -         -         -             QoS -             The Storwize driver provides QoS support for storage -                 volumes by controlling the amount of I/O. QoS is enabled by -                 editing the /etc/cinder/cinder.conf file and setting the -                 storwize_svc_allow_tenant_qos option to -                 True. -             There are three ways to set the Storwize IOThrottling -                 parameter for storage volumes: -                 -                     -                         Add the qos:IOThrottling key -                             to a QoS specification and associate it with a volume -                             type. -                     -                     -                         Add the qos:IOThrottling key to -                             an extra specification of a volume type. -                     -                     -                         Add the qos:IOThrottling key to -                             the storage volume metadata. -                     -                 -             -             If you change a volume type with QoS to a new -                 volume type without QoS, the QoS configuration settings are -                 removed. -             -         -
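For example, the first method listed above (a QoS specification associated with a volume type) could be set up as follows; the specification name, volume type name, and IOPS value are illustrative placeholders:
$ cinder qos-create storwize-qos qos:IOThrottling=1000
$ cinder type-create storwize-throttled
$ cinder qos-associate QOS_SPEC_ID VOLUME_TYPE_ID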
-
- Operational notes for the Storwize family and SVC - driver - - Migrate volumes - In the context of OpenStack Block Storage's volume - migration feature, the IBM Storwize/SVC driver enables - the storage's virtualization technology. When - migrating a volume from one pool to another, the - volume will appear in the destination pool almost - immediately, while the storage moves the data in the - background. - - To enable this feature, both pools involved in a - given volume migration must have the same values - for extent_size. If the pools - have different values for - extent_size, the data will - still be moved directly between the pools (not - host-side copy), but the operation will be - synchronous. - - - - Extend volumes - The IBM Storwize/SVC driver allows for extending a - volume's size, but only for volumes without - snapshots. - - - Snapshots and clones - Snapshots are implemented using FlashCopy with no - background copy (space-efficient). Volume clones - (volumes created from existing volumes) are - implemented with FlashCopy, but with background copy - enabled. This means that volume clones are - independent, full copies. While this background copy - is taking place, attempting to delete or extend the - source volume will result in that operation waiting - for the copy to complete. - - - Volume retype - The IBM Storwize/SVC driver enables you to modify - volume types. When you modify volume types, you can - also change these extra specs properties: - - - rsize - - - warning - - - autoexpand - - - grainsize - - - compression - - - easytier - - - iogrp - - - nofmtdisk - - - - When you change the rsize, - grainsize or - compression properties, - volume copies are asynchronously synchronized on - the array. - - - To change the iogrp property, - IBM Storwize/SVC firmware version 6.4.0 or later - is required. - - -
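As an illustration of the retype behavior described above, the following sketch changes an existing volume to a type that enables compression; the volume ID and type name are placeholders:
$ cinder type-create storwize-compressed
$ cinder type-key storwize-compressed set drivers:compression=True
$ cinder retype VOLUME_ID storwize-compressed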
-
diff --git a/doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml b/doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml deleted file mode 100644 index 1fb4a38b05..0000000000 --- a/doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml +++ /dev/null @@ -1,30 +0,0 @@ -
-    IBM XIV and DS8000 volume driver -    -        -            The IBM Storage Driver for OpenStack is a Block Storage driver -            that supports IBM XIV and IBM DS8000 storage systems over Fibre -            Channel and iSCSI. -        -        -            Set the following in your cinder.conf, and -            use the following options to configure it. -            -            volume_driver = cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver -            -        -    -    -        To use the IBM Storage Driver for OpenStack, you must download -        and install the package available at: -        http://www.ibm.com/support/fixcentral/swg/selectFixes?parent=Enterprise%2BStorage%2BServers&product=ibm/Storage_Disk/XIV+Storage+System+%282810,+2812%29&release=All&platform=All&function=all -    -    -        For full documentation, refer to IBM's online documentation -        available at http://pic.dhe.ibm.com/infocenter/strhosts/ic/topic/com.ibm.help.strghosts.doc/nova-homepage.html. -    -
diff --git a/doc/config-reference/block-storage/drivers/lenovo-driver.xml b/doc/config-reference/block-storage/drivers/lenovo-driver.xml deleted file mode 100644 index 0af3b4b29a..0000000000 --- a/doc/config-reference/block-storage/drivers/lenovo-driver.xml +++ /dev/null @@ -1,253 +0,0 @@ - -
- Lenovo Fibre Channel and iSCSI drivers - - The LenovoFCDriver and - LenovoISCSIDriver Cinder drivers allow Lenovo S3200 or S2200 arrays - to be used for block storage in OpenStack deployments. - - - System requirements - - To use the Lenovo drivers, the following are required: - - - - Lenovo S3200 or S2200 array with: - - - - iSCSI or FC host interfaces - - - - G22x firmware or later - - - - - - Network connectivity between the OpenStack host and the array - management interfaces - - - - HTTPS or HTTP must be enabled on the array - - - - - - Supported operations - - - - Create, delete, attach, and detach volumes. - - - - Create, list, and delete volume snapshots. - - - - Create a volume from a snapshot. - - - - Copy an image to a volume. - - - - Copy a volume to an image. - - - - Clone a volume. - - - - Extend a volume. - - - - Migrate a volume with back-end assistance. - - - - Retype a volume. - - - - Manage and unmanage a volume. - - - - - - Configuring the array - - - - Verify that the array can be managed via an HTTPS connection. - HTTP can also be used if lenovo_api_protocol=http - is placed into the appropriate sections of the - cinder.conf file. - - Confirm that virtual pools A and B are present if you plan to - use virtual pools for OpenStack storage. - - - - Edit the cinder.conf file to define an - storage backend entry for each storage pool on the array that will be - managed by OpenStack. Each entry consists of a unique section name, - surrounded by square brackets, followed by options specified in - key=value format. - - - - The lenovo_backend_name value - specifies the name of the storage pool on the - array. - - - - The volume_backend_name option value - can be a unique value, if you wish to be able to assign volumes - to a specific storage pool on the array, or a name that's shared - among multiple storage pools to let the volume scheduler choose - where new volumes are allocated. - - - - The rest of the options will be repeated for each storage - pool in a given array: the appropriate Cinder driver name; IP - address or hostname of the array management interface; the - username and password of an array user account with - manage privileges; and the iSCSI IP addresses - for the array if using the iSCSI transport protocol. - - - - In the examples below, two backends are defined, one for pool A - and one for pool B, and a common - volume_backend_name is used so that a single volume - type definition can be used to allocate volumes from both - pools. 
- - - iSCSI example backend entries - - [pool-a] -lenovo_backend_name = A -volume_backend_name = lenovo-array -volume_driver = cinder.volume.drivers.lenovo.lenovo_iscsi.LenovoISCSIDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage -lenovo_iscsi_ips = 10.2.3.4,10.2.3.5 - -[pool-b] -lenovo_backend_name = B -volume_backend_name = lenovo-array -volume_driver = cinder.volume.drivers.lenovo.lenovo_iscsi.LenovoISCSIDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage -lenovo_iscsi_ips = 10.2.3.4,10.2.3.5 - - - - Fibre Channel example backend entries - - [pool-a] -lenovo_backend_name = A -volume_backend_name = lenovo-array -volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage - -[pool-b] -lenovo_backend_name = B -volume_backend_name = lenovo-array -volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver -san_ip = 10.1.2.3 -san_login = manage -san_password = !manage - - - - If HTTPS is not enabled in the array, include - lenovo_api_protocol = http in each of the backend - definitions. - - - - If HTTPS is enabled, you can enable certificate verification - with the option lenovo_verify_certificate=True. - You may also use the - lenovo_verify_certificate_path parameter to - specify the path to a CA_BUNDLE file containing CAs other than those - in the default list. - - - - Modify the [DEFAULT] section of the - cinder.conf file to add an - enabled_backends parameter specifying the backend - entries you added, and a default_volume_type - parameter specifying the name of a volume type that you will create in - the next step. - - - [DEFAULT] section changes - - [DEFAULT] - ... -enabled_backends = pool-a,pool-b -default_volume_type = lenovo - ... - - - - - Create a new volume type for each distinct - volume_backend_name value that you added to - cinder.conf. The example below assumes that the same - volume_backend_name=lenovo-array option was - specified in all of the entries, and specifies that the volume type - lenovo can be used to allocate volumes from any of - them. - Creating a volume type - - $ cinder type-create lenovo - - $ cinder type-key lenovo set - volume_backend_name=lenovo-array - - - - - After modifying cinder.conf, restart the - cinder-volume service. - - - - - - Driver-specific options - - The following table contains the configuration options that are - specific to the Lenovo drivers. - - - -
diff --git a/doc/config-reference/block-storage/drivers/lvm-volume-driver.xml b/doc/config-reference/block-storage/drivers/lvm-volume-driver.xml deleted file mode 100644 index 23cc9ac036..0000000000 --- a/doc/config-reference/block-storage/drivers/lvm-volume-driver.xml +++ /dev/null @@ -1,25 +0,0 @@ -
-LVM - The default volume back-end uses local volumes managed by LVM. - This driver supports different transport protocols to attach - volumes, currently iSCSI and iSER. - - Set the following in your - cinder.conf configuration file, and use the following - options to configure for iSCSI transport: - - - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - iscsi_protocol = iscsi - - Use the following options to configure for the iSER transport: - - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - iscsi_protocol = iser - - -
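For reference, a fuller backend stanza for the iSCSI case might look like the following sketch; the volume_group and iscsi_helper values are assumptions for a typical deployment and must match your host setup:
[lvm-1]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes   # LVM volume group that must already exist on the host (assumed name)
iscsi_protocol = iscsi
iscsi_helper = tgtadm           # or lioadm, depending on the iSCSI target service in use (assumption)
volume_backend_name = lvm-1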
diff --git a/doc/config-reference/block-storage/drivers/netapp-volume-driver.xml b/doc/config-reference/block-storage/drivers/netapp-volume-driver.xml deleted file mode 100644 index 24f3f537ff..0000000000 --- a/doc/config-reference/block-storage/drivers/netapp-volume-driver.xml +++ /dev/null @@ -1,623 +0,0 @@ -
- NetApp unified driver - The NetApp unified driver is a block storage driver that - supports multiple storage families and protocols. A storage - family corresponds to storage systems built on different - NetApp technologies such as clustered Data ONTAP, Data ONTAP - operating in 7-Mode, and E-Series. The storage protocol refers - to the protocol used to initiate data storage and access - operations on those storage systems like iSCSI and NFS. The - NetApp unified driver can be configured to provision and manage - OpenStack volumes on a given storage family using a specified - storage protocol. The OpenStack volumes can then be used for - accessing and storing data using the storage protocol on the - storage family system. The NetApp unified driver is an - extensible interface that can support new storage families and - protocols. - - With the Juno release of OpenStack, OpenStack Block Storage - has introduced the concept of "storage pools", in which a single - OpenStack Block Storage back end may present one or more logical - storage resource pools from which OpenStack Block Storage will - select as a storage location when provisioning volumes. - - In releases prior to Juno, the NetApp unified driver contained some - "scheduling" logic that determined which NetApp storage container - (namely, a FlexVol volume for Data ONTAP, or a dynamic disk pool for - E-Series) that a new OpenStack Block Storage volume would be - placed into. - - With the introduction of pools, all scheduling logic - is performed completely within the OpenStack Block Storage scheduler, - as each NetApp storage container is directly exposed to the OpenStack - Block Storage scheduler as a storage pool; whereas previously, the - NetApp unified driver presented an aggregated view to the scheduler - and made a final placement decision as to which NetApp storage - container the OpenStack Block Storage volume would be provisioned - into. - - -
- NetApp clustered Data ONTAP storage family - The NetApp clustered Data ONTAP storage family - represents a configuration group which provides OpenStack - compute instances access to clustered Data ONTAP storage - systems. At present it can be configured in OpenStack Block Storage to work - with iSCSI and NFS storage protocols. -
- NetApp iSCSI configuration for clustered Data - ONTAP - The NetApp iSCSI configuration for clustered Data - ONTAP is an interface from OpenStack to clustered Data - ONTAP storage systems for provisioning and managing - the SAN block storage entity; that is, a NetApp LUN - which can be accessed using the iSCSI protocol. - The iSCSI configuration for clustered Data ONTAP is - a direct interface from OpenStack Block Storage to the clustered Data - ONTAP instance and as such does not require additional - management software to achieve the desired - functionality. It uses NetApp APIs to interact with - the clustered Data ONTAP instance. - - Configuration options for clustered Data ONTAP - family with iSCSI protocol - Configure the volume driver, storage family and - storage protocol to the NetApp unified driver, - clustered Data ONTAP, and iSCSI respectively by - setting the , - and - - options in cinder.conf as - follows: - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_cluster -netapp_storage_protocol = iscsi -netapp_vserver = openstack-vserver -netapp_server_hostname = myhostname -netapp_server_port = port -netapp_login = username -netapp_password = password - - - To use the iSCSI protocol, you must override the - default value of - with - iscsi. - - - - - If you specify an account in the - that only - has virtual storage server (Vserver) - administration privileges (rather than - cluster-wide administration privileges), some - advanced features of the NetApp unified driver - will not work and you may see warnings in the - OpenStack Block Storage logs. - - - For more information on these options and - other deployment and operational scenarios, - visit the - NetApp OpenStack Deployment and Operations Guide. - - - -
-
- NetApp NFS configuration for clustered Data - ONTAP - The NetApp NFS configuration for clustered Data - ONTAP is an interface from OpenStack to a clustered - Data ONTAP system for provisioning and managing - OpenStack volumes on NFS exports provided by the - clustered Data ONTAP system that are accessed using - the NFS protocol. - The NFS configuration for clustered Data ONTAP is a - direct interface from OpenStack Block Storage to the clustered Data - ONTAP instance and as such does not require any - additional management software to achieve the desired - functionality. It uses NetApp APIs to interact with - the clustered Data ONTAP instance. - - Configuration options for the clustered Data - ONTAP family with NFS protocol - Configure the volume driver, storage family, and - storage protocol to NetApp unified driver, - clustered Data ONTAP, and NFS respectively by - setting the , - and - - options in cinder.conf as - follows: - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_cluster -netapp_storage_protocol = nfs -netapp_vserver = openstack-vserver -netapp_server_hostname = myhostname -netapp_server_port = port -netapp_login = username -netapp_password = password -nfs_shares_config = /etc/cinder/nfs_shares - - - Additional NetApp NFS configuration options are - shared with the generic NFS driver. These options - can be found here: - . - - - - If you specify an account in the - that only - has virtual storage server (Vserver) - administration privileges (rather than - cluster-wide administration privileges), some - advanced features of the NetApp unified driver - will not work and you may see warnings in the - OpenStack Block Storage logs. - - - - NetApp NFS Copy Offload client - A feature was added in the Icehouse release of the NetApp unified - driver that enables Image Service images to be efficiently copied to a - destination Block Storage volume. When the Block Storage and Image Service are - configured to use the NetApp NFS Copy Offload client, a controller-side copy - will be attempted before reverting to downloading the image from the - Image Service. This improves image provisioning times while reducing the - consumption of bandwidth and CPU cycles on the host(s) running the Image - and Block Storage services. This is due to the copy operation being performed - completely within the storage cluster. - The NetApp NFS Copy Offload client can be used in either of the following - scenarios: - - - The Image Service is configured to store images in an NFS share that - is exported from a NetApp FlexVol volume and - the destination for the new Block Storage volume will be on an NFS share - exported from a different FlexVol volume than the one used by - the Image Service. Both FlexVols must be located within the same cluster. - - - The source image from the Image Service has already been cached in an NFS - image cache within a Block Storage backend. The cached image resides on a - different FlexVol volume than the destination for the new Block Storage - volume. Both FlexVols must be located within the same cluster. - - - To use this feature, you must configure the - Image Service, as follows: - - - Set the - configuration option to - file. - - - Set the - configuration option to the - path to the Image Service NFS export. - - - Set the - configuration option to - True. - - - Set the - configuration option to - True. - - - Set the - configuration option to a - metadata file. 
The metadata file should - contain a JSON object that contains the - correct information about the NFS export - used by the Image Service, similar - to: - { - "share_location": "nfs://192.168.0.1/myGlanceExport", - "mount_point": "/var/lib/glance/images", - "type": "nfs" -} - - - To use this feature, you must configure the - Block Storage service, as follows: - - - Set the - configuration option to - the path to the NetApp Copy Offload binary. - - - Set the - configuration option to - 2. - - - - This feature requires that: - - - The storage system must have Data - ONTAP v8.2 or greater installed. - - - The vStorage feature must be enabled - on each storage virtual machine (SVM, also - known as a Vserver) that is permitted to - interact with the copy offload - client. - - - To configure the copy offload workflow, - enable NFS v4.0 or greater - and export it from the SVM. - - - - - To download the NetApp copy offload binary to be utilized - in conjunction with the - configuration option, please visit the Utility Toolchest page at - the - NetApp Support portal (login is required). - - - For more information on these options and - other deployment and operational scenarios, - visit the - NetApp OpenStack Deployment and Operations Guide. - - - -
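As a sketch of the copy offload wiring described above: the option names below are the ones commonly used for this feature in glance-api.conf and cinder.conf, but they should be verified against the configuration reference for your release, and the paths are placeholders only.
# glance-api.conf (Image service)
default_store = file
filesystem_store_datadir = /var/lib/glance/images
filesystem_store_metadata_file = /etc/glance/filesystem_store_metadata.json
show_image_direct_url = True
show_multiple_locations = True

# cinder.conf (Block Storage service, NetApp NFS backend stanza)
netapp_copyoffload_tool_path = /usr/local/bin/na_copyoffload_64
glance_api_version = 2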
-
- NetApp-supported extra specs for clustered Data - ONTAP - Extra specs enable vendors to specify extra filter - criteria that the Block Storage scheduler uses when it - determines which volume node should fulfill a volume - provisioning request. When you use the NetApp unified - driver with a clustered Data ONTAP storage system, you - can leverage extra specs with OpenStack Block Storage volume types to - ensure that OpenStack Block Storage volumes are created on storage back - ends that have certain properties. For example, when - you configure QoS, mirroring, or compression for a - storage back end. - Extra specs are associated with OpenStack Block Storage volume types, - so that when users request volumes of a particular - volume type, the volumes are created on storage back - ends that meet the list of requirements. For example, - the back ends have the available space or extra specs. - You can use the specs in the following table when you - define OpenStack Block Storage volume types by using the - cinder type-key command. - -
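For example, a volume type that requests thin-provisioned, compressed back ends could be defined as follows; the netapp_thin_provisioned and netapp_compression keys are examples of the NetApp-published extra specs and should be checked against the table referenced above:
$ cinder type-create netapp-thin-compressed
$ cinder type-key netapp-thin-compressed set netapp_thin_provisioned=true netapp_compression=true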
-
-
- NetApp Data ONTAP operating in 7-Mode storage - family - The NetApp Data ONTAP operating in 7-Mode storage family - represents a configuration group which provides OpenStack - compute instances access to 7-Mode storage systems. At - present it can be configured in OpenStack Block Storage to work with iSCSI - and NFS storage protocols. -
- NetApp iSCSI configuration for Data ONTAP operating - in 7-Mode - The NetApp iSCSI configuration for Data ONTAP - operating in 7-Mode is an interface from OpenStack to - Data ONTAP operating in 7-Mode storage systems for - provisioning and managing the SAN block storage - entity, that is, a LUN which can be accessed using - iSCSI protocol. - The iSCSI configuration for Data ONTAP operating in - 7-Mode is a direct interface from OpenStack to Data - ONTAP operating in 7-Mode storage system and it does - not require additional management software to achieve - the desired functionality. It uses NetApp ONTAPI to - interact with the Data ONTAP operating in 7-Mode - storage system. - - Configuration options for the Data ONTAP - operating in 7-Mode storage family with iSCSI - protocol - Configure the volume driver, storage family and - storage protocol to the NetApp unified driver, - Data ONTAP operating in 7-Mode, and iSCSI - respectively by setting the - , - and - - options in cinder.conf as - follows: - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_7mode -netapp_storage_protocol = iscsi -netapp_server_hostname = myhostname -netapp_server_port = 80 -netapp_login = username -netapp_password = password - - - To use the iSCSI protocol, you must override the - default value of - with - iscsi. - - - - - For more information on these options and - other deployment and operational scenarios, - visit the - NetApp OpenStack Deployment and Operations Guide. - - - -
-
- NetApp NFS configuration for Data ONTAP operating - in 7-Mode - The NetApp NFS configuration for Data ONTAP - operating in 7-Mode is an interface from OpenStack to - Data ONTAP operating in 7-Mode storage system for - provisioning and managing OpenStack volumes on NFS - exports provided by the Data ONTAP operating in 7-Mode - storage system which can then be accessed using NFS - protocol. - The NFS configuration for Data ONTAP operating in - 7-Mode is a direct interface from OpenStack Block Storage to the Data - ONTAP operating in 7-Mode instance and as such does - not require any additional management software to - achieve the desired functionality. It uses NetApp - ONTAPI to interact with the Data ONTAP operating in - 7-Mode storage system. - - Configuration options for the Data ONTAP - operating in 7-Mode family with NFS - protocol - Configure the volume driver, storage family, and - storage protocol to the NetApp unified driver, - Data ONTAP operating in 7-Mode, and NFS - respectively by setting the - , - and - - options in cinder.conf as - follows: - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_7mode -netapp_storage_protocol = nfs -netapp_server_hostname = myhostname -netapp_server_port = 80 -netapp_login = username -netapp_password = password -nfs_shares_config = /etc/cinder/nfs_shares - - - Additional NetApp NFS configuration options - are shared with the generic NFS driver. For a - description of these, see - . - - - - For more information on these options and - other deployment and operational scenarios, - visit the - NetApp OpenStack Deployment and Operations Guide. - - - -
-
-
- NetApp E-Series storage family - The NetApp E-Series storage family represents a - configuration group which provides OpenStack - compute instances access to E-Series storage systems. - At present it can be configured in OpenStack Block Storage to work - with the iSCSI storage protocol. -
- NetApp iSCSI configuration for E-Series - The NetApp iSCSI configuration for E-Series - is an interface from OpenStack to E-Series storage - systems for provisioning and managing the SAN block - storage entity; that is, a NetApp LUN which can be - accessed using the iSCSI protocol. - The iSCSI configuration for E-Series is an interface - from OpenStack Block Storage to the E-Series proxy instance and as such - requires the deployment of the proxy instance in order to - achieve the desired functionality. The driver uses REST - APIs to interact with the E-Series proxy instance, which in - turn interacts directly with the E-Series controllers. - The use of multipath and DM-MP are required when using the - OpenStack Block Storage driver for E-Series. In order for - OpenStack Block Storage and OpenStack Compute to take advantage - of multiple paths, the following configuration options must be - correctly configured: - - - The option - should be set to True in the - cinder.conf file within the - driver-specific stanza (for example, - [myDriver]). - - - The option - should be set to True in the - nova.conf file within the - [libvirt] stanza. - - - - Configuration options for E-Series storage - family with iSCSI protocol - Configure the volume driver, storage family, and - storage protocol to the NetApp unified driver, - E-Series, and iSCSI respectively by setting the - , - and - - options in cinder.conf as - follows: - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = eseries -netapp_storage_protocol = iscsi -netapp_server_hostname = myhostname -netapp_server_port = 80 -netapp_login = username -netapp_password = password -netapp_controller_ips = 1.2.3.4,5.6.7.8 -netapp_sa_password = arrayPassword -netapp_storage_pools = pool1,pool2 -use_multipath_for_image_xfer = True - - - To use the E-Series driver, you must override - the default value of - with - eseries. - - - - - To use the iSCSI protocol, you must override the - default value of - with - iscsi. - - - - - For more information on these options and - other deployment and operational scenarios, - visit the - NetApp OpenStack Deployment and Operations Guide. - - - -
-
-
- Upgrading prior NetApp drivers to the NetApp unified - driver - NetApp introduced a new unified block storage driver in - Havana for configuring different storage families and - storage protocols. This requires defining upgrade path for - NetApp drivers which existed in releases prior to Havana. - This section covers the upgrade configuration for NetApp - drivers to the new unified configuration and a list of - deprecated NetApp drivers. -
- Upgraded NetApp drivers - This section describes how to update OpenStack Block Storage - configuration from a pre-Havana release to the - unified driver format. - - Driver upgrade configuration - - - NetApp iSCSI direct driver for Clustered - Data ONTAP in Grizzly (or earlier). - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver - NetApp unified driver - configuration. - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_cluster -netapp_storage_protocol = iscsi - - - NetApp NFS direct driver for Clustered - Data ONTAP in Grizzly (or earlier). - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver - NetApp unified driver - configuration. - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_cluster -netapp_storage_protocol = nfs - - - NetApp iSCSI direct driver for Data - ONTAP operating in 7-Mode storage - controller in Grizzly (or earlier) - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver - NetApp unified driver - configuration - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_7mode -netapp_storage_protocol = iscsi - - - NetApp NFS direct driver for Data ONTAP - operating in 7-Mode storage controller in - Grizzly (or earlier) - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver - NetApp unified driver - configuration - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver -netapp_storage_family = ontap_7mode -netapp_storage_protocol = nfs - - - - -
-
- Deprecated NetApp drivers - This section lists the NetApp drivers in earlier - releases that are deprecated in Havana. - - - NetApp iSCSI driver for clustered Data - ONTAP. - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver - - - NetApp NFS driver for clustered Data - ONTAP. - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppCmodeNfsDriver - - - NetApp iSCSI driver for Data ONTAP operating - in 7-Mode storage controller. - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver - - - NetApp NFS driver for Data ONTAP operating - in 7-Mode storage controller. - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppNFSDriver - - - - For support information on deprecated NetApp drivers - in the Havana release, visit the - NetApp OpenStack Deployment and Operations Guide. - - -
-
-
diff --git a/doc/config-reference/block-storage/drivers/nfs-volume-driver.xml b/doc/config-reference/block-storage/drivers/nfs-volume-driver.xml deleted file mode 100644 index 4ea3fd718e..0000000000 --- a/doc/config-reference/block-storage/drivers/nfs-volume-driver.xml +++ /dev/null @@ -1,164 +0,0 @@ -
- NFS driver - The Network File System (NFS) is a distributed file system - protocol originally developed by Sun Microsystems in 1984. An - NFS server exports one or more of its - file systems, known as shares. An NFS - client can mount these exported shares on its own file system. - You can perform file actions on this mounted remote file - system as if the file system were local. -
- How the NFS driver works - The NFS driver, and other drivers based on it, work - quite differently than a traditional block storage - driver. - The NFS driver does not actually allow an instance to - access a storage device at the block level. Instead, files - are created on an NFS share and mapped to instances, which - emulates a block device. This works in a similar way to - QEMU, which stores instances in the - /var/lib/nova/instances - directory. -
-
- Enable the NFS driver and related options - To use Cinder with the NFS driver, first set the - volume_driver in - cinder.conf: - volume_driver=cinder.volume.drivers.nfs.NfsDriver - The following table contains the options supported by - the NFS driver. - - - As of the Icehouse release, the NFS driver (and other - drivers based off it) will attempt to mount shares - using version 4.1 of the NFS protocol (including pNFS). If the - mount attempt is unsuccessful due to a lack of client or - server support, a subsequent mount attempt that requests the - default behavior of the mount.nfs command - will be performed. On most distributions, the default behavior - is to attempt mounting first with NFS v4.0, then silently fall - back to NFS v3.0 if necessary. If the - configuration option - contains a request for a specific version of NFS to be used, - or if specific options are specified in the shares - configuration file specified by the - configuration option, the - mount will be attempted as requested with no subsequent - attempts. - -
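A minimal sketch of the NFS-related options discussed here and in the next section; the paths are examples, and nfs_mount_options is the option referred to in the note above, only needed when you want to force specific mount behavior:
volume_driver = cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config = /etc/cinder/shares.txt
nfs_mount_point_base = /var/lib/cinder/nfs
# nfs_mount_options = vers=4,minorversion=1   # optional; requests a specific NFS version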
-
- How to use the NFS driver - - - Access to one or more NFS servers. Creating an - NFS server is outside the scope of this document. - This example assumes access to the following NFS - servers and mount points: - - - 192.168.1.200:/storage - - - 192.168.1.201:/storage - - - 192.168.1.202:/storage - - - This example demonstrates the use of with this - driver with multiple NFS servers. Multiple servers - are not required. One is usually enough. - - - Add your list of NFS servers to the file you - specified with the - nfs_shares_config option. - For example, if the value of this option was set - to /etc/cinder/shares.txt, - then: - # cat /etc/cinder/shares.txt -192.168.1.200:/storage -192.168.1.201:/storage -192.168.1.202:/storage - Comments are allowed in this file. They begin - with a #. - - - Configure the - nfs_mount_point_base - option. This is a directory where cinder-volume - mounts all NFS shares stored in - shares.txt. For this - example, /var/lib/cinder/nfs is - used. You can, of course, use the default value of - $state_path/mnt. - - - Start the cinder-volume service. - /var/lib/cinder/nfs should - now contain a directory for each NFS share - specified in shares.txt. The - name of each directory is a hashed name: - # ls /var/lib/cinder/nfs/ -... -46c5db75dc3a3a50a10bfd1a456a9f3f -... - - - You can now create volumes as you normally - would: - $ nova volume-create --display-name myvol 5 -# ls /var/lib/cinder/nfs/46c5db75dc3a3a50a10bfd1a456a9f3f -volume-a8862558-e6d6-4648-b5df-bb84f31c8935 - This volume can also be attached and deleted - just like other volumes. However, snapshotting is - not supported. - - -
- - NFS driver notes - - - cinder-volume manages the - mounting of the NFS shares as well as volume - creation on the shares. Keep this in mind when - planning your OpenStack architecture. If you have - one master NFS server, it might make sense to only - have one cinder-volume service to handle - all requests to that NFS server. However, if that - single server is unable to handle all requests, - more than one cinder-volume service is needed - as well as potentially more than one NFS - server. - - - Because data is stored in a file and not - actually on a block storage device, you might not - see the same IO performance as you would with a - traditional block storage driver. Please test - accordingly. - - - Despite possible IO performance loss, having - volume data stored in a file might be beneficial. - For example, backing up volumes can be as easy as - copying the volume files. - - Regular IO flushing and syncing still - stands. - - - - -
diff --git a/doc/config-reference/block-storage/drivers/nimble-volume-driver.xml b/doc/config-reference/block-storage/drivers/nimble-volume-driver.xml deleted file mode 100644 index adea634498..0000000000 --- a/doc/config-reference/block-storage/drivers/nimble-volume-driver.xml +++ /dev/null @@ -1,175 +0,0 @@ - -
- Nimble Storage volume driver - - Nimble Storage fully integrates with the OpenStack platform through - the Nimble Cinder driver, allowing a host to configure and manage Nimble - Storage array features through Block Storage interfaces. - Support for the Liberty release is available from Nimble OS 2.3.8 or - later. -
- Supported operations - - - Create, delete, clone, attach, and detach volumes - - - Create and delete volume snapshots - - - Create a volume from a snapshot - - - Copy an image to a volume - - - Copy a volume to an image - - - Extend a volume - - - Get volume statistics - - - Manage and unmanage a volume - - - Enable encryption and default performance policy for a volume-type - using extra-specs - - - - The Nimble Storage implementation uses iSCSI only. Fibre Channel - is not supported. - -
-
- Nimble Storage driver configuration - Update the file /etc/cinder/cinder.conf with - the given configuration. - In case of a basic (single back-end) configuration, add the - parameters within the [default] section as follows. - - -[default] -san_ip = NIMBLE_MGMT_IP -san_login = NIMBLE_USER -san_password = NIMBLE_PASSWORD -volume_driver = cinder.volume.drivers.nimble.NimbleISCSIDriver - - In case of multi back-end configuration, for example, configuration - which supports multiple Nimble Storage arrays or a single Nimble Storage - array with arrays from other vendors, use the following parameters. - - -[default] -enabled_backends = Nimble-Cinder - -[Nimble-Cinder] -san_ip = NIMBLE_MGMT_IP -san_login = NIMBLE_USER -san_password = NIMBLE_PASSWORD -volume_driver = cinder.volume.drivers.nimble.NimbleISCSIDriver -volume_backend_name = NIMBLE_BACKEND_NAME - - In case of multi back-end configuration, Nimble Storage volume-type - is created and associated with a back-end name as follows. - - Single back-end configuration users do not need to create the - volume-type. - - $ cinder type-create - NIMBLE_VOLUME_TYPE -$ cinder type-key NIMBLE_VOLUME_TYPE - set volume_backend_name=NIMBLE_BACKEND_NAME - - This section explains the variables used above: - - - NIMBLE_MGMT_IP - - Management IP address of Nimble Storage array/group. - - - - NIMBLE_USER - - Nimble Storage account login with minimum "power user"(admin) - privilege if RBAC is used. - - - - NIMBLE_PASSWORD - - Password of the admin account for nimble array. - - - - NIMBLE_BACKEND_NAME - - A volume back-end name which is specified in - cinder.conf. This is also used while assigning a back- - end name to the Nimble volume-type. - - - - NIMBLE_VOLUME_TYPE - - The Nimble volume-type which is created from the CLI and - associated with NIMBLE_BACKEND_NAME. - - - - - - Restart the cinder-api, cinder-scheduler, and cinder-volume - services after updating the cinder.conf. - -
-
- Nimble driver extra spec options - The Nimble volume driver also supports the following extra spec - options: - - - - 'nimble:encryption'='yes' - - Used to enable encryption for a volume-type - - - - 'nimble:perfpol-name'=PERF_POL_NAME - - - PERF_POL_NAME is the name of a performance policy which exists - on the Nimble array and should be enabled for every volume in a - volume-type - - - - 'nimble:multi-initiator'='true' - - Used to enable multi-initiator access for a volume-type - - - - These extra-specs can be enabled by using the following command: - - $ cinder type-key \ - VOLUME_TYPE set KEY=\ - VALUE - VOLUME_TYPE is the Nimble volume-type and KEY and VALUE are the - options mentioned above. -
-
- Configuration options - The Nimble storage driver supports these configuration options: - - -
-
diff --git a/doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml b/doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml deleted file mode 100644 index 92f0af93e1..0000000000 --- a/doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml +++ /dev/null @@ -1,119 +0,0 @@ -
-    ProphetStor Fibre Channel and iSCSI drivers -    The ProphetStor Fibre Channel and iSCSI drivers add support for -        ProphetStor Flexvisor through OpenStack Block Storage. ProphetStor -        Flexvisor enables commodity x86 hardware as software-defined storage -        leveraging well-proven ZFS for disk management to provide enterprise-grade -        storage services such as snapshots, data protection with -        different RAID levels, replication, and deduplication. -    The DPLFCDriver and -            DPLISCSIDriver drivers run -        volume operations by communicating with the ProphetStor storage -        system over HTTPS. -
- Supported operations - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - -
-
-        Enable the Fibre Channel or iSCSI drivers -        The DPLFCDriver and -                DPLISCSIDriver are installed -            with the OpenStack software. -        -            -                Query the storage pool ID that will be used to configure the dpl_pool option in -                    cinder.conf. -                -                    -                        Log on to the storage system with administrator -                            access. -                        $ ssh root@STORAGE IP ADDRESS -                    -                    -                        View the ID of the current usable pool. -                        $ flvcli show pool list
- d5bd40b58ea84e9da09dcf25a01fdc07 : default_pool_dc07 -                    -                    -                        Use d5bd40b58ea84e9da09dcf25a01fdc07 -                            to configure the dpl_pool option in /etc/cinder/cinder.conf. -                    -                -                -                    For other management commands, see the output of flvcli -h. -                -            -            -                Make the following changes in the -                    /etc/cinder/cinder.conf -                    file on the volume node. -                # IP address of SAN controller (string value)
san_ip=STORAGE IP ADDRESS

# Username for SAN controller (string value)
san_login=USERNAME

# Password for SAN controller (string value)
san_password=PASSWORD

# Use thin provisioning for SAN volumes? (boolean value)
san_thin_provision=true

# The port that the iSCSI daemon is listening on. (integer value)
iscsi_port=3260

# DPL pool uuid in which DPL volumes are stored. (string value)
dpl_pool=d5bd40b58ea84e9da09dcf25a01fdc07

# DPL port number. (integer value)
dpl_port=8357

# Uncomment one of the next two options to enable Fibre Channel or iSCSI
# FIBRE CHANNEL (uncomment the next line to enable the FC driver)
#volume_driver=cinder.volume.drivers.prophetstor.dpl_fc.DPLFCDriver
# iSCSI (uncomment the next line to enable the iSCSI driver)
#volume_driver=cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver -            -            -                Save the changes to the -                    /etc/cinder/cinder.conf file and -                    restart the cinder-volume service. -            -        -        The ProphetStor Fibre Channel or iSCSI drivers are now -            enabled on your OpenStack system. If you experience -            problems, review the Block Storage service log files for -            errors. -        The following table contains the options supported -            by the ProphetStor storage driver. -        -
-
diff --git a/doc/config-reference/block-storage/drivers/pure-storage-driver.xml b/doc/config-reference/block-storage/drivers/pure-storage-driver.xml deleted file mode 100644 index 812bf55691..0000000000 --- a/doc/config-reference/block-storage/drivers/pure-storage-driver.xml +++ /dev/null @@ -1,162 +0,0 @@ - -
- Pure Storage iSCSI and Fibre Channel volume drivers - - The Pure Storage FlashArray volume drivers for OpenStack Block Storage interact with - configured Pure Storage arrays and support various operations. - Support for iSCSI storage protocol is available with the PureISCSIDriver Volume Driver class, - and Fibre Channel with PureFCDriver. - All drivers are compatible with Purity FlashArrays that support the REST API - version 1.2, 1.3, or 1.4 (Purity 4.0.0 and newer). -
- Limitations and known issues - If you do not set up the nodes hosting instances to use multipathing, all network - connectivity will use a single physical port on the array. In addition to - significantly limiting the available bandwidth, this means you do not have the - high-availability and non-disruptive upgrade benefits provided by FlashArray. - Workaround: You must set up multipathing on your hosts. -
-
- Supported operations - - - Create, delete, attach, detach, retype, clone, and extend volumes. - - - Create a volume from snapshot. - - - Create, list, and delete volume snapshots. - - - Create, list, update, and delete consistency groups. - - - Create, list, and delete consistency group snapshots. - - - Manage and unmanage a volume. - - - Manage and unmanage a snapshot. - - - Get volume statistics. - - - Create a thin provisioned volume. - - -
-
- Configure OpenStack and Purity - You need to configure both your Purity array and your OpenStack cluster. - - These instructions assume that the cinder-api and cinder-scheduler services are installed and configured in your OpenStack cluster. - - - - Configure the OpenStack Block Storage service - In these steps, you will edit the cinder.conf file to configure OpenStack Block Storage - service to enable multipathing and to use the Pure Storage FlashArray as back-end storage. - - - Retrieve an API token from Purity - The OpenStack Block Storage service configuration requires an API token from Purity. Actions - performed by the volume driver use this token for authorization. Also, Purity logs the - volume driver's actions as being performed by the user who owns this API token. - If you created a Purity user account that is dedicated to managing your OpenStack - Block Storage volumes, copy the API token from that user account. - Use the appropriate create or list command below to display and copy the Purity API - token: - - - To create a new API token: - $ pureadmin create --api-token USER - The following is an example output: - - $ pureadmin create --api-token pureuser -Name API Token Created -pureuser 902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 2014-08-04 14:50:30 - - - - To list an existing API token: - $ pureadmin list --api-token --expose USER - The following is an example output: - - $ pureadmin list --api-token --expose pureuser -Name API Token Created -pureuser 902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 2014-08-04 14:50:30 - - - - - - Copy the API token retrieved - (902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 from the examples above) to use in the next step. - - - Edit the OpenStack Block Storage service configuration file - The following sample /etc/cinder/cinder.conf configuration lists - the relevant settings for a typical Block Storage service using a single Pure Storage - array: - -[DEFAULT] -.... -enabled_backends = puredriver-1 -default_volume_type = puredriver-1 -.... - -[puredriver-1] -volume_backend_name = puredriver-1 -volume_driver = PURE_VOLUME_DRIVER -san_ip = IP_PURE_MGMT -pure_api_token = PURE_API_TOKEN -use_multipath_for_image_xfer = True - Replace the following variables accordingly: - - - PURE_VOLUME_DRIVER - - Use either cinder.volume.drivers.pure.PureISCSIDriver for iSCSI - or cinder.volume.drivers.pure.PureFCDriver for Fibre Channel connectivity. - - - - IP_PURE_MGMT - - The IP address of the Pure Storage array's management interface or a domain name - that resolves to that IP address. - - - - PURE_API_TOKEN - - The Purity Authorization token that the volume driver uses to perform volume - management on the Pure Storage array. - - - - - - - - - The volume driver automatically creates purity host objects for initiators as needed. If - CHAP authentication is enabled via the setting, you must ensure - there are no manually created host objects with IQN's that will be used by the OpenStack Block - Storage. The driver will only modify credentials on hosts that it manages. - - - - If using the PureFCDriver it is recommended to use the OpenStack Block Storage Fibre Channel Zone Manager. - - -
-
diff --git a/doc/config-reference/block-storage/drivers/quobyte-driver.xml b/doc/config-reference/block-storage/drivers/quobyte-driver.xml deleted file mode 100644 index a7d8013c85..0000000000 --- a/doc/config-reference/block-storage/drivers/quobyte-driver.xml +++ /dev/null @@ -1,55 +0,0 @@ -
-    Quobyte driver -    -        The Quobyte volume -        driver enables storing Block Storage service volumes on a Quobyte storage -        back end. Block Storage service back ends are mapped to Quobyte volumes -        and individual Block Storage service volumes are stored as files on a -        Quobyte volume. The appropriate Quobyte volume is selected through the -        back-end configuration, which specifies the Quobyte volume -        explicitly. -     -        Note the dual use of the term 'volume' in the context of Block -            Storage service volumes and in the context of Quobyte volumes. -     -     -        For more information, see -        the Quobyte support webpage -        . -     -    The Quobyte volume driver supports the following volume operations: -     -         -            Create, delete, attach, and detach volumes -         -        Secure NAS operation (starting with the Mitaka release, -            secure NAS operation is optional but remains the default) -        Create and delete a snapshot -        Create a volume from a snapshot -        Extend a volume -        Clone a volume -        Copy a volume to an image -        Generic volume migration (no back-end optimization) -     -     -     -        -            When running VM instances off Quobyte volumes, ensure that the -             -            Quobyte Compute service driver has been configured in your -            OpenStack cloud. -        -    To activate the Quobyte volume driver, configure the corresponding -        volume_driver parameter: -    volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver -     -     -        The following table contains the configuration options -        supported by the Quobyte driver. -     -     -
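Besides volume_driver, the driver must be told which Quobyte volume to use; a minimal sketch, assuming the usual quobyte_volume_url option (the registry host and volume name are placeholders):
volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver
quobyte_volume_url = quobyte://quobyte-registry.example.com/openstack-cinder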
diff --git a/doc/config-reference/block-storage/drivers/scality-sofs-driver.xml b/doc/config-reference/block-storage/drivers/scality-sofs-driver.xml deleted file mode 100644 index ebe097e0d6..0000000000 --- a/doc/config-reference/block-storage/drivers/scality-sofs-driver.xml +++ /dev/null @@ -1,76 +0,0 @@ - -
- Scality SOFS driver - The Scality SOFS volume driver interacts with configured - sfused mounts. - The Scality SOFS driver manages volumes as sparse files stored on - a Scality Ring through sfused. Ring connection settings and sfused options - are defined in the cinder.conf file and the - configuration file pointed to by the - option, typically /etc/sfused.conf. - - - Supported operations - The Scality SOFS volume driver provides the following - Block Storage volume operations: - - - Create, delete, attach (map), and detach (unmap) volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - Backup a volume. - - - Restore backup to new or existing volume. - - - - - Configuration - Use the following instructions to update the - cinder.conf configuration file: - Sample Block Storage Configuration - [DEFAULT] -enabled_backends = scality-1 - -[scality-1] -volume_driver=cinder.volume.drivers.scality.ScalityDriver -volume_backend_name = scality-1 - -scality_sofs_config=/etc/sfused.conf -scality_sofs_mount_point=/cinder -scality_sofs_volume_dir=cinder/volumes - - - Use the following instructions to update the - nova.conf configuration file: - Sample Compute Configuration - [libvirt] -scality_sofs_mount_point = /cinder -scality_sofs_config = /etc/sfused.conf - - - - -
diff --git a/doc/config-reference/block-storage/drivers/sheepdog-driver.xml b/doc/config-reference/block-storage/drivers/sheepdog-driver.xml deleted file mode 100644 index afc8a83f8d..0000000000 --- a/doc/config-reference/block-storage/drivers/sheepdog-driver.xml +++ /dev/null @@ -1,55 +0,0 @@ -
- Sheepdog driver - Sheepdog is an open-source distributed storage system - that provides a virtual storage pool utilizing the internal disks of - commodity servers. - Sheepdog scales to several hundred nodes, and has powerful - virtual disk management features like snapshotting, cloning, rollback, - and thin provisioning. - More information can be found on the Sheepdog Project website. - This driver enables the use of Sheepdog through QEMU/KVM. - 
- Supported operations - Sheepdog driver supports these operations: - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - -
- -
- Configuration - Set the following - volume_driver in - cinder.conf: - volume_driver=cinder.volume.drivers.sheepdog.SheepdogDriver - The following table contains the configuration options - supported by the Sheepdog driver. - -
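A minimal sketch of exposing Sheepdog as a named back end in cinder.conf (the back-end name is illustrative):
[DEFAULT]
enabled_backends = sheepdog-1

[sheepdog-1]
volume_driver = cinder.volume.drivers.sheepdog.SheepdogDriver
volume_backend_name = sheepdog-1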
-
diff --git a/doc/config-reference/block-storage/drivers/smbfs-volume-driver.xml b/doc/config-reference/block-storage/drivers/smbfs-volume-driver.xml deleted file mode 100644 index 9d5f789b0b..0000000000 --- a/doc/config-reference/block-storage/drivers/smbfs-volume-driver.xml +++ /dev/null @@ -1,17 +0,0 @@ -
-SambaFS driver - There is a volume back-end for Samba filesystems. Set the following in your - cinder.conf, and use the following options to configure it. - - - The SambaFS driver requires qemu-img version - 1.7 or higher on Linux nodes, and qemu-img - version 1.6 or higher on Windows nodes. - - volume_driver=cinder.volume.drivers.smbfs.SmbfsDriver - -
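A minimal back-end sketch; smbfs_shares_config and smbfs_mount_point_base are assumed option names to verify against the SMBFS driver options table, and the share entry format shown is illustrative:
[smbfs-1]
volume_driver = cinder.volume.drivers.smbfs.SmbfsDriver
# Assumed option names; verify against the SMBFS driver options
smbfs_shares_config = /etc/cinder/smbfs_shares
smbfs_mount_point_base = /var/lib/cinder/smbfs

An entry in /etc/cinder/smbfs_shares might then look like:
//smbserver.example.com/cinder_share -o username=cinder,password=CINDER_PASSWORD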
diff --git a/doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml b/doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml deleted file mode 100644 index 92fd726116..0000000000 --- a/doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml +++ /dev/null @@ -1,37 +0,0 @@ -
- SolidFire - The SolidFire Cluster is a high-performance, all-SSD iSCSI - storage device that provides massive scale-out capability and - extreme fault tolerance. A key feature of the SolidFire - cluster is the ability to set and modify, during operation, - specific QoS levels on a volume-by-volume basis. The - SolidFire cluster offers this along with de-duplication, - compression, and an architecture that takes full advantage of - SSDs. - To configure the use of a SolidFire cluster with Block - Storage, modify your cinder.conf file as - follows: - volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver -san_ip = 172.17.1.182 # the address of your MVIP -san_login = sfadmin # your cluster admin login -san_password = sfpassword # your cluster admin password -sf_account_prefix = '' # prefix for tenant account creation on solidfire cluster - - Older versions of the SolidFire driver (prior to Icehouse) - created a unique account prefixed with - $cinder-volume-service-hostname-$tenant-id - on the SolidFire cluster for each tenant. Unfortunately, this - account formation resulted in issues for High Availability - (HA) installations and installations where the cinder-volume service can - move to a new node. The current default implementation does - not experience this issue as no prefix is used. For installations - created on a prior release, the old default behavior can be - configured by using the keyword "hostname" in sf_account_prefix. - - -
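Because QoS is set per volume, it is typically requested through volume type extra specs. The following is a hedged example that assumes the qos:minIOPS, qos:maxIOPS, and qos:burstIOPS scoped keys; verify the exact keys against the SolidFire driver documentation:
$ cinder type-create solidfire-gold
$ cinder type-key solidfire-gold set qos:minIOPS=1000
$ cinder type-key solidfire-gold set qos:maxIOPS=5000
$ cinder type-key solidfire-gold set qos:burstIOPS=8000
$ cinder create --volume-type solidfire-gold --display-name gold-vol 100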
diff --git a/doc/config-reference/block-storage/drivers/tintri-volume-driver.xml b/doc/config-reference/block-storage/drivers/tintri-volume-driver.xml deleted file mode 100644 index b641763fa9..0000000000 --- a/doc/config-reference/block-storage/drivers/tintri-volume-driver.xml +++ /dev/null @@ -1,57 +0,0 @@ -
- Tintri - Tintri VMstore is a smart storage that sees, learns and adapts - for cloud and virtualization. The Tintri Cinder driver will interact - with configured VMstore running Tintri OS 4.0 and above. It supports - various operations using Tintri REST APIs and NFS protocol. - - To configure the use of a Tintri VMstore with Block - Storage, perform the following actions: - - Edit the etc/cinder/cinder.conf file - and set the cinder.volume.drivers.tintri options: - volume_driver=cinder.volume.drivers.tintri.TintriDriver -# Mount options passed to the nfs client. See section of the -# nfs man page for details. (string value) -nfs_mount_options=vers=3,lookupcache=pos - -# -# Options defined in cinder.volume.drivers.tintri -# - -# The hostname (or IP address) for the storage system (string -# value) -tintri_server_hostname={Tintri VMstore Management IP} - -# User name for the storage system (string value) -tintri_server_username={username} - -# Password for the storage system (string value) -tintri_server_password={password} - -# API version for the storage system (string value) -#tintri_api_version=v310 - -# Following options needed for NFS configuration -# File with the list of available nfs shares (string value) -#nfs_shares_config=/etc/cinder/nfs_shares - - - Edit the etc/nova/nova.conf file, and - set the : - nfs_mount_options=vers=3 - - - Edit the /etc/cinder/nfs_shares file, - and add the Tintri VMstore mount points associated with - the configured VMstore management IP in the cinder.conf - file: - {vmstore_data_ip}:/tintri/{submount1} -{vmstore_data_ip}:/tintri/{submount2} - - - -
diff --git a/doc/config-reference/block-storage/drivers/violin-v6000-driver.xml b/doc/config-reference/block-storage/drivers/violin-v6000-driver.xml deleted file mode 100644 index 7c7cca1c48..0000000000 --- a/doc/config-reference/block-storage/drivers/violin-v6000-driver.xml +++ /dev/null @@ -1,251 +0,0 @@ - -
- - Violin Memory 6000 series AFA volume driver - - The OpenStack V6000 driver package from Violin Memory adds block - storage service support for Violin 6000 Series All Flash Arrays. - - - The driver package release can be used with any OpenStack Liberty - deployment for all 6000 series all-flash arrays for release 6.3.1 - and later using either Fibre Channel or iSCSI HBAs. - - - - The Violin 6000 series AFA driver is recommended as an - evaluation product only, for existing 6000 series customers. - The driver will be deprecated or removed in the next OpenStack - release. Future development and support will be focused on the - 7000 series FSP driver only. - - - -
- System requirements - To use the Violin driver, the following are required: - - - Violin 6000 series AFA with: - - - Concerto OS version 6.3.1 or later - - - iSCSI or FC host interfaces - - - Storage network connectivity between all target and - initiator ports - - - - - - - The Violin volume driver: The driver implements the - Block Storage API calls. Both FC and iSCSI driver support - is included with the OpenStack Liberty release. - - - - - The vmemclient library: This is the Violin Array - Communications library to the Flash Storage Platform through - a REST-like interface. The client can be installed using - the python 'pip' installer tool. Further information on - vmemclient can be found here: PyPI - . - - -pip install vmemclient - - - -
- -
- Supported operations - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - - - All listed operations are supported for both thick and - thin LUNs. However, over-subscription is not supported. - - -
- -
- Array configuration - - After installing and configuring your V6000 array per the - installation guide provided with your array, please follow these - additional steps to prepare your array for use with OpenStack. - - - - - Ensure your client initiator interfaces are all zoned or - VLAN'd so that they can communicate with ALL of the target - ports on the array. See your array installation or user - guides for more information. - - - - Set the array's provisioning threshold value. - -container set name CONTAINER_NAME provision-threshold hard 100 - - - - Set the array's used-space threshold value. - -container set name CONTAINER_NAME usedspace-threshold hard 0 - - - -
- -
- Driver configuration - - Once the array is configured, it is simply a matter of editing - the cinder configuration file to add or modify the parameters. - Contents will differ depending on whether you are setting up a - fibre channel or iSCSI environment. - - -
- Fibre channel configuration - - Set the following in your cinder.conf - configuration file for setup of a Fibre channel array, - replacing the variables using the guide in the following - section: - - -volume_driver = cinder.volume.drivers.violin.v6000_fcp.V6000FCPDriver -san_thin_provision = True -san_ip = VMEM_MGMT_IP -san_login = VMEM_USER_NAME -san_password = VMEM_PASSWORD -gateway_mga = VMEM_MGA_IP -gateway_mgb = VMEM_MGB_IP - -
- -
- iSCSI configuration - - Set the following in your cinder.conf - configuration file for setup of an iSCSI array, replacing the - variables using the guide in the following section: - - -volume_driver = cinder.volume.drivers.violin.v6000_iscsi.V6000ISCSIDriver -san_thin_provision = True -san_ip = VMEM_MGMT_IP -san_login = VMEM_USER_NAME -san_password = VMEM_PASSWORD -iscsi_target_prefix = iqn.2004-02.com.vmem: -iscsi_port = 3260 -iscsi_ip_address = CINDER_INITIATOR_IP -gateway_mga = VMEM_MGA_IP -gateway_mgb = VMEM_MGB_IP - -
- -
- Configuration parameters - - Description of configuration value placeholders: - - - - VMEM_MGMT_IP - - - Cluster master IP address or hostname of the Violin 6000 - Array. Can be an IP address or hostname. - - - - - VMEM_USER_NAME - - - Log-in user name for the Violin 6000 Memory Gateways. - This user must have administrative rights on the array. - Typically this is the 'admin' user. - - - - - VMEM_PASSWORD - - - Log-in user's password. - - - - - CINDER_INITIATOR_IP - - - The IP address assigned to the primary iSCSI interface on - the cinder-volume client. This IP address must be able to - communicate with all target ports that are active on the - array. - - - - - VMEM_MGA_IP - - - The IP or hostname of the gateway node marked 'A', commonly - referred to as 'MG-A'. - - - - - VMEM_MGB_IP - - - The IP or hostname of the gateway node marked 'B', commonly - referred to as 'MG-B'. - - - - - -
-
-
diff --git a/doc/config-reference/block-storage/drivers/violin-v7000-driver.xml b/doc/config-reference/block-storage/drivers/violin-v7000-driver.xml deleted file mode 100644 index cae815f945..0000000000 --- a/doc/config-reference/block-storage/drivers/violin-v7000-driver.xml +++ /dev/null @@ -1,173 +0,0 @@ -
- Violin Memory 7000 Series FSP volume driver - - The OpenStack V7000 driver package from Violin Memory adds block - storage service support for Violin 7300 Flash Storage Platforms - (FSPs) and 7700 FSP controllers. - - - The driver package release can be used with any OpenStack Liberty - deployment for all 7300 FSPs and 7700 FSP controllers running - Concerto 7.5.3 and later using Fibre Channel HBAs. - - -
- System requirements - To use the Violin driver, the following are required: - - - Violin 7300/7700 series FSP with: - - - Concerto OS version 7.5.3 or later - - - Fibre channel host interfaces - - - - - - - The Violin block storage driver: This driver implements the - block storage API calls. The driver is included with the - OpenStack Liberty release. - - - - - The vmemclient library: This is the Violin Array - Communications library to the Flash Storage Platform through - a REST-like interface. The client can be installed using - the python 'pip' installer tool. Further information on - vmemclient can be found on PyPI - . - - -pip install vmemclient - - - -
- -
- Supported operations - - - Create, delete, attach, and detach volumes. - - - Create, list, and delete volume snapshots. - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - - Copy a volume to an image. - - - Clone a volume. - - - Extend a volume. - - - - - Listed operations are supported for thick, thin, and dedup - luns, with the exception of cloning. Cloning operations are - supported only on thick luns. - - -
- -
- Driver configuration - - Once the array is configured per the installation guide, it is - simply a matter of editing the cinder configuration file to add - or modify the parameters. The driver currently only supports - fibre channel configuration. - - -
- Fibre channel configuration - - Set the following in your cinder.conf - configuration file, replacing the variables using the guide in - the following section: - - -volume_driver = cinder.volume.drivers.violin.v7000_fcp.V7000FCPDriver -volume_backend_name = vmem_violinfsp -extra_capabilities = VMEM_CAPABILITIES -san_ip = VMEM_MGMT_IP -san_login = VMEM_USER_NAME -san_password = VMEM_PASSWORD -use_multipath_for_image_xfer = true - -
- -
- Configuration parameters - - Description of configuration value placeholders: - - - - VMEM_CAPABILITIES - - - User defined capabilities, a JSON formatted string - specifying key/value pairs (string value). The ones - particularly supported are dedup and thin. Only these two - capabilities are listed here in cinder.conf, indicating - this backend be selected for creating luns which have - a volume type associated with them that have 'dedup' or - 'thin' extra_specs specified. For example, if the FSP is - configured to support dedup luns, set the associated driver - capabilities to: {"dedup":"True","thin":"True"}. - - - - - VMEM_MGMT_IP - - - External IP address or hostname of the Violin 7300 - Memory Gateway. This can be an IP address or - hostname. - - - - - VMEM_USER_NAME - - - Log-in user name for the Violin 7300 Memory Gateway or - 7700 FSP controller. This user must have administrative - rights on the array or controller. - - - - - VMEM_PASSWORD - - - Log-in user's password. - - - - -
-
- -
diff --git a/doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml b/doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml deleted file mode 100644 index d92893ce38..0000000000 --- a/doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml +++ /dev/null @@ -1,420 +0,0 @@ - -
- VMware VMDK driver - - Use the VMware VMDK driver to enable management of - OpenStack Block Storage volumes on vCenter-managed data - stores. Volumes are backed by VMDK files on data stores that - use any VMware-compatible storage technology such as NFS, - iSCSI, Fibre Channel, and vSAN. -
- Functional context - The VMware VMDK driver connects to vCenter, through - which it can dynamically access all the data stores - visible from the ESX hosts in the managed cluster. - When you create a volume, the VMDK driver creates a VMDK - file on demand. The VMDK file creation completes only when - the volume is subsequently attached to an instance. The - reason for this requirement is that data stores visible - to the instance determine where to place - the volume. Before the service creates the VMDK file, - attach a volume to the target instance. - The running vSphere VM is automatically reconfigured to - attach the VMDK file as an extra disk. Once attached, you - can log in to the running vSphere VM to rescan and - discover this extra disk. - With the update to ESX version 6.0, the VMDK driver - now supports NFS version 4.1. -
-
- Configuration - The recommended volume driver for OpenStack Block - Storage is the VMware vCenter VMDK driver. When you - configure the driver, you must match it with the - appropriate OpenStack Compute driver from VMware and both - drivers must point to the same server. - In the nova.conf file, use this - option to define the Compute driver: - compute_driver=vmwareapi.VMwareVCDriver - In the cinder.conf file, use this - option to define the volume driver: - volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver - The following table lists various options that the - drivers support for the OpenStack Block Storage - configuration (cinder.conf): - -
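For reference, a minimal back-end sketch; the vmware_host_* option names are assumptions to verify against the options table referenced above:
volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
vmware_host_ip = VCENTER_HOST_IP
vmware_host_username = VCENTER_USERNAME
vmware_host_password = VCENTER_PASSWORD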
-
- VMDK disk type - The VMware VMDK drivers support the creation of VMDK disk file - types thin, lazyZeroedThick - (sometimes called thick or flat), or eagerZeroedThick. - - - A thin virtual disk is allocated and zeroed on demand as the space - is used. Unused space on a Thin disk is available to other users. - - - A lazy zeroed thick virtual disk will have all space allocated at - disk creation. This reserves the entire disk space, so it is not - available to other users at any time. - - - An eager zeroed thick virtual disk is similar to a lazy zeroed thick - disk, in that the entire disk is allocated at creation. However, in - this type, any previous data will be wiped clean on the disk before - the write. This can mean that the disk will take longer to create, - but can also prevent issues with stale data on physical media. - - - Use the vmware:vmdk_type extra spec key with the - appropriate value to specify the VMDK disk file type. This table - shows the mapping between the extra spec entry and the VMDK disk - file type: - - - - - - - - - - - - - - - - - - - - - - - - - - -
Extra spec entry to VMDK disk file type - mapping
Disk file typeExtra spec keyExtra spec value
thinvmware:vmdk_typethin
lazyZeroedThickvmware:vmdk_typethick
eagerZeroedThickvmware:vmdk_typeeagerZeroedThick
- If you do not specify a vmdk_type extra spec entry, - the disk file type will default to thin. - The following example shows how to create a - lazyZeroedThick VMDK volume by using the - appropriate vmdk_type: - $ cinder type-create thick_volume -$ cinder type-key thick_volume set vmware:vmdk_type=thick -$ cinder create --volume-type thick_volume --display-name volume1 1 -
-
- Clone type - With the VMware VMDK drivers, you can create a volume - from another source volume or a snapshot point. The VMware - vCenter VMDK driver supports the full - and linked/fast clone types. Use the - vmware:clone_type extra spec key to - specify the clone type. The following table captures the - mapping for clone types: - - - - - - - - - - - - - - - - - - - - - -
Extra spec entry to clone type mapping
Clone typeExtra spec keyExtra spec value
fullvmware:clone_typefull
linked/fastvmware:clone_typelinked
- If you do not specify the clone type, the default is - full. - The following example shows linked cloning from a - source volume, which is created from an image: - $ cinder type-create fast_clone -$ cinder type-key fast_clone set vmware:clone_type=linked -$ cinder create --image-id 9cb87f4f-a046-47f5-9b7c-d9487b3c7cd4 --volume-type fast_clone --display-name source-vol 1 -$ cinder create --source-volid 25743b9d-3605-462b-b9eb-71459fe2bb35 --display-name dest-vol 1 -
-
- Use vCenter storage policies to specify back-end data - stores - - This section describes how to configure back-end data - stores using storage policies. In vCenter 5.5 and greater, - you can create one or more storage policies and expose them - as a Block Storage volume-type to a vmdk volume. The storage - policies are exposed to the vmdk driver through the extra spec - property with the - vmware:storage_profile key. - For example, assume a storage policy in vCenter named - gold_policy and a Block Storage - volume type named vol1 with the extra - spec key vmware:storage_profile set to - the value gold_policy. Any Block - Storage volume creation that uses the - vol1 volume type places the volume - only in data stores that match the - gold_policy storage policy. - The Block Storage back-end configuration for vSphere - data stores is automatically determined based on the - vCenter configuration. If you configure a connection to - connect to vCenter version 5.5 or later in the - cinder.conf file, the use of - storage policies to configure back-end data stores is - automatically supported. - - Any data stores that you configure for the - Block Storage service must also be configured for the - Compute service. - - - To configure back-end data stores by using storage - policies - - In vCenter, tag the data stores to be used for - the back end. - OpenStack also supports policies that are - created by using vendor-specific capabilities; for - example vSAN-specific storage policies. - - The tag value serves as the policy. For - details, see . - - - - Set the extra spec key - vmware:storage_profile in - the desired Block Storage volume types to the - policy name that you created in the previous - step. - - - Optionally, for the - - parameter, enter the version number of your - vSphere platform. For example, - 5.5. - This setting overrides the default location for - the corresponding WSDL file. Among other - scenarios, you can use this setting to prevent - WSDL error messages during the development phase - or to work with a newer version of vCenter. - - - Complete the other vCenter configuration - parameters as appropriate. - - - - The following considerations apply to configuring - SPBM for the Block Storage service: - - - If a volume is created without an - associated policy (that is, without an - associated volume type that specifies the - vmware:storage_profile - extra spec), there is no policy-based - placement for that volume. - - - -
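Continuing the gold_policy example above, the volume type mapping can be created with the standard type commands (the type name is illustrative):
$ cinder type-create vmware_gold
$ cinder type-key vmware_gold set vmware:storage_profile=gold_policy
$ cinder create --volume-type vmware_gold --display-name gold-vol 1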
-
- Supported operations - The VMware vCenter VMDK driver supports these - operations: - - - Create, delete, attach, and detach volumes. - - When a volume is attached to an instance, a reconfigure - operation is performed on the instance to add the volume's - VMDK to it. The user must manually rescan and mount the device - from within the guest operating system. - - - - Create, list, and delete volume snapshots. - - Allowed only if volume is not attached to an - instance. - - - - Create a volume from a snapshot. - - - Copy an image to a volume. - - Only images in vmdk disk format - with bare container format are - supported. The property - of the image can be preallocated, - sparse, - streamOptimized or - thin. - - - - - Copy a volume to an image. - - - - Allowed only if the volume is not attached to an - instance. - - - This operation creates a - streamOptimized disk image. - - - - - - Clone a volume. - - Supported only if the source volume is not attached to an - instance. - - - - Backup a volume. - - This operation creates a backup of the volume in - streamOptimized disk format. - - - - Restore backup to new or existing volume. - - Supported only if the existing volume doesn't contain - snapshots. - - - - Change the type of a volume. - - This operation is supported only if the volume state is - available. - - - - - Extend a volume. - - -
- -
- Storage policy-based configuration in vCenter - You can configure Storage Policy-Based Management (SPBM) - profiles for vCenter data stores supporting the Compute, - Image Service, and Block Storage components of an OpenStack - implementation. - In a vSphere OpenStack deployment, SPBM enables you to - delegate several data stores for storage, which reduces - the risk of running out of storage space. The policy logic - selects the data store based on accessibility and - available storage space. -
-
- Prerequisites - - - Determine the data stores to be used by the SPBM - policy. - - - Determine the tag that identifies the data - stores in the OpenStack component - configuration. - - - Create separate policies or sets of data stores - for separate OpenStack components. - - -
-
- Create storage policies in vCenter - - To create storage policies in vCenter - - In vCenter, create the tag that identifies the - data stores: - - - From the Home screen, click - Tags. - - - Specify a name for the tag. - - - Specify a tag category. For example, - spbm-cinder. - - - - - Apply the tag to the data stores to be used by - the SPBM policy. - - For details about creating tags in vSphere, - see the vSphere documentation. - - - - In vCenter, create a tag-based storage policy - that uses one or more tags to identify a set of - data stores. - - For details about creating storage policies in vSphere, - see the vSphere documentation. - - - -
-
- Data store selection - If storage policy is enabled, the driver initially - selects all the data stores that match the associated - storage policy. - If two or more data stores match the storage policy, the - driver chooses a data store that is connected to the - maximum number of hosts. - In case of ties, the driver chooses the data store with the - lowest space utilization, where space utilization is - defined by the - (1 - freespace/totalspace) - metric. - These actions reduce the number of volume migrations - while attaching the volume to instances. - The volume must be migrated if the ESX host for the - instance cannot access the data store that contains the - volume. -
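Regarding the space utilization tie-breaker above: for example, a data store with 400 GB free out of 1 TB has a space utilization of 1 - 400/1000 = 0.6 and is preferred over an equally connected data store with 200 GB free out of 1 TB (utilization 0.8).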
-
diff --git a/doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml b/doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml deleted file mode 100644 index ee0f16c3eb..0000000000 --- a/doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml +++ /dev/null @@ -1,136 +0,0 @@ - -
- - - Windows iSCSI volume driver - - Windows Server 2012 and Windows Storage Server 2012 offer an - integrated iSCSI Target service that can be used with OpenStack Block Storage - in your stack. Being entirely a software solution, consider it in particular - for mid-sized networks where the costs of a SAN might be excessive. - - The Windows cinder-volume - driver works with OpenStack Compute on any hypervisor. It includes snapshotting - support and the "boot from volume" feature. - - This driver creates volumes backed by fixed-type VHD images on Windows - Server 2012 and dynamic-type VHDX on Windows Server 2012 R2, stored locally - on a user-specified path. The system uses those images as iSCSI disks and - exports them through iSCSI targets. Each volume has its own iSCSI target. - - This driver has been tested with Windows Server 2012 and Windows Server 2012 R2 - using the Server and Storage Server distributions. - - Install the cinder-volume - service as well as the required Python components directly onto the Windows - node. - - You may install and configure cinder-volume - and its dependencies manually using the following guide or you - may use the Cinder Volume Installer, presented below. -
- Installing using the OpenStack cinder volume installer - - In case you want to avoid all the manual setup, you can use - Cloudbase Solutions' installer. You can find it at - https://www.cloudbase.it/downloads/CinderVolumeSetup_Beta.msi. It - installs an independent Python environment in order to avoid conflicts - with existing applications, and dynamically generates a cinder.conf - file based on the parameters that you provide. - - cinder-volume will be - configured to run as a Windows Service, which can be restarted - using: - - PS C:\> net stop cinder-volume ; net start cinder-volume - - The installer can also be used in unattended mode. More details - about how to use the installer and its features can be found at https://www.cloudbase.it. -
- -
- Windows Server configuration - - The required service in order to run - cinder-volume on Windows is wintarget. - This will require the iSCSI Target Server Windows feature to be installed. - You can install it by running the following command: - - PS C:\> Add-WindowsFeature FS-iSCSITarget-Server - - - The Windows Server installation requires at least 16 GB of disk space. - The volumes hosted by this node need the extra space. - - - For cinder-volume to work - properly, you must configure NTP as explained in - . - - Next, install the requirements as described in . -
- -
- Getting the code - - Git can be used to download the necessary source code. The installer - to run Git on Windows can be downloaded here: - - - https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe - - Once installed, run the following to clone the OpenStack - Block Storage code. - - PS C:\> git.exe clone https://github.com/openstack/cinder.git -
- -
- Configure cinder-volume - - The cinder.conf file may be placed in - C:\etc\cinder. Below is a config sample for using - the Windows iSCSI Driver: - - [DEFAULT] -auth_strategy = keystone -volume_name_template = volume-%s -volume_driver = cinder.volume.drivers.windows.WindowsDriver -glance_api_servers = IP_ADDRESS:9292 -rabbit_host = IP_ADDRESS -rabbit_port = 5672 -sql_connection = mysql+pymysql://root:Passw0rd@IP_ADDRESS/cinder -windows_iscsi_lun_path = C:\iSCSIVirtualDisks -verbose = True -rabbit_password = Passw0rd -logdir = C:\OpenStack\Log\ -image_conversion_dir = C:\ImageConversionDir -debug = True - - The following table contains a reference to the only driver specific - option that will be used by the Block Storage Windows driver: - - -
- -
- Running cinder-volume - - After configuring cinder-volume - using the cinder.conf file, you may use the following - commands to install and run the service (note that you must replace the - variables with the proper paths): - - PS C:\> python $CinderClonePath\setup.py install -PS C:\> cmd /c C:\python27\python.exe c:\python27\Scripts\cinder-volume --config-file $CinderConfPath -
-
diff --git a/doc/config-reference/block-storage/drivers/xio-volume-driver.xml b/doc/config-reference/block-storage/drivers/xio-volume-driver.xml deleted file mode 100644 index 8b2b0adfc1..0000000000 --- a/doc/config-reference/block-storage/drivers/xio-volume-driver.xml +++ /dev/null @@ -1,145 +0,0 @@ - -
- X-IO volume driver - The X-IO volume driver for OpenStack Block Storage enables ISE products to be managed by - OpenStack Block Storage nodes. This driver can be configured to work with iSCSI and Fibre - Channel storage protocols. The X-IO volume driver allows the cloud operator to take advantage of - ISE features like Quality of Service and Continuous Adaptive Data Placement (CADP). It also - supports creating thin volumes and specifying volume media affinity. -
- Requirements - ISE FW 2.8.0 or ISE FW 3.1.0 is required for OpenStack Block Storage support. The X-IO - volume driver will not work with older ISE FW. -
- -
- Supported operations - - - Create, delete, attach, detach, retype, clone, and extend volumes. - - - Create a volume from snapshot. - - - Create, list, and delete volume snapshots. - - - Manage and unmanage a volume. - - - Get volume statistics. - - - Create a thin provisioned volume. - - - Create volumes with QoS specifications. - - -
-
- Configure X-IO Volume driver - To configure the use of an ISE product with OpenStack Block Storage, modify your - cinder.conf file as follows. Be careful to use the one that matches the - storage protocol in use: -
- Fibre Channel - volume_driver = cinder.volume.drivers.xio.XIOISEFCDriver -san_ip = 1.2.3.4 # the address of your ISE REST management interface -san_login = administrator # your ISE management admin login -san_password = password # your ISE management admin password -
-
- iSCSI - volume_driver = cinder.volume.drivers.xio.XIOISEISCSIDriver -san_ip = 1.2.3.4 # the address of your ISE REST management interface -san_login = administrator # your ISE management admin login -san_password = password # your ISE management admin password -iscsi_ip_address = ionet_ip # ip address to one ISE port connected to the IONET -
-
- Optional configuration parameters - -
-
- Multipath - The X-IO ISE supports a multipath configuration, but multipath must be enabled on the - compute node (see ISE Storage Blade Best Practices - Guide). For more information, see www.openstack.org. -
-
- Volume types - OpenStack Block Storage uses volume types to help the administrator specify attributes - for volumes. These attributes are called extra-specs. The X-IO volume driver supports the - following extra-specs. - Extra specs - - - - - - - Extra-specs name - Valid values - Description - - - - - Feature:Raid - 1, 5 - RAID level for volume. - - - Feature:Pool - 1 - n (n being number of pools on ISE) - Pool to create volume in. - - - Affinity:Type - cadp, flash, hdd - Volume media affinity type. - - - Alloc:Type - 0 (thick), 1 (thin) - Allocation type for volume. Thick or thin. - - - QoS:minIOPS - n (value less than maxIOPS) - Minimum IOPS setting for volume. - - - QoS:maxIOPS - n (value greater than minIOPS) - Maximum IOPS setting for volume. - - - QoS:burstIOPS - n (value greater than minIOPS) - Burst IOPS setting for volume. - - - -
-
- Examples - Create a volume type called xio1-flash for volumes that should reside on ssd - storage: -$ cinder type-create xio1-flash -$ cinder type-key xio1-flash set Affinity:Type=flash - - Create a volume type called xio1 and set QoS min and - max: -$ cinder type-create xio1 -$ cinder type-key xio1 set QoS:minIOPS=20 -$ cinder type-key xio1 set QoS:maxIOPS=5000 - -
-
-
-
diff --git a/doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml b/doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml deleted file mode 100644 index 7a1567863e..0000000000 --- a/doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml +++ /dev/null @@ -1,257 +0,0 @@ -
- Oracle ZFS Storage Appliance iSCSI driver - Oracle ZFS Storage Appliances (ZFSSAs) provide advanced software to protect data, speed tuning and - troubleshooting, and deliver high performance and high availability. - Through the Oracle ZFSSA iSCSI Driver, OpenStack Block Storage can use an Oracle ZFSSA as a block storage resource. - The driver enables you to create iSCSI volumes that an OpenStack Block Storage server can allocate to any virtual machine running on a compute host. - The Oracle ZFSSA iSCSI Driver, version 1.0.0 and later, supports ZFSSA software release 2013.1.2.0 and later. - - - - Configuration - - - Enable RESTful service on the ZFSSA Storage Appliance. - - - Create a new user on the appliance with the following authorizations: - - - scope=stmf - allow_configure=true - - - scope=nas - allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true - - - - You can create a role with authorizations as follows: - - zfssa:> configuration roles -zfssa:configuration roles> role OpenStackRole -zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack Cinder Driver" -zfssa:configuration roles OpenStackRole (uncommitted)> commit -zfssa:configuration roles> select OpenStackRole -zfssa:configuration roles OpenStackRole> authorizations create -zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=stmf -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - You can create a user with a specific role as follows: - zfssa:> configuration users -zfssa:configuration users> user cinder -zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Cinder Driver" -zfssa:configuration users cinder (uncommitted)> set initial_password=12345 -zfssa:configuration users cinder (uncommitted)> commit -zfssa:configuration users> select cinder set roles=OpenStackRole - - - You can also run this workflow to automate the above tasks. - - - - - - Ensure that the ZFSSA iSCSI service is online. If the ZFSSA iSCSI service is not online, enable the service by using the BUI, CLI or REST API in the appliance. - - zfssa:> configuration services iscsi -zfssa:configuration services iscsi> enable -zfssa:configuration services iscsi> show -Properties: -<status>= online -... - - - -Define the following required properties in the cinder.conf file: - volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver -san_ip = myhost -san_login = username -san_password = password -zfssa_pool = mypool -zfssa_project = myproject -zfssa_initiator_group = default -zfssa_target_portal = w.x.y.z:3260 -zfssa_target_interfaces = e1000g0 -Optionally, you can define additional properties. - Target interfaces can be seen as follows in the CLI: - zfssa:> configuration net interfaces -zfssa:configuration net interfaces> show -Interfaces: -INTERFACE STATE CLASS LINKS ADDRS LABEL -e1000g0 up ip e1000g0 1.10.20.30/24 Untitled Interface -... - - - Do not use management interfaces for zfssa_target_interfaces. 
- - - - - - Supported operations - - - Create and delete volumes - - - Extend volume - - - Create and delete snapshots - - - Create volume from snapshot - - - Delete volume snapshots - - - Attach and detach volumes - - - Get volume stats - - - Clone volumes - - - Volume migration - - - Local cache of a bootable volume - - - - - ZFSSA assisted volume migration - The ZFSSA iSCSI driver supports storage assisted volume migration starting in the Liberty release. This feature - uses remote replication feature on the ZFSSA. Volumes can be migrated between - two backends configured not only to the same ZFSSA but also between - two separate ZFSSAs altogether. - - The following conditions must be met in order to use ZFSSA assisted volume - migration: - - - - Both the source and target backends are configured to ZFSSAs. - - - Remote replication service on the source and target appliance is enabled. - - - The ZFSSA to which the target backend is configured should be configured as a target in - the remote replication service of the ZFSSA configured to the source backend. - The remote replication target needs to be configured even when the source - and the destination for volume migration are the same ZFSSA. - Define zfssa_replication_ip in the cinder.conf of the source backend as the IP - address used to register the target ZFSSA in the remote replication service of - the source ZFSSA. - - - - The name of the iSCSI target group(zfssa_target_group) on the source and the - destination ZFSSA is the same. - - - - The volume is not attached and is in available state. - - - - If any of the above conditions are not met, the driver will proceed with generic - volume migration. - - The ZFSSA user on the source and target appliances will need to have additional - role authorizations for assisted volume migration to work. In scope nas, - set allow_rrtarget and allow_rrsource to true. - - - zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=nas - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rrtarget=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rrsource=true - - - - ZFSSA local cache - The local cache feature enables ZFSSA drivers to serve the usage of bootable volumes significantly better. With the feature, - the first bootable volume created from an image is cached, so that subsequent volumes can be created directly from the cache, - instead of having image data transferred over the network multiple times. - - The following conditions must be met in order to use ZFSSA local cache feature: - - - - A storage pool needs to be configured. - - - REST and iSCSI services need to be turned on. - - - On an OpenStack controller, cinder.conf needs to contain necessary properties used to configure and - set up the ZFSSA iSCSI driver, including the following new properties: - - - zfssa_enable_local_cache: (True/False) To enable/disable the feature. - - - zfssa_cache_project: The ZFSSA project name where cache volumes are stored. - - - - - - - - Every cache volume has two additional properties stored as ZFSSA custom schema. It is important that the schema are not altered outside of - Block Storage when the driver is in use: - - - - image_id: stores the image id as in Image service. - - - updated_at: stores the most current timestamp when the image is updated in Image service. 
- - - - - Supported extra specs - Extra specs provide the OpenStack storage admin the - flexibility to create volumes with different characteristics from the - ones specified in cinder.conf. The admin will - specify the volume properties as keys at volume type creation. When a - user requests a volume of this volume type, the volume will be created - with the properties specified as extra specs. - - The following extra specs scoped keys are supported by the driver - - - - zfssa:volblocksize - - - zfssa:sparse - - - zfssa:compression - - - zfssa:logbias - - - Volume types can be created using the cinder type-create. Extra spec keys can be added using cinder type-key command. - - - - - - Driver options - The Oracle ZFSSA iSCSI Driver supports these options: - - - -
diff --git a/doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml b/doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml deleted file mode 100644 index 0efed5d3b4..0000000000 --- a/doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml +++ /dev/null @@ -1,258 +0,0 @@ -
- Oracle ZFS Storage Appliance NFS driver - The Oracle ZFS Storage Appliance (ZFSSA) NFS driver enables the - ZFSSA to be used seamlessly as a block storage resource. The driver enables you to - to create volumes on a ZFS share that is NFS mounted. - - - Requirements - Oracle ZFS Storage Appliance Software version 2013.1.2.0 or later - - - Supported operations - - - Create, extend, delete volumes - - - Attach and detach volumes - - - Create, delete snapshots - - - Create a volume from a snapshot - - - Copy an image to a volume - - - Copy a volume to an image - - - Clone a volume - - - Volume migration - - - Local cache of a bootable volume - - - - - Appliance configuration - Appliance configuration using the command line interface (CLI) is described - below. To access the CLI, ensure SSH remote access is enabled, which is the default. You - can also perform configuration using the browser user interface (BUI) or the RESTful - API. Please refer to the - Oracle ZFS Storage Appliance documentation - for details on how to configure the Oracle ZFS Storage Appliance using the BUI, CLI and RESTful API. - - - - Log in to the Oracle ZFS Storage Appliance CLI and enable the REST - service. REST service needs to stay online for this driver to function. - zfssa:>configuration services rest enable - - - Create a new storage pool on the appliance if you do not want to use an - existing one. This storage pool is named 'mypool' for the sake of this documentation. - - - Create a new project and share in the storage pool (mypool) if you do not want to use - existing ones. This driver will create a project and share by the names specified in cinder.conf, if the - a project or share by that name does not already exist in the storage pool (mypool). - The project and share are named 'NFSProject' and 'nfs_share' in the sample cinder.conf entries below. - - - To perform driver operations, create a role with the following - authorizations: - - - scope=svc - allow_administer=true, allow_restart=true, allow_configure=true - - - scope=nas - pool=pool_name, project=project_name, share=share_name, allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true - - - The following examples show how to create a role with authorizations. - zfssa:> configuration roles -zfssa:configuration roles> role OpenStackRole -zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack NFS Cinder Driver" -zfssa:configuration roles OpenStackRole (uncommitted)> commit -zfssa:configuration roles> select OpenStackRole -zfssa:configuration roles OpenStackRole> authorizations create -zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=svc -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_administer=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_restart=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - zfssa:> configuration roles OpenStackRole authorizations> set scope=nas -The following properties need to be set when the scope of this role needs to be limited to a pool (mypool), a project (NFSProject) and a -share (nfs_share) created in the steps above. This will prevent the user assigned to this role from being used to modify other pools, projects and shares. 
-zfssa:configuration roles OpenStackRole auth (uncommitted)> set pool=mypool -zfssa:configuration roles OpenStackRole auth (uncommitted)> set project=NFSProject -zfssa:configuration roles OpenStackRole auth (uncommitted)> set share=nfs_share - -The following properties only need to be set when a share or a project has not been created following the steps above and wish to allow -the driver to create them for you. -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createProject=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createShare=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_clone=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeSpaceProps=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_destroy=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rollback=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_takeSnap=true -zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - - - Create a new user or modify an existing one and assign the new role to the - user. - The following example shows how to create a new user and assign the new role - to the user. - zfssa:> configuration users -zfssa:configuration users> user cinder -zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Cinder Driver" -zfssa:configuration users cinder (uncommitted)> set initial_password=12345 -zfssa:configuration users cinder (uncommitted)> commit -zfssa:configuration users> select cinder set roles=OpenStackRole - - - Ensure that NFS and HTTP services on the appliance are online. Note the HTTPS - port number for later entry in the cinder service configuration file (cinder.conf). This driver uses WebDAV - over HTTPS to create snapshots and clones of volumes, and therefore needs to have the HTTP service online. - The following example illustrates enabling the services and showing - their properties. - zfssa:> configuration services nfs -zfssa:configuration services nfs> enable -zfssa:configuration services nfs> show -Properties: -<status>= online -... - zfssa:configuration services http> enable -zfssa:configuration services http> show -Properties: -<status>= online -require_login = true -protocols = http/https -listen_port = 80 -https_port = 443 - - - Create a network interface to be used exclusively for data. An existing network interface may also be used. The following example - illustrates how to make a network interface for data traffic flow only. - - For better performance and reliability, it is recommended to configure a separate subnet exclusively for data traffic in your cloud environment. - - zfssa:> configuration net interfaces -zfssa:configuration net interfaces> select igbx -zfssa:configuration net interfaces igbx> set admin=false -zfssa:configuration net interfaces igbx> commit - - - For clustered controller systems, the following verification is required in addition to the above steps. - Skip this step if a standalone system is used. - zfssa:> configuration cluster resources list - Verify that both the newly created pool and the network interface are of type "singleton" and - are not locked to the current controller. This approach ensures that the pool and the interface used - for data always belong to the active controller, regardless of the current state of the cluster. 
- - Verify that both the network interface used for management and data, and the storage pool belong to the same head. - - There will be a short service interruption during failback/takeover, but once the process is complete, - the driver should be able to access the ZFSSA for data as well as for management. - - - - - - Cinder service configuration - - - Define the following required properties in the - cinder.conf configuration file: - volume_driver = cinder.volume.drivers.zfssa.zfssanfs.ZFSSANFSDriver -san_ip = myhost -san_login = username -san_password = password -zfssa_data_ip = mydata -zfssa_nfs_pool = mypool - - Management interface san_ip can be used instead of - zfssa_data_ip, but it is not recommended. - - - - - You can also define the following additional properties in the - cinder.conf configuration file: - zfssa_nfs_project = NFSProject -zfssa_nfs_share = nfs_share -zfssa_nfs_mount_options = -zfssa_nfs_share_compression = off -zfssa_nfs_share_logbias = latency -zfssa_https_port = 443 - - - - The driver does not use the file specified in the nfs_shares_config - option. - - - - Driver options - The Oracle ZFS Storage Appliance NFS driver supports these options: - - This driver shares additional NFS configuration options with the generic NFS driver. - For a description of these, see . - - - - ZFSSA local cache - The local cache feature enables ZFSSA drivers to serve the usage of bootable volumes significantly better. With the feature, - the first bootable volume created from an image is cached, so that subsequent volumes can be created directly from the cache, - instead of having image data transferred over the network multiple times. - - The following conditions must be met in order to use ZFSSA local cache feature: - - - - A storage pool needs to be configured. - - - REST and NFS services need to be turned on. - - - On an OpenStack controller, cinder.conf needs to contain necessary properties used to configure and - set up the ZFSSA NFS driver, including the following new properties: - - - zfssa_enable_local_cache: (True/False) To enable/disable the feature. - - - zfssa_cache_directory: The directory name inside zfssa_nfs_share where cache volumes are stored. - - - - - - - - Every cache volume has two additional properties stored as WebDAV properties. It is important that they are not altered outside of - Block Storage when the driver is in use: - - - - image_id: stores the image id as in Image service. - - - updated_at: stores the most current timestamp when the image is updated in Image service. - - - -
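As a sketch, the local cache properties described above are added to the ZFSSA NFS back-end section of cinder.conf; the cache directory name is illustrative:
zfssa_enable_local_cache = True
zfssa_cache_directory = os-cinder-cache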
diff --git a/doc/config-reference/block-storage/section_backup-drivers.xml b/doc/config-reference/block-storage/section_backup-drivers.xml deleted file mode 100644 index 8c66ffb125..0000000000 --- a/doc/config-reference/block-storage/section_backup-drivers.xml +++ /dev/null @@ -1,20 +0,0 @@ - -
- Backup drivers - This section describes how to configure the cinder-backup service and its - drivers. - The backup drivers are included in the Block Storage repository - (https://git.openstack.org/cgit/openstack/cinder/). To set a - backup driver, use the backup_driver flag. - By default, no backup driver is enabled. - - - - -
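For example, to back up volumes to OpenStack Object Storage (swift), the backup driver is typically enabled as follows; verify the driver path against the backup drivers described below:
[DEFAULT]
backup_driver = cinder.backup.drivers.swift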
diff --git a/doc/config-reference/block-storage/section_block-storage-overview.xml b/doc/config-reference/block-storage/section_block-storage-overview.xml deleted file mode 100644 index 49fc863a95..0000000000 --- a/doc/config-reference/block-storage/section_block-storage-overview.xml +++ /dev/null @@ -1,112 +0,0 @@ - -
- Introduction to the Block Storage service - The OpenStack Block Storage service provides persistent block storage resources that - OpenStack Compute instances can consume. This includes secondary attached storage similar to - the Amazon Elastic Block Storage (EBS) offering. In addition, you can write images to a - Block Storage device for Compute to use as a bootable persistent instance. - The Block Storage service differs slightly from the Amazon EBS offering. The Block Storage - service does not provide a shared storage solution like NFS. With the Block Storage service, - you can attach a device to only one instance. - The Block Storage service provides: - - - cinder-api. A WSGI app that authenticates - and routes requests throughout the Block Storage service. It supports the OpenStack - APIs only, although there is a translation that can be done through Compute's EC2 - interface, which calls in to the Block Storage client. - - - cinder-scheduler. Schedules and routes - requests to the appropriate volume service. Depending upon your configuration, this - may be simple round-robin scheduling to the running volume services, or it can be - more sophisticated through the use of the Filter Scheduler. The Filter Scheduler is - the default and enables filters on things like Capacity, Availability Zone, Volume - Types, and Capabilities as well as custom filters. - - - cinder-volume. - Manages Block Storage devices, specifically the - back-end devices themselves. - - - cinder-backup. - Provides a means to back up a Block Storage volume - to OpenStack Object Storage (swift). - - - The Block Storage service contains the following components: - - - Back-end Storage Devices. The Block Storage - service requires some form of back-end storage that the service is built on. The - default implementation is to use LVM on a local volume group named "cinder-volumes." - In addition to the base driver implementation, the Block Storage service also - provides the means to add support for other storage devices to be utilized such as - external Raid Arrays or other storage appliances. These back-end storage devices may - have custom block sizes when using KVM or QEMU as the hypervisor. - - - Users and Tenants (Projects). The Block Storage - service can be used by many different cloud computing consumers or customers - (tenants on a shared system), using role-based access assignments. Roles control the - actions that a user is allowed to perform. In the default configuration, most - actions do not require a particular role, but this can be configured by the system - administrator in the appropriate policy.json file that - maintains the rules. A user's access to particular volumes is limited by tenant, but - the user name and password are assigned per user. Key pairs granting access to a - volume are enabled per user, but quotas to control resource consumption across - available hardware resources are per tenant. - For tenants, quota controls are available to - limit: - - - The number of volumes that can be - created. - - - The number of snapshots that can be - created. - - - The total number of GBs allowed per tenant - (shared between snapshots and volumes). - - - You can revise the default quota values with the Block Storage CLI, so the limits - placed by quotas are editable by admin users. - - - Volumes, Snapshots, and Backups. The basic - resources offered by the Block Storage service are volumes and snapshots which are - derived from volumes and volume backups: - - - Volumes. 
- Allocated block storage resources that can be - attached to instances as secondary storage or - they can be used as the root store to boot - instances. Volumes are persistent R/W block - storage devices most commonly attached to the - compute node through iSCSI. - - - Snapshots. A read-only point in time copy - of a volume. The snapshot can be created from a volume that is currently in - use (through the use of --force True) or in an available - state. The snapshot can then be used to create a new volume through create - from snapshot. - - - Backups. An - archived copy of a volume currently stored in - OpenStack Object Storage (swift). - - - - -
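Regarding the quota controls described above, an administrator can inspect and raise a tenant's quotas with the Block Storage CLI (the tenant ID and values are illustrative):
$ cinder quota-show TENANT_ID
$ cinder quota-update --volumes 20 --gigabytes 2000 TENANT_ID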
diff --git a/doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml b/doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml deleted file mode 100644 index 4c7de8ddf6..0000000000 --- a/doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml +++ /dev/null @@ -1,39 +0,0 @@ -
- Block Storage sample configuration files - All the files in this section can be found in /etc/cinder. -
- cinder.conf - The cinder.conf file is installed in - /etc/cinder by default. When you manually - install the Block Storage service, the options in the - cinder.conf file are set to default values. - The cinder.conf file contains most - of the options to configure the Block Storage service. - - - -
-
- api-paste.ini - Use the api-paste.ini file to configure the Block Storage API service. - - - -
-
- policy.json - The policy.json file defines additional access controls that apply to the Block Storage service. - -
-
- rootwrap.conf - The rootwrap.conf file defines configuration values used by the - rootwrap script when the Block Storage service must escalate its - privileges to those of the root user. - -
-
diff --git a/doc/config-reference/block-storage/section_cinder-log-files.xml b/doc/config-reference/block-storage/section_cinder-log-files.xml deleted file mode 100644 index 20c1425082..0000000000 --- a/doc/config-reference/block-storage/section_cinder-log-files.xml +++ /dev/null @@ -1,77 +0,0 @@ - -
- Log files used by Block Storage - The corresponding log file of each Block Storage service - is stored in the /var/log/cinder/ - directory of the host on which each service runs. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Log files used by Block Storage services
- Log file - - Service/interface (for CentOS, Fedora, openSUSE, Red Hat - Enterprise Linux, and SUSE Linux Enterprise) - - Service/interface (for Ubuntu and Debian) -
- api.log - - openstack-cinder-api - - cinder-api -
- cinder-manage.log - - cinder-manage - - cinder-manage -
- scheduler.log - - openstack-cinder-scheduler - - cinder-scheduler -
- volume.log - - openstack-cinder-volume - - cinder-volume -
-
diff --git a/doc/config-reference/block-storage/section_fc-zoning.xml b/doc/config-reference/block-storage/section_fc-zoning.xml deleted file mode 100644 index d29f545255..0000000000 --- a/doc/config-reference/block-storage/section_fc-zoning.xml +++ /dev/null @@ -1,105 +0,0 @@ -
- Fibre Channel Zone Manager - The Fibre Channel Zone Manager allows FC SAN zone and access - control management in conjunction with Fibre Channel block - storage. The configuration of the Fibre Channel Zone Manager and - the various zone drivers is described in this section. -
- Configure Block Storage to use Fibre Channel Zone Manager - - If Block Storage is configured to use a Fibre Channel volume - driver that supports the Zone Manager, update - cinder.conf to add the following - configuration options and enable the Fibre Channel Zone Manager. - - Make the following changes in the - /etc/cinder/cinder.conf - file. - - To use different Fibre Channel Zone Drivers, use the - parameters described in this section. - - When a multi-backend configuration is used, provide - the configuration option as - part of the volume driver configuration where the - option is specified. - - - The default value of is - None; change it to - fabric to allow fabric zoning. - - - can be configured as - initiator-target or - initiator. - -
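A hedged sketch of the cinder.conf changes described above. The option names shown (zoning_mode, zoning_policy, zone_driver) are the usual Fibre Channel Zone Manager options, and the Brocade driver class is used only as an example; verify both against your release:

[DEFAULT]
# Change from the default of None to enable fabric zoning
zoning_mode = fabric

[fc-zone-manager]
# initiator-target or initiator
zoning_policy = initiator-target
zone_driver = cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver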
-
- Brocade Fibre Channel Zone Driver - The Brocade Fibre Channel Zone Driver performs zoning operations - through SSH. Configure the Brocade Zone Driver and lookup service by - specifying the following parameters: - - Configure SAN fabric parameters in the form of fabric - groups as described in the example below: - - - Define a fabric group for each fabric, using the fabric names - listed in the configuration option - as the group names. - -
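Because the original fabric-group example was rendered as an included table, here is an illustrative sketch instead. The fabric name, switch address, and credentials are assumptions; confirm the exact option names for your release:

[fc-zone-manager]
fc_fabric_names = BRCD_FABRIC_EXAMPLE

[BRCD_FABRIC_EXAMPLE]
# switch management IP and login (assumed placeholders)
fc_fabric_address = 10.24.48.100
fc_fabric_user = admin
fc_fabric_password = password
fc_fabric_port = 22
zoning_policy = initiator-target
zone_activate = True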
- System requirements - Brocade Fibre Channel Zone Driver requires firmware version - FOS v6.4 or higher. - As a best practice for zone management, use a user account - with zoneadmin role. - Users with admin role (including the default - admin user account) are limited to a maximum - of two concurrent SSH sessions. - - For information about how to manage Brocade Fibre Channel - switches, see the Brocade Fabric OS user documentation. -
-
-
- Cisco Fibre Channel Zone Driver - The Cisco Fibre Channel Zone Driver automates zoning operations - through SSH. Configure the Cisco Zone Driver, Cisco Southbound connector, - FC SAN lookup service, and fabric name. - Set the following options in the cinder.conf configuration file. - [fc-zone-manager] -zone_driver = cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver.CiscoFCZoneDriver -fc_san_lookup_service = cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service.CiscoFCSanLookupService -fc_fabric_names = CISCO_FABRIC_EXAMPLE -cisco_sb_connector = cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI - - Configure SAN fabric parameters in the form of fabric - groups as described in the example below: - - - Define a fabric group for each fabric, using the fabric names - listed in the configuration option - as the group names. - - The Cisco Fibre Channel Zone Driver supports basic and enhanced zoning - modes. The zoning VSAN must exist with an active zone set whose name is - the same as the option. - -
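An illustrative fabric group for the CISCO_FABRIC_EXAMPLE fabric named above. The switch address, credentials, and VSAN number are assumptions, and the option names should be checked against your release:

[CISCO_FABRIC_EXAMPLE]
# switch management IP and login (assumed placeholders)
cisco_fc_fabric_address = 10.24.48.200
cisco_fc_fabric_user = admin
cisco_fc_fabric_password = password
cisco_fc_fabric_port = 22
cisco_zoning_policy = initiator-target
cisco_zone_activate = True
cisco_zoning_vsan = 100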
- System requirements - Cisco MDS 9000 Family Switches. - Cisco MDS NX-OS Release 6.2(9) or later. - For information about how to manage Cisco Fibre Channel - switches, see the Cisco MDS 9000 user documentation. -
-
-
diff --git a/doc/config-reference/block-storage/section_misc.xml b/doc/config-reference/block-storage/section_misc.xml deleted file mode 100644 index d144fe0fdb..0000000000 --- a/doc/config-reference/block-storage/section_misc.xml +++ /dev/null @@ -1,56 +0,0 @@ - -
- Additional options - - - These options can also be set in the - cinder.conf file. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/doc/config-reference/block-storage/section_volume-drivers.xml b/doc/config-reference/block-storage/section_volume-drivers.xml deleted file mode 100644 index 726e904785..0000000000 --- a/doc/config-reference/block-storage/section_volume-drivers.xml +++ /dev/null @@ -1,59 +0,0 @@ - -
- Volume drivers - To use different volume drivers for the cinder-volume service, use - the parameters described in these sections. - The volume drivers are included in the Block Storage repository - (https://git.openstack.org/cgit/openstack/cinder/). To set a - volume driver, use the volume_driver flag. - The default is: - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/doc/config-reference/block-storage/section_volume-encryption.xml b/doc/config-reference/block-storage/section_volume-encryption.xml deleted file mode 100644 index fa1df24a76..0000000000 --- a/doc/config-reference/block-storage/section_volume-encryption.xml +++ /dev/null @@ -1,224 +0,0 @@ - - - %openstack; - ]> -
- Volume encryption supported by the key manager - - We recommend the Key management service (barbican) for storing - encryption keys used by the OpenStack volume encryption feature. It - can be enabled by updating - cinder.conf and nova.conf. - - -
- Initial configuration - - Configuration changes need to be made to any nodes running the - cinder-volume or - nova-compute - server. - Steps to - update cinder-volume - servers: - Edit the /etc/cinder/cinder.conf file to - use Key management service as follows: - - - Look for the [keymgr] section. - - - Enter a new line directly below - [keymgr] with the following: - api_class=cinder.keymgr.barbican.BarbicanKeyManager - - Use a '#' prefix to comment out the line in this - section that begins with 'fixed_key'. - - - - - - Restart cinder-volume. - - - - Update nova-compute - servers: - - Repeat the same steps above to set up the Key management - service by editing /etc/nova/nova.conf - - - - Restart nova-compute. - - - Follow the instructions in the OpenStack Admin User Guide under the heading - -Create an encrypted volume type - or alternatively, see - - in this manual to do this via the command line. - Create an encrypted volume by typing the command: - -$ cinder create --name encryptedVolume --volume-type LUKS 1 - For alternate instructions and details, including the console output, see the - in this document. -
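The resulting [keymgr] stanza described above would look roughly like this in cinder.conf; for nova.conf the class path is typically the nova namespace (nova.keymgr.barbican.BarbicanKeyManager), but verify that for your release:

[keymgr]
# Use the Key management service (barbican) instead of the fixed-key manager
api_class = cinder.keymgr.barbican.BarbicanKeyManager
# fixed_key = ...   <- comment out any existing fixed_key line with '#'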
-
- Create an encrypted volume type - - Block Storage volume type assignment provides scheduling to a specific back-end, - and can be used to specify actionable information for a back-end storage device. - This example creates a volume type called LUKS and provides configuration information - for the storage system to encrypt or decrypt the volume. - - - Source your admin credentials: - $ source admin-openrc.sh - - - Create the volume type: - $ cinder type-create LUKS -+--------------------------------------+-------+ -| ID | Name | -+--------------------------------------+-------+ -| e64b35a4-a849-4c53-9cc7-2345d3c8fbde | LUKS | -+--------------------------------------+-------+ - - - Mark the volume type as encrypted and provide the necessary details. Use - --control_location to specify where encryption is - performed: front-end (default) or back-end. - $ cinder encryption-type-create --cipher aes-xts-plain64 --key_size 512 \ - --control_location front-end LUKS nova.volume.encryptors.luks.LuksEncryptor -+--------------------------------------+-------------------------------------------+-----------------+----------+------------------+ -| Volume Type ID | Provider | Cipher | Key Size | Control Location | -+--------------------------------------+-------------------------------------------+-----------------+----------+------------------+ -| e64b35a4-a849-4c53-9cc7-2345d3c8fbde | nova.volume.encryptors.luks.LuksEncryptor | aes-xts-plain64 | 512 | front-end | -+--------------------------------------+-------------------------------------------+-----------------+----------+------------------+ - - - The OpenStack dashboard (horizon) supports creating the encrypted volume type as of the Kilo release. -
-
- Create an encrypted volume - Use the OpenStack dashboard (horizon), or the cinder command to create volumes just as you normally would. For an encrypted volume use the LUKS tag, - for unencrypted leave the LUKS tag off. - - - Source your admin credentials: - $ source admin-openrc.sh - - - Create an unencrypted 1 GB test volume: - $ cinder create --display-name 'unencrypted volume' 1 -+--------------------------------+--------------------------------------+ -| Property | Value | -+--------------------------------+--------------------------------------+ -| attachments | [] | -| availability_zone | nova | -| bootable | false | -| created_at | 2014-08-10T01:24:03.000000 | -| description | None | -| encrypted | False | -| id | 081700fd-2357-44ff-860d-2cd78ad9c568 | -| metadata | {} | -| name | unencrypted volume | -| os-vol-host-attr:host | controller | -| os-vol-mig-status-attr:migstat | None | -| os-vol-mig-status-attr:name_id | None | -| os-vol-tenant-attr:tenant_id | 08fdea76c760475f82087a45dbe94918 | -| size | 1 | -| snapshot_id | None | -| source_volid | None | -| status | creating | -| user_id | 7cbc6b58b372439e8f70e2a9103f1332 | -| volume_type | None | -+--------------------------------+--------------------------------------+ - - - Create an encrypted 1 GB test volume: - $ cinder create --display-name 'encrypted volume' --volume-type LUKS 1 -+--------------------------------+--------------------------------------+ -| Property | Value | -+--------------------------------+--------------------------------------+ -| attachments | [] | -| availability_zone | nova | -| bootable | false | -| created_at | 2014-08-10T01:24:24.000000 | -| description | None | -| encrypted | True | -| id | 86060306-6f43-4c92-9ab8-ddcd83acd973 | -| metadata | {} | -| name | encrypted volume | -| os-vol-host-attr:host | controller | -| os-vol-mig-status-attr:migstat | None | -| os-vol-mig-status-attr:name_id | None | -| os-vol-tenant-attr:tenant_id | 08fdea76c760475f82087a45dbe94918 | -| size | 1 | -| snapshot_id | None | -| source_volid | None | -| status | creating | -| user_id | 7cbc6b58b372439e8f70e2a9103f1332 | -| volume_type | LUKS | -+--------------------------------+--------------------------------------+ - - - Notice the encrypted parameter; it will show True/False. The option is also shown for easy review. - - Due to the issue that some of the volume drivers do not set - 'encrypted' flag, attaching of encrypted volumes to a virtual guest - will fail, because OpenStack Compute service will not run - encryption providers. - -
-
- Testing volume encryption - This is a simple test scenario to help validate your encryption. It assumes an LVM based Block Storage server. - Perform these steps after completing the volume encryption setup and creating the volume-type for LUKS as described in the preceding sections. - - - Create a VM: - $ nova boot --flavor m1.tiny --image cirros-0.3.1-x86_64-disk vm-test - - - Create two volumes, one encrypted and one not encrypted then attach them to your VM: - $ cinder create --display-name 'unencrypted volume' 1 -$ cinder create --display-name 'encrypted volume' --volume-type LUKS 1 -$ cinder list -+--------------------------------------+-----------+--------------------+------+-------------+----------+-------------+ -| ID | Status | Name | Size | Volume Type | Bootable | Attached to | -+--------------------------------------+-----------+--------------------+------+-------------+----------+-------------+ -| 64b48a79-5686-4542-9b52-d649b51c10a2 | available | unencrypted volume | 1 | None | false | | -| db50b71c-bf97-47cb-a5cf-b4b43a0edab6 | available | encrypted volume | 1 | LUKS | false | | -+--------------------------------------+-----------+--------------------+------+-------------+----------+-------------+ -$ nova volume-attach vm-test 64b48a79-5686-4542-9b52-d649b51c10a2 /dev/vdb -$ nova volume-attach vm-test db50b71c-bf97-47cb-a5cf-b4b43a0edab6 /dev/vdc - - - On the VM, send some text to the newly attached volumes and synchronize them: - # echo "Hello, world (unencrypted /dev/vdb)" >> /dev/vdb -# echo "Hello, world (encrypted /dev/vdc)" >> /dev/vdc -# sync && sleep 2 -# sync && sleep 2 - - - On the system hosting cinder volume services, synchronize to flush the I/O cache then test to see if your strings can be found: - # sync && sleep 2 -# sync && sleep 2 -# strings /dev/stack-volumes/volume-* | grep "Hello" -Hello, world (unencrypted /dev/vdb) - - - In the above example you see that the search returns the string written to the unencrypted volume, but not the encrypted one. -
-
diff --git a/doc/config-reference/ch_baremetalconfigure.xml b/doc/config-reference/ch_baremetalconfigure.xml deleted file mode 100644 index 7200b96e57..0000000000 --- a/doc/config-reference/ch_baremetalconfigure.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - Bare metal - The Bare metal service is capable of managing and provisioning physical - machines. The configuration file of this module is - /etc/ironic/ironic.conf. - The following tables provide a comprehensive list of the Bare metal service - configuration options. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/config-reference/ch_blockstorageconfigure.xml b/doc/config-reference/ch_blockstorageconfigure.xml deleted file mode 100644 index d9d840b106..0000000000 --- a/doc/config-reference/ch_blockstorageconfigure.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - Block Storage - The OpenStack Block Storage service works with many different - storage drivers that you can configure by using these instructions. - - - - - - - - - - diff --git a/doc/config-reference/ch_computeconfigure.xml b/doc/config-reference/ch_computeconfigure.xml deleted file mode 100644 index a6d5827990..0000000000 --- a/doc/config-reference/ch_computeconfigure.xml +++ /dev/null @@ -1,100 +0,0 @@ - - - Compute - The OpenStack Compute service is a cloud computing fabric - controller, which is the main part of an IaaS system. You can use - OpenStack Compute to host and manage cloud computing systems. This - section describes the OpenStack Compute configuration - options. - To configure your Compute installation, you must define - configuration options in these files: - - - nova.conf. Contains most of the - Compute configuration options. Resides in the - /etc/nova directory. - - - api-paste.ini. Defines Compute - limits. Resides in the /etc/nova - directory. - - - Related Image service and Identity service management - configuration files. - - - -
- Configure logging - You can use the nova.conf file to configure - where Compute logs events, the level of logging, and log - formats. - To customize log formats for OpenStack Compute, use the - configuration option settings documented in - . -
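A minimal logging sketch for nova.conf, assuming file-based logging; the path and verbosity levels are illustrative choices rather than recommendations:

[DEFAULT]
debug = False
verbose = True
log_dir = /var/log/nova
use_syslog = False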
-
- Configure authentication and authorization - There are different methods of authentication for the - OpenStack Compute project, including no authentication. The - preferred system is the OpenStack Identity service, code-named - Keystone. - To customize authorization settings for Compute, use the - configuration options documented in - . - To customize certificate authority settings for Compute, use the - configuration options documented in . - To customize Compute and the Identity service to use LDAP as a - backend, refer to the configuration options documented in . -
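A hedged example of pointing Compute at the Identity service. The endpoint host and credentials are placeholders, and some authtoken option names changed across releases, so verify them against your version:

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = NOVA_PASS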
-
- - Configure resize - Resize (or Server resize) is the ability to change the - flavor of a server, thus allowing it to upscale or downscale - according to user needs. For this feature to work properly, you - might need to configure some underlying virt layers. -
- KVM - Resize on KVM is implemented currently by transferring the - images between compute nodes over ssh. For KVM you need - hostnames to resolve properly and passwordless ssh access - between your compute hosts. Direct access from one compute - host to another is needed to copy the VM file across. - Cloud end users can find out how to resize a server by - reading the OpenStack End User Guide. -
-
- XenServer - To get resize to work with XenServer (and XCP), you need - to establish a root trust between all hypervisor nodes and - provide an /image mount point to your hypervisors dom0. -
- -
- - - - - - - - - - - - - - - - -
diff --git a/doc/config-reference/ch_config-overview.xml b/doc/config-reference/ch_config-overview.xml deleted file mode 100644 index 7d38df00c0..0000000000 --- a/doc/config-reference/ch_config-overview.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - OpenStack configuration overview - OpenStack is a collection of open source project components - that enable setting up cloud services. Each component uses similar - configuration techniques and a common framework for INI file - options. - - This guide pulls together multiple references and configuration options for - the following OpenStack components: - - - Bare metal service - OpenStack Block Storage - OpenStack Compute - OpenStack dashboard - Database service for OpenStack - Data processing service - OpenStack Identity - OpenStack Image service - OpenStack Networking - OpenStack Object Storage - Orchestration - Telemetry - - - - diff --git a/doc/config-reference/ch_dashboardconfigure.xml b/doc/config-reference/ch_dashboardconfigure.xml deleted file mode 100644 index ea44d72fb5..0000000000 --- a/doc/config-reference/ch_dashboardconfigure.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - Dashboard - This chapter describes how to configure the OpenStack - dashboard with Apache web server. - - - - - diff --git a/doc/config-reference/ch_databaseserviceconfigure.xml b/doc/config-reference/ch_databaseserviceconfigure.xml deleted file mode 100644 index 20377cff36..0000000000 --- a/doc/config-reference/ch_databaseserviceconfigure.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - Database service - The Database service provides a scalable and reliable Cloud - Database-as-a-Service functionality for both relational and - non-relational database engines. - - The following tables provide a comprehensive list of the - Database service configuration options. - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/config-reference/ch_dataprocessingserviceconfigure.xml b/doc/config-reference/ch_dataprocessingserviceconfigure.xml deleted file mode 100644 index e5d7074cd5..0000000000 --- a/doc/config-reference/ch_dataprocessingserviceconfigure.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - Data processing service - - The Data processing service (sahara) provides a scalable - data-processing stack and associated management interfaces. - - - The following tables provide a comprehensive list of the - Data processing service configuration options. - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/config-reference/ch_identityconfigure.xml b/doc/config-reference/ch_identityconfigure.xml deleted file mode 100644 index 9d0fc98f95..0000000000 --- a/doc/config-reference/ch_identityconfigure.xml +++ /dev/null @@ -1,175 +0,0 @@ - - - Identity service - This chapter details the OpenStack Identity service configuration -options. For installation prerequisites and step-by-step walkthroughs, see the -OpenStack Installation Guide for your distribution (docs.openstack.org) and Cloud - Administrator Guide. - -
- Caching layer - Identity supports a caching layer that sits above the - configurable subsystems, such as token or assignment. The - majority of the caching configuration options are set in the - [cache] section. However, each section that - has the capability to be cached usually has a - option that will toggle caching for that - specific section. By default, caching is globally disabled. - Options are as follows: - - - - Current functional backends are: - - dogpile.cache.memcached - Memcached backend using - the standard python-memcached library - dogpile.cache.pylibmc - Memcached backend using - the pylibmc library - dogpile.cache.bmemcached - Memcached backend using - the python-binary-memcached library - dogpile.cache.redis - Redis backend - dogpile.cache.dbm - Local DBM file backend - dogpile.cache.memory - In-memory cache, not - suitable for use outside of testing because it does not clean up its - internal cache on cache expiration and does not share cache - between processes. This means that caching and cache invalidation - will not be consistent or reliable. - - dogpile.cache.mongo - MongoDB caching - backend - -
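For example, enabling the memcached backend globally and turning on token caching might look like the following in keystone.conf; the memcached address is an assumption:

[cache]
enabled = true
backend = dogpile.cache.memcached
backend_argument = url:127.0.0.1:11211

[token]
caching = true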
- -
- Identity service configuration file - The Identity service is configured in the - /etc/keystone/keystone.conf file. - The following tables provide a comprehensive list of the Identity - service options. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- Domain-specific configuration - The Identity service supports domain-specific - Identity drivers which allow a domain to have its - own LDAP or SQL back end. By default, domain-specific - drivers are disabled. - Domain-specific Identity configuration options can be stored in - domain-specific configuration files, or in the Identity SQL database - using API REST calls. - - Storing and managing configuration options in - a SQL database is experimental in Kilo. - -
- Enable drivers for domain-specific configuration files - To enable domain-specific drivers, - set these options in the /etc/keystone/keystone.conf - file: - [identity] -domain_specific_drivers_enabled = True -domain_config_dir = /etc/keystone/domains - When you enable domain-specific drivers, Identity looks - in the directory for - configuration files that are named as - keystone.DOMAIN_NAME.conf. - Any domain without a domain-specific configuration - file uses options in the primary configuration file. -
-
- Domain-specific configuration file - Any options that you define in the domain-specific - configuration file override options in the /etc/keystone/keystone.conf - configuration file. - Domains configured for the service user or project - use the Identity API v3 to retrieve the service token. - To configure the domain for the service user, set the - following options in the [DEFAULT] section of the - /etc/keystone/domains/keystone.DOMAIN_NAME.conf - file: - admin_user_domain_id = USER_DOMAIN_ID -admin_user_domain_name = USER_DOMAIN_NAME - Replace USER_DOMAIN_ID with - the Identity service account user domain ID, and - USER_DOMAIN_NAME with the Identity - service account user domain name. - To configure the domain for the project, set the - following options in the [DEFAULT] section of the - /etc/keystone/domains/keystone.DOMAIN_NAME.conf - file: - admin_project_domain_id = PROJECT_DOMAIN_ID -admin_project_domain_name = PROJECT_DOMAIN_NAME - Replace PROJECT_DOMAIN_ID with - the Identity service account project domain ID, and - PROJECT_DOMAIN_NAME with the - Identity service account project domain name. -
-
- Enable drivers for storing configuration options in SQL database - To enable domain-specific drivers, set these options in - the /etc/keystone/keystone.conf file: - [identity] -domain_specific_drivers_enabled = True -domain_configurations_from_database = True - Any domain-specific configuration options specified through - the Identity v3 API will override domain-specific configuration files - in the /etc/keystone/domains directory. -
-
- Migrate domain-specific configuration files to the SQL database - You can use the keystone-manage command - to migrate configuration options in domain-specific configuration - files to the SQL database: - $ keystone-manage domain_config_upload --all - To upload options from a specific domain-configuration - file, specify the domain name: - $ keystone-manage domain_config_upload --domain-name DOMAIN_NAME -
-
- - -
diff --git a/doc/config-reference/ch_imageservice.xml b/doc/config-reference/ch_imageservice.xml deleted file mode 100644 index 0c79a27e12..0000000000 --- a/doc/config-reference/ch_imageservice.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - Image service - Compute relies on an external image service to store virtual - machine images and maintain a catalog of available images. By - default, Compute is configured to use the OpenStack Image service - (glance), which is currently the only supported image - service. - - - If your installation requires euca2ools to register new - images, you must run the nova-objectstore - service. This service provides an Amazon S3 front-end for Glance, - which is required by euca2ools. - - To customize the Compute Service, use the configuration option - settings documented in and - . - - You can modify many options in the OpenStack Image service. - The following tables provide a comprehensive list. - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/config-reference/ch_networkingconfigure.xml b/doc/config-reference/ch_networkingconfigure.xml deleted file mode 100644 index bdcc42c17d..0000000000 --- a/doc/config-reference/ch_networkingconfigure.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - Networking - This chapter explains the OpenStack Networking configuration options. For installation - prerequisites, steps, and use cases, see the OpenStack Installation - Guide for your distribution (docs.openstack.org) and Cloud - Administrator Guide. - - - - - diff --git a/doc/config-reference/ch_objectstorageconfigure.xml b/doc/config-reference/ch_objectstorageconfigure.xml deleted file mode 100644 index 5c47320be3..0000000000 --- a/doc/config-reference/ch_objectstorageconfigure.xml +++ /dev/null @@ -1,256 +0,0 @@ - - - Object Storage - OpenStack Object Storage uses multiple configuration files - for multiple services and background daemons, and - paste.deploy to manage server - configurations. Default configuration options appear in the - [DEFAULT] section. You can override the default values - by setting values in the other sections. - - -
- Object server configuration - Find an example object server configuration at - etc/object-server.conf-sample in the source code - repository. - The available configuration options are: - - - - - - - - - -
- Sample object server configuration file - -
-
-
- Object expirer configuration - Find an example object expirer configuration at - etc/object-expirer.conf-sample in the source code - repository. - The available configuration options are: - - - - - - - -
- Sample object expirer configuration file - -
-
-
- Container server configuration - Find an example container server configuration at - etc/container-server.conf-sample - in the source code repository. - The available configuration options are: - - - - - - - - - - - -
- Sample container server configuration file - -
- -
-
- Container sync realms configuration - Find an example container sync realms configuration at - etc/container-sync-realms.conf-sample - in the source code repository. - The available configuration options are: - - - -
- Sample container sync realms configuration file - -
-
-
- Container reconciler configuration - Find an example container reconciler configuration at - etc/container-reconciler.conf-sample - in the source code repository. - The available configuration options are: - - - - - - - -
- Sample container sync reconciler configuration file - -
-
- -
- Account server configuration - Find an example account server configuration at - etc/account-server.conf-sample in - the source code repository. - The available configuration options are: - - - - - - - - - -
- Sample account server configuration file - -
-
-
- Proxy server configuration - Find an example proxy server configuration at - etc/proxy-server.conf-sample in - the source code repository. - The available configuration options are: - - - - - - - - - - - - - - - - - -
- Sample proxy server configuration file - -
-
-
- Proxy server memcache configuration - Find an example memcache configuration for the proxy server at - etc/memcache.conf-sample - in the source code repository. - The available configuration options are: - -
-
- Rsyncd configuration - Find an example rsyncd configuration at - etc/rsyncd.conf-sample - in the source code repository. - The available configuration options are: - - - - - - - - - - -
- - - - - -
diff --git a/doc/config-reference/ch_orchestrationconfigure.xml b/doc/config-reference/ch_orchestrationconfigure.xml deleted file mode 100644 index a1ab6e6c97..0000000000 --- a/doc/config-reference/ch_orchestrationconfigure.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - Orchestration - The Orchestration service is designed to manage the lifecycle of - infrastructure and applications within OpenStack clouds. Its - various agents and services are configured in the - /etc/heat/heat.conf file. - To install Orchestration, see the OpenStack Installation - Guide for your distribution (docs.openstack.org). - - The following tables provide a comprehensive list of the Orchestration - configuration options. - - - - - - - - - - - - - - - - - diff --git a/doc/config-reference/ch_sharedfilesystemsconfigure.xml b/doc/config-reference/ch_sharedfilesystemsconfigure.xml deleted file mode 100644 index a14397c419..0000000000 --- a/doc/config-reference/ch_sharedfilesystemsconfigure.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - Shared File Systems - The Shared File Systems service works with many different - drivers that you can configure by using these instructions. - - - - - - - diff --git a/doc/config-reference/ch_telemetryconfigure.xml b/doc/config-reference/ch_telemetryconfigure.xml deleted file mode 100644 index f473ce6b01..0000000000 --- a/doc/config-reference/ch_telemetryconfigure.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - Telemetry - The Telemetry service collects measurements within OpenStack. Its - various agents and services are configured in the - /etc/ceilometer/ceilometer.conf file. - To install Telemetry, see the OpenStack Installation - Guide for your distribution (docs.openstack.org). - - - - - - diff --git a/doc/config-reference/compute/samples/cells-resp.json b/doc/config-reference/compute/samples/cells-resp.json deleted file mode 100644 index cc74930d4d..0000000000 --- a/doc/config-reference/compute/samples/cells-resp.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "parent": { - "name": "parent", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": true - }, - "cell1": { - "name": "cell1", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit1.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": false - }, - "cell2": { - "name": "cell2", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit2.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": false - } -} diff --git a/doc/config-reference/compute/section_compute-cells.xml b/doc/config-reference/compute/section_compute-cells.xml deleted file mode 100644 index 439ed1ff3e..0000000000 --- a/doc/config-reference/compute/section_compute-cells.xml +++ /dev/null @@ -1,327 +0,0 @@ -
- Cells - - Cells functionality - enables you to scale an OpenStack Compute cloud in a more - distributed fashion without having to use complicated - technologies like database and message queue clustering. It - supports very large deployments. - When this functionality is enabled, the hosts in an - OpenStack Compute cloud are partitioned into groups called - cells. Cells are configured as a tree. The top-level cell - should have a host that runs a nova-api service, but no nova-compute services. Each - child cell should run all of the typical nova-* services in a regular - Compute cloud except for nova-api. You can think of cells as a normal - Compute deployment in that each cell has its own database - server and message queue broker. - The nova-cells - service handles communication between cells and selects cells - for new instances. This service is required for every cell. - Communication between cells is pluggable, and currently the - only option is communication through RPC. - Cells scheduling is separate from host scheduling. - nova-cells first - picks a cell. Once a cell is selected and the new build - request reaches its nova-cells service, it is sent over to the - host scheduler in that cell and the build proceeds as it would - have without cells. - - Cell functionality is currently considered - experimental. - -
- Cell configuration options - Cells are disabled by default. All cell-related - configuration options appear in the - [cells] section in - nova.conf. The following - cell-related options are currently supported: - - - - Set to True to turn - on cell functionality. Default is - false. - - - - - - Name of the current cell. Must be unique - for each cell. - - - - - - List of arbitrary - key=value - pairs defining capabilities of the current - cell. Values include - hypervisor=xenserver;kvm,os=linux;windows. - - - - - - How long in seconds to wait for replies - from calls between cells. - - - - - - Filter classes that the cells scheduler - should use. By default, uses - "nova.cells.filters.all_filters" - to map to all cells filters included with - Compute. - - - - - - Weight classes that the scheduler for cells uses. By default, uses - nova.cells.weights.all_weighers - to map to all cells weight algorithms - included with Compute. - - - - - - Multiplier used to weight RAM. - Negative numbers indicate that Compute should - stack VMs on one host instead of spreading - out new VMs to more hosts in the cell. The - default value is 10.0. - - - -
-
- Configure the API (top-level) cell - The cell type must be changed in the API cell so - that requests can be proxied through nova-cells down to - the correct cell properly. Edit the nova.conf - file in the API cell, and specify api - in the key:[DEFAULT] -compute_api_class=nova.compute.cells_api.ComputeCellsAPI -... - -[cells] -cell_type= api - -
-
- Configure the child cells - Edit the nova.conf file in the child - cells, and specify compute in the - key:[DEFAULT] -# Disable quota checking in child cells. Let API cell do it exclusively. -quota_driver=nova.quota.NoopQuotaDriver - -[cells] -cell_type = compute -
-
- Configure the database in each cell - Before bringing the services online, the database in - each cell needs to be configured with information about - related cells. In particular, the API cell needs to know - about its immediate children, and the child cells must - know about their immediate agents. The information needed - is the RabbitMQ server - credentials for the particular cell. - Use the nova-manage cell create - command to add this information to the database in each - cell:# nova-manage cell create -h -usage: nova-manage cell create [-h] [--name <name>] - [--cell_type <parent|api|child|compute>] - [--username <username>] [--password <password>] - [--broker_hosts <broker_hosts>] - [--hostname <hostname>] [--port <number>] - [--virtual_host <virtual_host>] - [--woffset <float>] [--wscale <float>] - -optional arguments: - -h, --help show this help message and exit - --name <name> Name for the new cell - --cell_type <parent|api|child|compute> - Whether the cell is parent/api or child/compute - --username <username> - Username for the message broker in this cell - --password <password> - Password for the message broker in this cell - --broker_hosts <broker_hosts> - Comma separated list of message brokers in this cell. - Each Broker is specified as hostname:port with both - mandatory. This option overrides the --hostname and - --port options (if provided). - --hostname <hostname> - Address of the message broker in this cell - --port <number> Port number of the message broker in this cell - --virtual_host <virtual_host> - The virtual host of the message broker in this cell - --woffset <float> - --wscale <float> - As an example, assume an API cell named - api and a child cell named - cell1. - Within the api cell, specify the - following RabbitMQ server - information:rabbit_host=10.0.0.10 -rabbit_port=5672 -rabbit_username=api_user -rabbit_password=api_passwd -rabbit_virtual_host=api_vhost - Within the cell1 child cell, specify - the following RabbitMQ server - information:rabbit_host=10.0.1.10 -rabbit_port=5673 -rabbit_username=cell1_user -rabbit_password=cell1_passwd -rabbit_virtual_host=cell1_vhost - You can run this in the API cell as - root:# nova-manage cell create --name cell1 --cell_type child \ - --username cell1_user --password cell1_passwd --hostname 10.0.1.10 \ - --port 5673 --virtual_host cell1_vhost --woffset 1.0 --wscale 1.0 - Repeat the previous steps for all child cells. - In the child cell, run the following, as - root:# nova-manage cell create --name api --cell_type parent \ - --username api_user --password api_passwd --hostname 10.0.0.10 \ - --port 5672 --virtual_host api_vhost --woffset 1.0 --wscale 1.0 - To customize the Compute cells, use the configuration - option settings documented in . -
-
- Cell scheduling configuration - To determine the best cell to use to launch a new - instance, Compute uses a set of filters and weights - defined in the /etc/nova/nova.conf - file. The following options are available to prioritize - cells for scheduling: - - - - - List of filter classes. By - default - - is specified, which maps to all cells filters - included with Compute (see ). - - - - - - List of weight classes. By - default - - is specified, which maps to all cell weight - algorithms included with Compute. - The following modules are available: - - - mute_child. - Downgrades the likelihood of child - cells being chosen for scheduling - requests, which haven't sent capacity - or capability updates in a while. - Options include - - (multiplier for mute children; value - should be negative). - - - ram_by_instance_type. - Select cells with the most RAM - capacity for the instance type being - requested. Because higher weights win, - Compute returns the number of - available units for the instance type - requested. The - - option defaults to 10.0 that adds to - the weight by a factor of 10. Use a - negative number to stack VMs on one - host instead of spreading out new VMs - to more hosts in the cell. - - - weight_offset. - Allows modifying the database to - weight a particular cell. You can use - this when you want to disable a cell - (for example, '0'), or to set a - default cell by making its - weight_offset very high (for example, - '999999999999999'). The highest weight - will be the first cell to be scheduled - for launching an instance. - - - - - - Additionally, the following options are available for - the cell scheduler: - - - - - Specifies how many times the scheduler tries - to launch a new instance when no cells are - available (default=10). - - - - - - Specifies the delay (in seconds) between - retries (default=2). - - - - As an admin user, you can also add a filter that directs - builds to a particular cell. The - policy.json file must have a line - with "cells_scheduler_filter:TargetCellFilter" : - "is_admin:True" to let an admin user specify - a scheduler hint to direct a build to a particular - cell. -
-
- Optional cell configuration - Cells store all inter-cell communication data, including - user names and passwords, in the database. Because the - cells data is not updated very frequently, use the - option to specify - a JSON file to store cells data. With this configuration, - the database is no longer consulted when reloading the - cells data. The file must have columns present in the Cell - model (excluding common database fields and the - column). You must specify the - queue connection information through a - field, instead of - , , - and so on. The has the - following form: - rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST - The scheme can be either qpid or - rabbit, as shown previously. The - following sample shows this optional configuration: - -
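The stripped sample referred to above mirrors the cells-resp.json file removed earlier in this change; a shortened, illustrative version follows (hosts and message-queue credentials are placeholders):

{
    "parent": {
        "name": "parent",
        "api_url": "http://api.example.com:8774",
        "transport_url": "rabbit://rabbit.example.com",
        "weight_offset": 0.0,
        "weight_scale": 1.0,
        "is_parent": true
    },
    "cell1": {
        "name": "cell1",
        "api_url": "http://api.example.com:8774",
        "transport_url": "rabbit://rabbit1.example.com",
        "weight_offset": 0.0,
        "weight_scale": 1.0,
        "is_parent": false
    }
}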
-
diff --git a/doc/config-reference/compute/section_compute-conductor.xml b/doc/config-reference/compute/section_compute-conductor.xml deleted file mode 100644 index 312123492f..0000000000 --- a/doc/config-reference/compute/section_compute-conductor.xml +++ /dev/null @@ -1,43 +0,0 @@ - -
- Conductor -The nova-conductor - service enables OpenStack to function without compute nodes - accessing the database. Conceptually, it implements a new - layer on top of nova-compute. It should not be deployed on - compute nodes, or else the security benefits of removing - database access from nova-compute are negated. Just like - other nova services such as nova-api or nova-scheduler, it can be scaled - horizontally. You can run multiple instances of nova-conductor on different - machines as needed for scaling purposes. -The methods exposed by nova-conductor are relatively - simple methods used by nova-compute to offload its database - operations. Places where nova-compute previously performed database - access are now talking to nova-conductor. However, we have plans in - the medium to long term to move more and more of what is - currently in nova-compute up to the nova-conductor layer. The - Compute service will start to look like a less intelligent - slave service to nova-conductor. The conductor service will - implement long running complex operations, ensuring forward - progress and graceful error handling. This will be especially - beneficial for operations that cross multiple compute nodes, - such as migrations or resizes. -To customize the Conductor, use the configuration option settings - documented in . -
- diff --git a/doc/config-reference/compute/section_compute-config-samples.xml b/doc/config-reference/compute/section_compute-config-samples.xml deleted file mode 100644 index 455717835f..0000000000 --- a/doc/config-reference/compute/section_compute-config-samples.xml +++ /dev/null @@ -1,85 +0,0 @@ -
- Example <filename>nova.conf</filename> configuration - files - The following sections describe the configuration options in - the nova.conf file. You must copy the - nova.conf file to each compute node. - The sample nova.conf files show examples - of specific configurations. - - Small, private cloud - This example nova.conf file - configures a small private cloud with cloud controller - services, database server, and messaging server on the - same server. In this case, CONTROLLER_IP represents the IP - address of a central server, BRIDGE_INTERFACE represents - the bridge such as br100, the NETWORK_INTERFACE represents - an interface to your VLAN setup, and passwords are - represented as DB_PASSWORD_COMPUTE for your Compute (nova) - database password, and RABBIT PASSWORD represents the - password to your message queue installation. - - - - KVM, Flat, MySQL, and Glance, OpenStack or EC2 - API - This example nova.conf file, from - an internal Rackspace test system, is used for - demonstrations. - -
- KVM, Flat, MySQL, and Glance, OpenStack or EC2 - API - - - - - -
-
- - XenServer, Flat networking, MySQL, and Glance, - OpenStack API - This example nova.conf file is from - an internal Rackspace test system. - verbose -nodaemon -network_manager=nova.network.manager.FlatManager -image_service=nova.image.glance.GlanceImageService -flat_network_bridge=xenbr0 -compute_driver=xenapi.XenAPIDriver -xenapi_connection_url=https://<XenServer IP> -xenapi_connection_username=root -xenapi_connection_password=supersecret -xenapi_image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore -rescue_timeout=86400 -use_ipv6=true - -# To enable flat_injected, currently only works on Debian-based systems -flat_injected=true -ipv6_backend=account_identifier -ca_path=./nova/CA - -# Add the following to your conf file if you're running on Ubuntu Maverick -xenapi_remap_vbd_dev=true -[database] -connection=mysql+pymysql://root:<password>@127.0.0.1/nova -
- KVM, Flat, MySQL, and Glance, OpenStack or EC2 - API - - - - - -
-
-
diff --git a/doc/config-reference/compute/section_compute-configure-backing-storage.xml b/doc/config-reference/compute/section_compute-configure-backing-storage.xml deleted file mode 100644 index e26cd81a66..0000000000 --- a/doc/config-reference/compute/section_compute-configure-backing-storage.xml +++ /dev/null @@ -1,38 +0,0 @@ -
- Configure Compute backing storage - - Backing Storage is the storage used to provide the expanded - operating system image, and any ephemeral storage. Inside the - virtual machine, this is normally presented as two virtual hard - disks (for example, /dev/vda and - /dev/vdb respectively). However, inside - OpenStack, this can be derived from one of three methods: LVM, - QCOW or RAW, chosen using the images_type - option in nova.conf on the compute node. - - - QCOW is the default backing store. It uses a copy-on-write - philosophy to delay allocation of storage until it is actually - needed. This means that the space required for the backing of an - image can be significantly less on the real disk than what seems - available in the virtual machine operating system. - - - RAW creates files without any sort of file formatting, effectively - creating files with the plain binary one would normally see on a - real disk. This can increase performance, but means that the - entire size of the virtual disk is reserved on the physical disk. - - - Local LVM volumes can also be used. Set - images_volume_group = nova_local where - nova_local is the name of the LVM group you - have created. - -
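A sketch of the relevant nova.conf settings on a compute node; the placement of these options in the [libvirt] section is assumed for recent releases, and the volume group name follows the example in the text:

[libvirt]
# One of: qcow2 (the default), raw, lvm
images_type = lvm
images_volume_group = nova_local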
diff --git a/doc/config-reference/compute/section_compute-configure-db.xml b/doc/config-reference/compute/section_compute-configure-db.xml deleted file mode 100644 index a56d255e94..0000000000 --- a/doc/config-reference/compute/section_compute-configure-db.xml +++ /dev/null @@ -1,24 +0,0 @@ -
- Database configuration - You can configure OpenStack Compute to use any - SQLAlchemy-compatible database. The database name is - nova. The nova-conductor service is the - only service that writes to the database. The other Compute - services access the database through the nova-conductor service. - To ensure that the database schema is current, run the following command: - # nova-manage db sync - If nova-conductor - is not used, entries to the database are mostly written by the - nova-scheduler - service, although all services must be able to update - entries in the database. - In either case, use the configuration option settings documented in - to configure the connection - string for the nova database. -
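For example, a typical connection string in the [database] section of nova.conf; the host, user, and password are placeholders:

[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova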
diff --git a/doc/config-reference/compute/section_compute-configure-xapi.xml b/doc/config-reference/compute/section_compute-configure-xapi.xml deleted file mode 100644 index 3f34811738..0000000000 --- a/doc/config-reference/compute/section_compute-configure-xapi.xml +++ /dev/null @@ -1,81 +0,0 @@ -
- XenAPI configuration reference - - The following section discusses some commonly changed options when - using the XenAPI driver. The table below provides a complete reference - of all configuration options available for configuring XAPI with - OpenStack. - - - The recommended way to use XAPI with OpenStack is through the XenAPI - driver. To enable the XenAPI driver, add the following configuration - options to /etc/nova/nova.conf and restart - OpenStack Compute: - - compute_driver = xenapi.XenAPIDriver -[xenserver] -connection_url = http://your_xenapi_management_ip_address -connection_username = root -connection_password = your_password - - These connection details are used by OpenStack Compute service to - contact your hypervisor and are the same details you use to connect - XenCenter, the XenServer management console, to your XenServer node. - - - - The connection_url is generally the - management network IP address of the XenServer. - - -
- Agent - - The agent is a piece of software that runs on the instances and - communicates with OpenStack. In the case of the XenAPI driver, the - agent communicates with OpenStack through XenStore (see - the Xen Project Wiki - for more information on XenStore). - - - If you do not have the guest agent on your VMs, it takes a long time - for OpenStack Compute to detect that the VM has successfully - started. Generally a large timeout is required for Windows - instances, but you may want to adjust - agent_version_timeout within the - [xenserver] section. -
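For example, to raise the timeout for slow-booting Windows guests; the value shown is an assumption, not a recommendation:

[xenserver]
agent_version_timeout = 300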
-
- VNC proxy address - - Assuming you are talking to XAPI through a management network and - XenServer is at the address 10.10.1.34, specify the same address - for the VNC proxy address: - vncserver_proxyclient_address=10.10.1.34 -
-
- Storage - - You can specify which Storage Repository to use with nova by - editing the following flag. To use the local storage set up by the - default installer: - sr_matching_filter = "other-config:i18n-key=local-storage" - Alternatively, use the "default" storage (for example, if - you have attached NFS or any other shared storage): - sr_matching_filter = "default-sr:true" -
-
- XenAPI configuration reference - - To customize the XenAPI driver, use the configuration option - settings documented in . - -
-
diff --git a/doc/config-reference/compute/section_compute-hypervisors.xml b/doc/config-reference/compute/section_compute-hypervisors.xml deleted file mode 100644 index ef4060ffbd..0000000000 --- a/doc/config-reference/compute/section_compute-hypervisors.xml +++ /dev/null @@ -1,97 +0,0 @@ -
- Hypervisors - OpenStack Compute supports many hypervisors, which might - make it difficult for you to choose one. Most installations - use only one hypervisor. However, you can use and to schedule different - hypervisors within the same installation. The following links - help you choose a hypervisor. See http://docs.openstack.org/developer/nova/support-matrix.html - for a detailed list of features and support across the - hypervisors. - The following hypervisors are supported: - - - KVM - Kernel-based Virtual Machine. The - virtual disk formats that it supports is inherited from - QEMU since it uses a modified QEMU program to launch - the virtual machine. The supported formats include raw - images, the qcow2, and VMware formats. - - - LXC - Linux Containers (through libvirt), - used to run Linux-based virtual machines. - - - QEMU - Quick EMUlator, generally only used - for development purposes. - - - UML - User Mode Linux, generally only used - for development purposes. - - - VMware vSphere 4.1 update 1 and newer, runs VMware-based Linux and - Windows images through a connection with a vCenter server or directly with an ESXi - host. - - - Xen (using libvirt) - - Xen Project Hypervisor using libvirt as management interface into nova-compute to run Linux, Windows, FreeBSD - and NetBSD virtual machines. - - XenServer - - XenServer, Xen Cloud Platform (XCP) and other XAPI based Xen variants runs Linux - or Windows virtual machines. You must install the - nova-compute service in a - para-virtualized VM. - - - Hyper-V - Server virtualization with - Microsoft's Hyper-V, use to run Windows, Linux, and - FreeBSD virtual machines. Runs nova-compute natively - on the Windows virtualization platform. - - -
- Hypervisor configuration basics - The node where the nova-compute service is installed and - running is the same node that runs all of the virtual machines; - it is referred to as the compute node in this guide. - By default, the selected hypervisor is KVM. To change to another hypervisor, change - the virt_type option in the [libvirt] section of - nova.conf and restart the nova-compute service. - The general nova.conf - options that are used to configure the compute node's - hypervisor are documented in - . - Specific options for particular hypervisors - can be found in the following sections. -
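For example, to switch the compute node from KVM to plain QEMU as described above:

[libvirt]
virt_type = qemu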
- - - - - - - -
diff --git a/doc/config-reference/compute/section_compute-iscsioffload.xml b/doc/config-reference/compute/section_compute-iscsioffload.xml deleted file mode 100644 index afad854269..0000000000 --- a/doc/config-reference/compute/section_compute-iscsioffload.xml +++ /dev/null @@ -1,111 +0,0 @@ - -
- iSCSI interface and offload support in Compute - - iSCSI interface and offload support is only present since Kilo. - - - Compute supports open-iscsi iSCSI interfaces for - offload cards. - Offload hardware must be present and configured on every compute - node where offload is desired. Once an open-iscsi - interface is configured, the iface name (iface.iscsi_ifacename) - should be passed to libvirt via the iscsi_iface - parameter for use. All iscsi sessions will be bound to this iSCSI - interface. - - Currently supported transports (iface.transport_name) are - - be2iscsi - bnx2i - cxgb3i - cxgb4i - qla4xxx - ocs - . No configuration changes are needed outside of Compute node. - - iSER is currently supported via the separate iSER - LibvirtISERVolumeDriver and will be rejected if used via the - iscsi_iface parameter. - -
- iSCSI iface configuration - - - Note the distinction between the transport name - (iface.transport_name) - and iface name (iface.iscsi_ifacename). - The actual iface name must be specified via the - iscsi_iface parameter to libvirt for - offload to work. - - - - The default name for an iscsi iface (open-iscsi parameter - iface.iscsi_ifacename) is in the format - transport_name.hwaddress when generated by - iscsiadm. - - - - iscsiadm can be used to view - and generate current iface configuration. Every network - interface that supports an open-iscsi transport can have one - or more iscsi ifaces associated with it. If no ifaces have - been configured for a network interface supported by an - open-iscsi transport, this command will create a default - iface configuration for that network interface. For example : - # iscsiadm -m iface - default tcp,<empty>,<empty>,<empty>,<empty> - iser iser,<empty>,<empty>,<empty>,<empty> - bnx2i.00:05:b5:d2:a0:c2 bnx2i,00:05:b5:d2:a0:c2,5.10.10.20,<empty>,<empty> - cxgb4i.00:07:43:28:b2:58 cxgb4i,00:07:43:28:b2:58,102.50.50.80,<empty>,<empty> - qla4xxx.00:c0:dd:08:63:ea qla4xxx,00:c0:dd:08:63:ea,20.15.0.9,<empty>,<empty> - - - The output is in the format : - iface_name transport_name,hwaddress,ipaddress,net_ifacename,initiatorname. - - - - Individual iface configuration can be viewed via - # iscsiadm -m iface -I IFACE_NAME - # BEGIN RECORD 2.0-873 - iface.iscsi_ifacename = cxgb4i.00:07:43:28:b2:58 - iface.net_ifacename = <empty> - iface.ipaddress = 102.50.50.80 - iface.hwaddress = 00:07:43:28:b2:58 - iface.transport_name = cxgb4i - iface.initiatorname = <empty> - # END RECORD - - - Configuration can be updated as desired via - # iscsiadm -m iface-I IFACE_NAME--op=update -n iface.SETTING -v VALUE - - - - - All iface configurations need a minimum of - iface.iface_name, - iface.transport_name and - iface.hwaddress to be correctly configured - to work. - Some transports may require iface.ipaddress - and iface.net_ifacename as well to bind - correctly. - - Detailed configuration instructions can be found here - - http://www.open-iscsi.org/docs/README - - - - -
-
diff --git a/doc/config-reference/compute/section_compute-options-reference.xml b/doc/config-reference/compute/section_compute-options-reference.xml deleted file mode 100644 index 5a6a3275d8..0000000000 --- a/doc/config-reference/compute/section_compute-options-reference.xml +++ /dev/null @@ -1,67 +0,0 @@ - -
- Compute sample configuration files -
- nova.conf - configuration options - For a complete list of all available configuration options for each OpenStack Compute service, run bin/nova-<servicename> --help. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
diff --git a/doc/config-reference/compute/section_compute-sample-configuration-files.xml b/doc/config-reference/compute/section_compute-sample-configuration-files.xml deleted file mode 100644 index 7860f72a50..0000000000 --- a/doc/config-reference/compute/section_compute-sample-configuration-files.xml +++ /dev/null @@ -1,37 +0,0 @@ -
- Additional sample configuration files - Files in this section can be found in /etc/nova. -
- api-paste.ini - The Compute service stores its API configuration settings in the - api-paste.ini file. - - - -
-
- policy.json - The policy.json file defines additional access controls that apply to the Compute service. - - - -
-
- rootwrap.conf - The rootwrap.conf file defines configuration - values used by the rootwrap script when the Compute service - needs to escalate its privileges to those of the root user. - It is also possible to disable the root wrapper, and - default to sudo only. Configure the disable_rootwrap - option in the section - of the nova.conf configuration file. - - - -
-
- diff --git a/doc/config-reference/compute/section_compute-scheduler.xml b/doc/config-reference/compute/section_compute-scheduler.xml deleted file mode 100644 index a7399c6dc4..0000000000 --- a/doc/config-reference/compute/section_compute-scheduler.xml +++ /dev/null @@ -1,1062 +0,0 @@ - - -%openstack; -]> -
- - Scheduling - Compute uses the nova-scheduler service to determine how to - dispatch compute requests. For example, the - nova-scheduler - service determines on which host a VM should launch. In the - context of filters, the term host means - a physical node that has a nova-compute service running on it. You can - configure the scheduler through a variety of options. - Compute is configured with the following default scheduler - options in the /etc/nova/nova.conf - file: - -scheduler_driver_task_period = 60 -scheduler_driver = nova.scheduler.filter_scheduler.FilterScheduler -scheduler_available_filters = nova.scheduler.filters.all_filters -scheduler_default_filters = RetryFilter, AvailabilityZoneFilter, RamFilter, DiskFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter - By default, the is - configured as a filter scheduler, as described in the next - section. In the default configuration, this scheduler - considers hosts that meet all the following criteria: - - - Have not been attempted for scheduling purposes - (RetryFilter). - - - Are in the requested availability zone - (AvailabilityZoneFilter). - - - Have sufficient RAM available - (RamFilter). - - - Have sufficient disk space available for root and ephemeral storage - (DiskFilter). - - - Can service the request - (ComputeFilter). - - - Satisfy the extra specs associated with the instance - type - (ComputeCapabilitiesFilter). - - - Satisfy any architecture, hypervisor type, or - virtual machine mode properties specified on the - instance's image properties - (ImagePropertiesFilter). - - - - Are on a different host than other instances of a group - (if requested) - (ServerGroupAntiAffinityFilter). - - - - Are in a set of group hosts (if requested) - (ServerGroupAffinityFilter). - - - The scheduler caches its list of available hosts; use the - option to - specify how often the list is updated. - - Do not configure to - be much smaller than - ; - otherwise, hosts appear to be dead while the host list is - being cached. - - For information about the volume scheduler, see the Block - Storage section of - OpenStack Cloud Administrator - Guide. - The scheduler chooses a new host when an instance is - migrated. - When evacuating instances from a host, the scheduler service - honors the target host defined by the administrator on the evacuate - command. If a target is not defined by the administrator, the - scheduler determines the target host. For information about - instance evacuation, see Evacuate instances section of the - OpenStack Cloud Administrator - Guide. -
- Filter scheduler - The filter scheduler - (nova.scheduler.filter_scheduler.FilterScheduler) - is the default scheduler for scheduling virtual machine - instances. It supports filtering and weighting to make - informed decisions on where a new instance should be - created. -
-
- - Filters - When the filter scheduler receives a request for a - resource, it first applies filters to determine which - hosts are eligible for consideration when dispatching a - resource. Filters are binary: either a host is accepted by - the filter, or it is rejected. Hosts that are accepted by - the filter are then processed by a different algorithm to - decide which hosts to use for that request, described in - the Weights section. -
Figure: Filtering
The scheduler_available_filters configuration option in nova.conf provides the Compute service with the list of the filters that are used by the scheduler. The default setting specifies all of the filters that are included with the Compute service:

scheduler_available_filters = nova.scheduler.filters.all_filters

This configuration option can be specified multiple times. For example, if you implemented your own custom filter in Python called myfilter.MyFilter and you wanted to use both the built-in filters and your custom filter, your nova.conf file would contain:

scheduler_available_filters = nova.scheduler.filters.all_filters
scheduler_available_filters = myfilter.MyFilter

The scheduler_default_filters configuration option in nova.conf defines the list of filters that are applied by the nova-scheduler service. The default filters are:

scheduler_default_filters = RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter

The following sections describe the available filters.
- AggregateCoreFilter - - Filters host by CPU core numbers with a per-aggregate - cpu_allocation_ratio value. If the - per-aggregate value is not found, the value falls back - to the global setting. If the host is in more than one - aggregate and more than one value is found, the minimum - value will be used. For information about how to use - this filter, see . See - also . - -
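As an illustrative sketch (the aggregate ID and ratio are placeholders), the per-aggregate value is set as aggregate metadata:

$ nova aggregate-set-metadata 1 cpu_allocation_ratio=16.0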
-
- AggregateDiskFilter - - Filters host by disk allocation with a per-aggregate - disk_allocation_ratio value. If the - per-aggregate value is not found, the value falls back to - the global setting. If the host is in more than one - aggregate and more than one value is found, the minimum - value will be used. For information about how to use this - filter, see . See also - . - -
-
- AggregateImagePropertiesIsolation - Matches properties defined in an image's metadata - against those of aggregates to determine host - matches: - - - If a host belongs to an aggregate and the - aggregate defines one or more metadata that - matches an image's properties, that host is a - candidate to boot the image's instance. - - - If a host does not belong to any aggregate, - it can boot instances from all images. - - - For example, the following aggregate - myWinAgg has the Windows - operating system as metadata (named 'windows'): - $ nova aggregate-details MyWinAgg -+----+----------+-------------------+------------+---------------+ -| Id | Name | Availability Zone | Hosts | Metadata | -+----+----------+-------------------+------------+---------------+ -| 1 | MyWinAgg | None | 'sf-devel' | 'os=windows' | -+----+----------+-------------------+------------+---------------+ - In this example, because the following Win-2012 - image has the windows property, - it boots on the sf-devel host - (all other filters being equal): - $ glance image-show Win-2012 -+------------------+--------------------------------------+ -| Property | Value | -+------------------+--------------------------------------+ -| Property 'os' | windows | -| checksum | f8a2eeee2dc65b3d9b6e63678955bd83 | -| container_format | ami | -| created_at | 2013-11-14T13:24:25 | -| ... - You can configure the - AggregateImagePropertiesIsolation - filter by using the following options in the - nova.conf file: -# Considers only keys matching the given -namespace (string). Multiple values can be given, as a comma-separated list. -aggregate_image_properties_isolation_namespace = <None> - -# Separator used between the namespace and keys (string). -aggregate_image_properties_isolation_separator = . -
-
- AggregateInstanceExtraSpecsFilter - Matches properties defined in extra specs for an - instance type against admin-defined properties on a host - aggregate. Works with specifications that are scoped with - aggregate_instance_extra_specs. Multiple - values can be given, as a comma-separated list. For - backward compatibility, also works with non-scoped - specifications; this action is highly discouraged because - it conflicts with - ComputeCapabilitiesFilter filter when you enable - both filters. For information about how to use this - filter, see the host - aggregates section. -
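A minimal sketch of the matching pair, assuming a hypothetical ssd key, an aggregate with ID 1, and a flavor named m1.ssd:

$ nova aggregate-set-metadata 1 ssd=true
$ nova flavor-key m1.ssd set aggregate_instance_extra_specs:ssd=true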
-
- AggregateIoOpsFilter - - Filters host by disk allocation with a per-aggregate - max_io_ops_per_host value. If the - per-aggregate value is not found, the value falls back to - the global setting. If the host is in more than one - aggregate and more than one value is found, the minimum - value will be used. For information about how to use this - filter, see . See - also . - -
-
- AggregateMultiTenancyIsolation - Isolates tenants to specific host aggregates. - If a host is in an aggregate that has the - filter_tenant_id metadata key, - the host creates instances from only that tenant or - list of tenants. A host can be in different - aggregates. If a host does not belong to an aggregate - with the metadata key, the host can create instances - from all tenants. -
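For example, to dedicate the hosts of a hypothetical aggregate with ID 2 to a single tenant, set the metadata key on that aggregate:

$ nova aggregate-set-metadata 2 filter_tenant_id=TENANT_ID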
-
- AggregateNumInstancesFilter - - Filters host by number of instances with a per-aggregate - max_instances_per_host value. If the - per-aggregate value is not found, the value falls back to - the global setting. If the host is in more than one - aggregate and thus more than one value is found, the - minimum value will be used. For information about how to - use this filter, see . See also . - -
-
- AggregateRamFilter - - Filters host by RAM allocation of instances with a per-aggregate - ram_allocation_ratio value. If the - per-aggregate value is not found, the value falls back to - the global setting. If the host is in more than one - aggregate and thus more than one value is found, the - minimum value will be used. For information about how to - use this filter, see . See also . - -
-
- AggregateTypeAffinityFilter - - This filter passes hosts if no instance_type - key is set or the instance_type - aggregate metadata value contains the name of the - instance_type requested. - The value of the instance_type metadata entry - is a string that may contain either a single - instance_type name or a comma-separated list - of instance_type names, such as - 'm1.nano' or "m1.nano,m1.small." - For information about how to use this filter, see . See also . - -
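As a sketch (the aggregate ID is a placeholder), the instance_type metadata entry is set on the aggregate like any other metadata key:

$ nova aggregate-set-metadata 1 instance_type=m1.nano,m1.small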
-
- AllHostsFilter - This is a no-op filter. It does not eliminate any of - the available hosts. -
-
- AvailabilityZoneFilter - Filters hosts by availability zone. You must enable - this filter for the scheduler to respect availability - zones in requests. -
-
- ComputeCapabilitiesFilter - Matches properties defined in extra specs for an - instance type against compute capabilities. - If an extra specs key contains a colon - (:), anything before the colon - is treated as a namespace and anything after the colon - is treated as the key to be matched. If a namespace is - present and is not capabilities, - the filter ignores the namespace. For backward - compatibility, also treats the extra specs key as the - key to be matched if no namespace is present; this - action is highly discouraged because it conflicts with - - AggregateInstanceExtraSpecsFilter filter - when you enable both filters. -
-
- ComputeFilter - Passes all hosts that are operational and - enabled. - In general, you should always enable this filter. -
-
- CoreFilter - Only schedules instances on hosts if sufficient CPU - cores are available. If this filter is not set, the - scheduler might over-provision a host based on cores. - For example, the virtual cores running on an instance - may exceed the physical cores. - You can configure this filter to enable a fixed - amount of vCPU overcommitment by using the - - configuration option in - nova.conf. The default setting - is: - cpu_allocation_ratio = 16.0 - With this setting, if 8 vCPUs are on a node, the - scheduler allows instances up to 128 vCPU to be run on - that node. - To disallow vCPU overcommitment set: - cpu_allocation_ratio = 1.0 - - The Compute API always returns the actual - number of CPU cores available on a compute node - regardless of the value of the - - configuration key. As a result changes to the - are not - reflected via the command line clients or the - dashboard. Changes to this configuration key are - only taken into account internally in the - scheduler. - -
-
NUMATopologyFilter

Filters hosts based on the NUMA topology that was specified for the instance through the use of flavor extra_specs in combination with the image properties, as described in detail in the related nova-spec document. The filter will try to match the exact NUMA cells of the instance to those of the host. It will consider the standard over-subscription limits for each host NUMA cell, and provide limits to the compute host accordingly.

If the instance has no topology defined, it will be considered for any host. If the instance has a topology defined, it will be considered only for NUMA-capable hosts.
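For example, a flavor that requests a two-node NUMA topology might be defined as follows (the flavor name is illustrative):

$ nova flavor-key m1.large set hw:numa_nodes=2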
-
- DifferentHostFilter - Schedules the instance on a different host from a - set of instances. To take advantage of this filter, - the requester must pass a scheduler hint, using - different_host as the key and a - list of instance UUIDs as the value. This filter is - the opposite of the SameHostFilter. - Using the nova command-line tool, - use the --hint flag. For - example: - $ nova boot --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ - --hint different_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ - --hint different_host=8c19174f-4220-44f0-824a-cd1eeef10287 server-1 - With the API, use the - os:scheduler_hints key. For - example: - -
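An abridged sketch of such a request body, reusing the UUIDs from the CLI example above (other server attributes omitted; the exact payload depends on your environment):

{
    "server": {
        "name": "server-1",
        "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
        "flavorRef": "1"
    },
    "os:scheduler_hints": {
        "different_host": [
            "a0cf03a5-d921-4877-bb5c-86d26cf818e1",
            "8c19174f-4220-44f0-824a-cd1eeef10287"
        ]
    }
}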
-
DiskFilter

Only schedules instances on hosts if there is sufficient disk space available for root and ephemeral storage.

You can configure this filter to enable a fixed amount of disk overcommitment by using the disk_allocation_ratio configuration option in the nova.conf configuration file. The default setting disables the possibility of overcommitment and allows launching a VM only if there is a sufficient amount of disk space available on a host:

disk_allocation_ratio = 1.0

DiskFilter always considers the value of the disk_available_least property and not the free_disk_gb property of a hypervisor's statistics:

$ nova hypervisor-stats
+----------------------+-------+
| Property             | Value |
+----------------------+-------+
| count                | 1     |
| current_workload     | 0     |
| disk_available_least | 29    |
| free_disk_gb         | 35    |
| free_ram_mb          | 3441  |
| local_gb             | 35    |
| local_gb_used        | 0     |
| memory_mb            | 3953  |
| memory_mb_used       | 512   |
| running_vms          | 0     |
| vcpus                | 2     |
| vcpus_used           | 0     |
+----------------------+-------+

As can be seen from the command output above, the amount of available disk space can be less than the amount of free disk space. This happens because the disk_available_least property accounts for the virtual size rather than the actual size of images. If you use an image format that is sparse or copy on write, so that each virtual instance does not require a 1:1 allocation of a virtual disk to physical storage, it may be useful to allow the overcommitment of disk space.

To enable scheduling instances while overcommitting disk resources on the node, adjust the value of the disk_allocation_ratio configuration option to greater than 1.0:

disk_allocation_ratio > 1.0

If the value is set to >1, we recommend keeping track of the free disk space, as a value approaching 0 may result in the incorrect functioning of instances using it at the moment.
-
- GroupAffinityFilter - - This filter is deprecated in favor of ServerGroupAffinityFilter. - - The GroupAffinityFilter ensures that an instance is - scheduled on to a host from a set of group hosts. To - take advantage of this filter, the requester must pass - a scheduler hint, using group as - the key and an arbitrary name as the value. Using the - nova command-line tool, use the - --hint flag. For - example: - $ nova boot --image IMAGE_ID --flavor 1 --hint group=foo server-1 - This filter should not be enabled at the same time - as GroupAntiAffinityFilter or neither filter - will work properly. -
-
- GroupAntiAffinityFilter - - This filter is deprecated in favor of ServerGroupAntiAffinityFilter. - - The GroupAntiAffinityFilter ensures that each - instance in a group is on a different host. To take - advantage of this filter, the requester must pass a - scheduler hint, using group as the - key and an arbitrary name as the value. Using the - nova command-line tool, use the - --hint flag. For - example: - $ nova boot --image IMAGE_ID --flavor 1 --hint group=foo server-1 - This filter should not be enabled at the same time - as GroupAffinityFilter or neither filter will - work properly. -
-
- ImagePropertiesFilter - Filters hosts based on properties defined on the - instance's image. It passes hosts that can support the - specified image properties contained in the instance. - Properties include the architecture, hypervisor type, - hypervisor version (for Xen hypervisor type only), - and virtual machine mode. - For example, an instance - might require a host that runs an ARM-based processor, - and QEMU as the hypervisor. You can decorate an image - with these properties by using: - $ glance image-update img-uuid --property architecture=arm --property hypervisor_type=qemu - The image properties that the filter checks for are: - - - architecture: - describes the machine - architecture required by the image. Examples - are i686, x86_64, - arm, and ppc64. - - - hypervisor_type: - describes the hypervisor required by the image. - Examples are xen, - qemu, and xenapi. - qemu is used for both - QEMU and KVM hypervisor types. - - - hypervisor_version_requires: - describes the hypervisor version required by the image. - The property is supported for Xen hypervisor type only. - It can be used to enable support - for multiple hypervisor versions, - and to prevent instances with newer - Xen tools from being provisioned on an older version - of a hypervisor. If available, the property value - is compared to the hypervisor version of the compute host. - To filter the hosts by the hypervisor version, - add the hypervisor_version_requires property - on the image as metadata and pass an operator and a - required hypervisor version as its value: -$ glance image-update img-uuid --property hypervisor_type=xen --property hypervisor_version_requires=">=4.3" - - - vm_mode: describes the hypervisor application - binary interface (ABI) required by the image. - Examples are xen for Xen 3.0 paravirtual - ABI, hvm for native ABI, - uml for User Mode - Linux paravirtual ABI, exe - for container virt executable ABI. - - -
-
- IsolatedHostsFilter - Allows the admin to define a special (isolated) set - of images and a special (isolated) set of hosts, such - that the isolated images can only run on the isolated - hosts, and the isolated hosts can only run isolated - images. The flag - restrict_isolated_hosts_to_isolated_images - can be used to force isolated hosts to only run - isolated images. - The admin must specify the isolated set of images - and hosts in the nova.conf file - using the isolated_hosts and - isolated_images configuration - options. For example: - isolated_hosts = server1, server2 -isolated_images = 342b492c-128f-4a42-8d3a-c5088cf27d13, ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09 -
-
IoOpsFilter

The IoOpsFilter filters hosts by the number of concurrent I/O operations on them. Hosts with too many concurrent I/O operations will be filtered out. The max_io_ops_per_host option specifies the maximum number of I/O intensive instances allowed to run on a host. A host will be ignored by the scheduler if more than max_io_ops_per_host instances in build, resize, snapshot, migrate, rescue or unshelve task states are running on it.
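For example, the threshold can be set in nova.conf (the value shown here is illustrative):

max_io_ops_per_host = 8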
-
- JsonFilter - The JsonFilter allows a user to construct a custom - filter by passing a scheduler hint in JSON format. The - following operators are supported: - - = - - - < - - - > - - - in - - - <= - - - >= - - - not - - - or - - - and - - The filter supports the following variables: - - $free_ram_mb - - - $free_disk_mb - - - $total_usable_ram_mb - - - $vcpus_total - - - $vcpus_used - - Using the nova - command-line tool, use the --hint - flag: - $ nova boot --image 827d564a-e636-4fc4-a376-d36f7ebe1747 \ - --flavor 1 --hint query='[">=","$free_ram_mb",1024]' server1 - With the API, use the - os:scheduler_hints key: - -
-
MetricsFilter

Filters hosts based on the meters specified in weight_setting. Only hosts with the available meters are passed, so that the metrics weigher will not fail due to hosts with unavailable meters.
-
NumInstancesFilter

Hosts that have more instances running than specified by the max_instances_per_host option are filtered out when this filter is in place.
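For example, to cap the number of instances a single host may run, set the option in nova.conf (the value is illustrative):

max_instances_per_host = 50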
-
- PciPassthroughFilter - - The filter schedules instances on a host if the host has - devices that meet the device requests in the - extra_specs attribute for the flavor. - -
-
- RamFilter - Only schedules instances on hosts that have - sufficient RAM available. If this filter is not set, - the scheduler may over provision a host based on RAM - (for example, the RAM allocated by virtual machine - instances may exceed the physical RAM). - You can configure this filter to enable a fixed - amount of RAM overcommitment by using the - ram_allocation_ratio - configuration option in - nova.conf. The default setting - is: - ram_allocation_ratio = 1.5 - This setting enables 1.5 GB instances to run on - any compute node with 1 GB of free RAM. -
-
- RetryFilter - Filters out hosts that have already been attempted - for scheduling purposes. If the scheduler selects a - host to respond to a service request, and the host - fails to respond to the request, this filter prevents - the scheduler from retrying that host for the service - request. - This filter is only useful if the - scheduler_max_attempts - configuration option is set to a value greater than - zero. - If there are multiple force hosts/nodes, this filter - helps to retry on the force hosts/nodes if a VM fails - to boot. -
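For example, to allow the scheduler to retry a request on up to three hosts, your nova.conf might contain (the value is illustrative):

scheduler_max_attempts = 3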
-
- SameHostFilter - Schedules the instance on the same host as another - instance in a set of instances. To take advantage of - this filter, the requester must pass a scheduler hint, - using same_host as the key and a - list of instance UUIDs as the value. This filter is - the opposite of the - DifferentHostFilter. Using the - nova command-line tool, use the - --hint flag: - $ nova boot --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ - --hint same_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ - --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 server-1 - With the API, use the - os:scheduler_hints key: - -
-
- ServerGroupAffinityFilter - The ServerGroupAffinityFilter ensures that an - instance is scheduled on to a host from a set of group - hosts. To take advantage of this filter, the requester - must create a server group with an - affinity policy, and pass a - scheduler hint, using group as the - key and the server group UUID as the value. Using the - nova command-line tool, use the - --hint flag. For - example: - $ nova server-group-create --policy affinity group-1 -$ nova boot --image IMAGE_ID --flavor 1 --hint group=SERVER_GROUP_UUID server-1 -
-
- ServerGroupAntiAffinityFilter - The ServerGroupAntiAffinityFilter ensures that each - instance in a group is on a different host. To take - advantage of this filter, the requester must create a - server group with an anti-affinity - policy, and pass a scheduler hint, using - group as the key and the server - group UUID as the value. Using the - nova command-line tool, use the - --hint flag. For - example: - $ nova server-group-create --policy anti-affinity group-1 -$ nova boot --image IMAGE_ID --flavor 1 --hint group=SERVER_GROUP_UUID server-1 -
-
- SimpleCIDRAffinityFilter - Schedules the instance based on host IP subnet - range. To take advantage of this filter, the requester - must specify a range of valid IP address in CIDR - format, by passing two scheduler hints: - - - build_near_host_ip - - The first IP address in the subnet (for - example, - 192.168.1.1) - - - - cidr - - The CIDR that corresponds to the subnet - (for example, - /24) - - - - Using the nova command-line tool, - use the --hint flag. For example, - to specify the IP subnet - 192.168.1.1/24 - $ nova boot --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ - --hint build_near_host_ip=192.168.1.1 --hint cidr=/24 server-1 - With the API, use the - os:scheduler_hints key: - -
-
- TrustedFilter - - Filters hosts based on their trust. Only passes hosts that - meet the trust requirements specified in the instance - properties. - -
-
TypeAffinityFilter

Dynamically limits hosts to one instance type. An instance can only be launched on a host if no instances with a different instance type are running on it, or if the host has no running instances at all.
-
-
Weights

When resourcing instances, the filter scheduler filters and weights each host in the list of acceptable hosts. Each time the scheduler selects a host, it virtually consumes resources on it, and subsequent selections are adjusted accordingly. This process is useful when a customer requests a large number of instances, because a weight is computed for each requested instance.

All weights are normalized before being summed up; the host with the largest weight is given the highest priority.
Figure: Weighting hosts
- If cells are used, cells are weighted by the scheduler - in the same manner as hosts. - Hosts and cells are weighted based on the following - options in the /etc/nova/nova.conf - file: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Host weighting options
[DEFAULT] ram_weight_multiplier
    By default, the scheduler spreads instances across all hosts evenly. Set the ram_weight_multiplier option to a negative number if you prefer stacking instead of spreading. Use a floating-point value.

[DEFAULT] scheduler_host_subset_size
    New instances are scheduled on a host that is chosen randomly from a subset of the N best hosts. This property defines the subset size from which a host is chosen. A value of 1 chooses the first host returned by the weighting functions. This value must be at least 1. A value less than 1 is ignored, and 1 is used instead. Use an integer value.

[DEFAULT] scheduler_weight_classes
    Defaults to nova.scheduler.weights.all_weighers, which selects the RamWeigher and MetricsWeigher. Hosts are then weighted and sorted with the largest weight winning.

[DEFAULT] io_ops_weight_multiplier
    Multiplier used for weighing host I/O operations. A negative value means a preference to choose light workload compute hosts.

[metrics] weight_multiplier
    Multiplier for weighting meters. Use a floating-point value.

[metrics] weight_setting
    Determines how meters are weighted. Use a comma-separated list of metricName=ratio. For example: "name1=1.0, name2=-1.0" results in: name1.value * 1.0 + name2.value * -1.0

[metrics] required
    Specifies how to treat unavailable meters:
    True: Raises an exception. To avoid the raised exception, you should use the scheduler filter MetricsFilter to filter out hosts with unavailable meters.
    False: Treated as a negative factor in the weighting process (uses the weight_of_unavailable option).

[metrics] weight_of_unavailable
    If required is set to False, and any one of the meters set by weight_setting is unavailable, the weight_of_unavailable value is returned to the scheduler.
- For example: - [DEFAULT] -scheduler_host_subset_size = 1 -scheduler_weight_classes = nova.scheduler.weights.all_weighers -ram_weight_multiplier = 1.0 -io_ops_weight_multiplier = 2.0 -[metrics] -weight_multiplier = 1.0 -weight_setting = name1=1.0, name2=-1.0 -required = false -weight_of_unavailable = -10000.0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Cell weighting options
[cells] mute_weight_multiplier
    Multiplier to weight mute children (hosts which have not sent capacity or capability updates for some time). Use a negative, floating-point value.

[cells] offset_weight_multiplier
    Multiplier to weight cells, so you can specify a preferred cell. Use a floating-point value.

[cells] ram_weight_multiplier
    By default, the scheduler spreads instances across all cells evenly. Set the ram_weight_multiplier option to a negative number if you prefer stacking instead of spreading. Use a floating-point value.

[cells] scheduler_weight_classes
    Defaults to nova.cells.weights.all_weighers, which maps to all cell weighers included with Compute. Cells are then weighted and sorted with the largest weight winning.
- For example: - [cells] -scheduler_weight_classes = nova.cells.weights.all_weighers -mute_weight_multiplier = -10.0 -ram_weight_multiplier = 1.0 -offset_weight_multiplier = 1.0 -
-
- Chance scheduler - - As an administrator, you work with the filter scheduler. - However, the Compute service also uses the Chance - Scheduler, - nova.scheduler.chance.ChanceScheduler, - which randomly selects from lists of filtered - hosts. -
-
- Utilization aware scheduling - - It is possible to schedule VMs using advanced scheduling - decisions. These decisions are made based on enhanced - usage statistics encompassing data like memory cache - utilization, memory bandwidth utilization, or network - bandwidth utilization. This is disabled by default. - The administrator can configure how the metrics are - weighted in the configuration file by using the - weight_setting configuration option in - the nova.conf configuration file. - For example to configure metric1 with ratio1 and metric2 - with ratio2: - - weight_setting = "metric1=ratio1, metric2=ratio2" - -
- -
- Configuration reference - To customize the Compute scheduler, use the - configuration option settings documented in . -
-
diff --git a/doc/config-reference/compute/section_hypervisor_hyper-v.xml b/doc/config-reference/compute/section_hypervisor_hyper-v.xml deleted file mode 100644 index 9685385857..0000000000 --- a/doc/config-reference/compute/section_hypervisor_hyper-v.xml +++ /dev/null @@ -1,456 +0,0 @@ - - -%openstack; -]> -
Hyper-V virtualization platform

It is possible to use Hyper-V as a compute node within an OpenStack deployment. The nova-compute service runs as "openstack-compute," a 32-bit service directly upon the Windows platform with the Hyper-V role enabled. The necessary Python components as well as the nova-compute service are installed directly onto the Windows platform. Windows Clustering Services are not needed for functionality within the OpenStack infrastructure. The use of the Windows Server 2012 platform is recommended for the best experience and is the platform for active development. The following Windows platforms have been tested as compute nodes:

Windows Server 2008 R2
    Both Server and Server Core with the Hyper-V role enabled (Shared Nothing Live migration is not supported using 2008 R2)

Windows Server 2012 and Windows Server 2012 R2
    Server and Core (with the Hyper-V role enabled), and Hyper-V Server
- Hyper-V configuration - The only OpenStack services required on a Hyper-V node are - nova-compute and - neutron-hyperv-agent. - Regarding the resources needed for this host you have to consider that Hyper-V will require 16 GB - 20 GB of - disk space for the OS itself, including updates. Two NICs are required, - one connected to the management network and one to the guest data network. - - - The following sections discuss how to prepare the Windows Hyper-V - node for operation as an OpenStack compute node. Unless stated otherwise, - any configuration information should work for the Windows 2008 R2, - 2012 and 2012 R2 platforms. - -
- Local storage considerations - The Hyper-V compute node needs to have ample storage for storing the virtual machine - images running on the compute nodes. You may use a single volume for all, or partition - it into an OS volume and VM volume. It is up to the individual deploying to - decide. -
-
- Configure NTP - Network time services must be configured to ensure proper operation of the OpenStack nodes. To set network time on your Windows host you must run the following commands: - C:\>net stop w32time -C:\>w32tm /config /manualpeerlist:pool.ntp.org,0x8 /syncfromflags:MANUAL -C:\>net start w32time - Keep in mind that the node will have to be time synchronized with - the other nodes of your OpenStack environment, so it is important to use - the same NTP server. Note that in case of an Active Directory environment, - you may do this only for the AD Domain Controller. -
-
Configure Hyper-V virtual switching

Information regarding the Hyper-V virtual switch can be found here: http://technet.microsoft.com/en-us/library/hh831823.aspx

To quickly enable an interface to be used as a virtual interface, the following PowerShell may be used:

PS C:\>$if = Get-NetIPAddress -IPAddress 192* | Get-NetIPInterface
PS C:\>New-VMSwitch -NetAdapterName $if.ifAlias -Name YOUR_BRIDGE_NAME -AllowManagementOS $false

It is very important to make sure that when you are using a Hyper-V node with only one NIC, the -AllowManagementOS option is set to True, otherwise you will lose connectivity to the Hyper-V node.
-
- Enable iSCSI initiator service - To prepare the Hyper-V node to be able to attach to volumes provided by cinder - you must first make sure the Windows iSCSI initiator service is running and - started automatically. - PS C:\>Set-Service -Name MSiSCSI -StartupType Automatic -PS C:\>Start-Service MSiSCSI -
-
Configure shared nothing live migration

Detailed information on the configuration of live migration can be found here: http://technet.microsoft.com/en-us/library/jj134199.aspx

The following outlines the steps of shared nothing live migration:

1. The target host ensures that live migration is enabled and properly configured in Hyper-V.
2. The target host checks if the image to be migrated requires a base VHD and pulls it from the Image service if not already available on the target host.
3. The source host ensures that live migration is enabled and properly configured in Hyper-V.
4. The source host initiates a Hyper-V live migration.
5. The source host communicates the outcome of the operation to the manager.

The following configuration options/flags are needed in order to support Hyper-V live migration and must be added to your nova.conf on the Hyper-V compute node:

instances_shared_storage = False
    This is needed to support "shared nothing" Hyper-V live migrations. It is used in nova/compute/manager.py.

limit_cpu_features = True
    This flag is needed to support live migration to hosts with different CPU features. This flag is checked during instance creation in order to limit the CPU features used by the VM.

instances_path = DRIVELETTER:\PATH\TO\YOUR\INSTANCES

Additional requirements:

- Hyper-V 2012 R2 or Windows Server 2012 R2 with the Hyper-V role enabled
- A Windows domain controller with the Hyper-V compute nodes as domain members
- The instances_path command-line option/flag needs to be the same on all hosts.
- The openstack-compute service deployed with the setup must run with domain credentials. You can set the service credentials with:
  C:\>sc config openstack-compute obj="DOMAIN\username" password="password"

How to set up live migration on Hyper-V

To enable shared nothing live migration, run the following three PowerShell commands on each Hyper-V host:

PS C:\>Enable-VMMigration
PS C:\>Set-VMMigrationNetwork IP_ADDRESS
PS C:\>Set-VMHost -VirtualMachineMigrationAuthenticationType Kerberos

Please replace the IP_ADDRESS with the address of the interface which will provide live migration.

Additional reading

Here is an article that clarifies the various live migration options in Hyper-V:
http://ariessysadmin.blogspot.ro/2012/04/hyper-v-live-migration-of-windows.html
-
-
Install nova-compute using the OpenStack Hyper-V installer

In case you want to avoid all the manual setup, you can use Cloudbase Solutions' installer. You can find it here:

https://www.cloudbase.it/downloads/HyperVNovaCompute_Beta.msi

It installs an independent Python environment in order to avoid conflicts with existing applications, and dynamically generates a nova.conf file based on the parameters you provide.

The installer can also be used in an automated and unattended mode for deployments on a large number of servers. More details about how to use the installer and its features can be found here:

https://www.cloudbase.it
-
- Requirements -
Python

Python 2.7 32-bit must be installed, as most of the libraries are not working properly on the 64-bit version.

Setting up Python prerequisites

1. Download Python and then install it using the MSI installer from here:
   http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi

PS C:\> $src = "http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi"
PS C:\> $dest = "$env:temp\python-2.7.3.msi"
PS C:\> Invoke-WebRequest -Uri $src -OutFile $dest
PS C:\> Unblock-File $dest
PS C:\> Start-Process $dest

2. Make sure that the Python and Python\Scripts paths are set up in the PATH environment variable.

PS C:\>$oldPath = [System.Environment]::GetEnvironmentVariable("Path")
PS C:\>$newPath = $oldPath + ";C:\python27\;C:\python27\Scripts\"
PS C:\>[System.Environment]::SetEnvironmentVariable("Path", $newPath, [System.EnvironmentVariableTarget]::User)
-
Python dependencies

The following packages need to be downloaded and manually installed:

setuptools
    http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11.win32-py2.7.exe
pip
    http://pip.readthedocs.org/en/latest/installing.html
PyMySQL
    http://codegood.com/download/10/
PyWin32
    http://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/pywin32-217.win32-py2.7.exe
Greenlet
    http://www.lfd.uci.edu/~gohlke/pythonlibs/#greenlet
PyCrypto
    http://www.voidspace.org.uk/downloads/pycrypto26/pycrypto-2.6.win32-py2.7.exe

The following packages must be installed with pip:

- ecdsa
- amqp
- wmi

PS C:\> pip install ecdsa
PS C:\> pip install amqp
PS C:\> pip install wmi
-
- Other dependencies - qemu-img is required for some of the image - related operations. You can get it from here: http://qemu.weilnetz.de/. You - must make sure that the qemu-img path is set in the - PATH environment variable. - Some Python packages need to be compiled, so you may use MinGW or - Visual Studio. You can get MinGW from here: - http://sourceforge.net/projects/mingw/. You must configure which - compiler to be used for this purpose by using the - distutils.cfg file in - $Python27\Lib\distutils, which can contain: - [build] -compiler = mingw32 - As a last step for setting up MinGW, make sure that the MinGW - binaries' directories are set up in PATH. -
-
-
- Install Nova-compute -
Download the nova code

1. Use Git to download the necessary source code. The installer to run Git on Windows can be downloaded here:
   https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe

2. Download the installer. Once the download is complete, run the installer and follow the prompts in the installation wizard. The defaults should be acceptable for the needs of this document.

PS C:\>$src = "https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe"
PS C:\>$dest = "$env:temp\Git-1.9.2-preview20140411.exe"
PS C:\>Invoke-WebRequest -Uri $src -OutFile $dest
PS C:\>Unblock-File $dest
PS C:\>Start-Process $dest

3. Run the following to clone the nova code:

PS C:\>git.exe clone https://github.com/openstack/nova.git
-
- Install nova-compute service - - To install Nova-compute, run: - - - PS C:\>cd c:\Nova -PS C:\>python setup.py install -
-
Configure nova-compute

The nova.conf file must be placed in C:\etc\nova for running OpenStack on Hyper-V. Below is a sample nova.conf for Windows:

[DEFAULT]
auth_strategy = keystone
image_service = nova.image.glance.GlanceImageService
compute_driver = nova.virt.hyperv.driver.HyperVDriver
volume_api_class = nova.volume.cinder.API
fake_network = true
instances_path = C:\Program Files (x86)\OpenStack\Instances
glance_api_servers = IP_ADDRESS:9292
use_cow_images = true
force_config_drive = false
injected_network_template = C:\Program Files (x86)\OpenStack\Nova\etc\interfaces.template
policy_file = C:\Program Files (x86)\OpenStack\Nova\etc\policy.json
mkisofs_cmd = C:\Program Files (x86)\OpenStack\Nova\bin\mkisofs.exe
verbose = false
allow_resize_to_same_host = true
running_deleted_instance_action = reap
running_deleted_instance_poll_interval = 120
resize_confirm_window = 5
resume_guests_state_on_host_boot = true
rpc_response_timeout = 1800
lock_path = C:\Program Files (x86)\OpenStack\Log\
rpc_backend = nova.openstack.common.rpc.impl_kombu
rabbit_host = IP_ADDRESS
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = Passw0rd
logdir = C:\Program Files (x86)\OpenStack\Log\
logfile = nova-compute.log
instance_usage_audit = true
instance_usage_audit_period = hour
network_api_class = nova.network.neutronv2.api.API
[neutron]
url = http://IP_ADDRESS:9696
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = Passw0rd
admin_auth_url = http://IP_ADDRESS:35357/v2.0
[hyperv]
vswitch_name = newVSwitch0
limit_cpu_features = false
config_drive_inject_password = false
qemu_img_cmd = C:\Program Files (x86)\OpenStack\Nova\bin\qemu-img.exe
config_drive_cdrom = true
dynamic_memory_ratio = 1
enable_instance_metrics_collection = true
[rdp]
enabled = true
html5_proxy_base_url = https://IP_ADDRESS:4430

The configuration option tables in this reference list all available options for Hyper-V.
-
Prepare images for use with Hyper-V

Hyper-V currently supports only the VHD and VHDX file formats for virtual machine instances. Detailed instructions for installing virtual machines on Hyper-V can be found here:
http://technet.microsoft.com/en-us/library/cc772480.aspx

Once you have successfully created a virtual machine, you can then upload the image to glance using the native glance client:

PS C:\>glance image-create --name "VM_IMAGE_NAME" --is-public False --container-format bare --disk-format vhd

VHD and VHDX file sizes can be bigger than their maximum internal size, so you need to boot instances using a flavor with a slightly bigger disk size than the internal size of the disk file. To create VHDs, use the following PowerShell cmdlet:

PS C:\>New-VHD DISK_NAME.vhd -SizeBytes VHD_SIZE
-
- Run Compute with Hyper-V - To start the nova-compute service, run this command from a console in the Windows - server: - PS C:\>C:\Python27\python.exe c:\Python27\Scripts\nova-compute --config-file c:\etc\nova\nova.conf -
-
-
- Troubleshoot Hyper-V configuration - - - - I ran the nova-manage service list command - from my controller; however, I'm not seeing smiley faces for Hyper-V - compute nodes, what do I do? - - Verify that you are synchronized with a - network time source. For instructions about how to configure NTP on - your Hyper-V compute node, see . - - - - How do I restart the compute service? - - PS C:\>net stop nova-compute && net start nova-compute - - - - - How do I restart the iSCSI initiator service? - - PS C:\>net stop msiscsi && net start msiscsi - - -
-
diff --git a/doc/config-reference/compute/section_hypervisor_kvm.xml b/doc/config-reference/compute/section_hypervisor_kvm.xml deleted file mode 100644 index 4dd75e6e94..0000000000 --- a/doc/config-reference/compute/section_hypervisor_kvm.xml +++ /dev/null @@ -1,180 +0,0 @@ - -
- - KVM - KVM is configured as the default hypervisor for Compute. - - This document contains several sections about hypervisor selection. If you are reading - this document linearly, you do not want to load the KVM module before you install - nova-compute. The nova-compute service depends on qemu-kvm, which - installs /lib/udev/rules.d/45-qemu-kvm.rules, which sets the - correct permissions on the /dev/kvm device node. - - To enable KVM explicitly, add the following configuration options to the - /etc/nova/nova.conf file: - compute_driver = libvirt.LibvirtDriver - -[libvirt] -virt_type = kvm - The KVM hypervisor supports the following virtual machine image formats: - - - Raw - - - QEMU Copy-on-write (qcow2) - - - QED Qemu Enhanced Disk - - - VMware virtual machine disk format (vmdk) - - - This section describes how to enable KVM on your system. For more information, see the - following distribution-specific documentation: - - - Fedora: Virtualization Getting Started Guide from the Fedora 22 - documentation. - - - Ubuntu: - KVM/Installation from the Community Ubuntu documentation. - - - Debian: Virtualization with KVM from the Debian handbook. - - - Red Hat Enterprise Linux: Installing virtualization packages on an existing Red - Hat Enterprise Linux system from the Red Hat Enterprise Linux - Virtualization Host Configuration and Guest Installation - Guide. - - - openSUSE: Installing KVM from the openSUSE Virtualization with KVM - manual. - - - SLES: Installing KVM from the SUSE Linux Enterprise Server - Virtualization Guide. - - - - - - - -
Specify the CPU model of KVM guests

The Compute service enables you to control the guest CPU model that is exposed to KVM virtual machines. Use cases include:

- To maximize performance of virtual machines by exposing new host CPU features to the guest
- To ensure a consistent default CPU across all machines, removing reliance on variable QEMU defaults

In libvirt, the CPU is specified by providing a base CPU model name (which is a shorthand for a set of feature flags), a set of additional feature flags, and the topology (sockets/cores/threads). The libvirt KVM driver provides a number of standard CPU model names. These models are defined in the /usr/share/libvirt/cpu_map.xml file. Check this file to determine which models are supported by your local installation.

Two Compute configuration options in the [libvirt] group of nova.conf define which type of CPU model is exposed to the hypervisor when using KVM: cpu_mode and cpu_model.

The cpu_mode option can take one of the following values: none, host-passthrough, host-model, and custom.

Host model (default for KVM & QEMU)

If your nova.conf file contains cpu_mode=host-model, libvirt identifies the CPU model in the /usr/share/libvirt/cpu_map.xml file that most closely matches the host, and requests additional CPU flags to complete the match. This configuration provides the maximum functionality and performance and maintains good reliability and compatibility if the guest is migrated to another host with slightly different host CPUs.

Host pass through

If your nova.conf file contains cpu_mode=host-passthrough, libvirt tells KVM to pass through the host CPU with no modifications. The difference to host-model is that, instead of just matching feature flags, every last detail of the host CPU is matched. This gives the best performance, and can be important to some apps which check low level CPU details, but it comes at a cost with respect to migration. The guest can only be migrated to a matching host CPU.

Custom

If your nova.conf file contains cpu_mode=custom, you can explicitly specify one of the supported named models using the cpu_model configuration option. For example, to configure the KVM guests to expose Nehalem CPUs, your nova.conf file should contain:

[libvirt]
cpu_mode = custom
cpu_model = Nehalem

None (default for all libvirt-driven hypervisors other than KVM & QEMU)

If your nova.conf file contains cpu_mode=none, libvirt does not specify a CPU model. Instead, the hypervisor chooses the default model.
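Similarly, a minimal sketch of selecting host pass through instead of a named model, using the same [libvirt] group described above:

[libvirt]
cpu_mode = host-passthrough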
-
- Guest agent support - Use guest agents to enable optional access between compute nodes and guests through a - socket, using the QMP protocol. - To enable this feature, you must set hw_qemu_guest_agent=yes as a - metadata parameter on the image you wish to use to create the guest-agent-capable instances - from. You can explicitly disable the feature by setting - hw_qemu_guest_agent=no in the image metadata. -
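For example, the property can be set with the glance client (the image identifier is a placeholder):

$ glance image-update IMG_UUID --property hw_qemu_guest_agent=yes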
-
- KVM performance tweaks - The VHostNet kernel - module improves network performance. To load the kernel module, run the following - command as root: - # modprobe vhost_net -
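To load the module automatically at boot, you can list it in your distribution's module configuration; on Debian or Ubuntu, for example, this is typically /etc/modules (the path and mechanism vary by distribution):

# echo vhost_net >> /etc/modules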
-
Troubleshoot KVM

Trying to launch a new virtual machine instance fails with the ERROR state, and the following error appears in the /var/log/nova/nova-compute.log file:

libvirtError: internal error no supported architecture for os type 'hvm'

This message indicates that the KVM kernel modules were not loaded.

If you cannot start VMs after installation without rebooting, the permissions might not be set correctly. This can happen if you load the KVM module before you install nova-compute. To check whether the group ownership of /dev/kvm is set to kvm, run:

# ls -l /dev/kvm

If it is not set to kvm, run:

# udevadm trigger
-
diff --git a/doc/config-reference/compute/section_hypervisor_lxc.xml b/doc/config-reference/compute/section_hypervisor_lxc.xml deleted file mode 100644 index e52df1a81f..0000000000 --- a/doc/config-reference/compute/section_hypervisor_lxc.xml +++ /dev/null @@ -1,32 +0,0 @@ - -
LXC (Linux containers)

LXC (also known as Linux containers) is a virtualization technology that works at the operating system level. This is different from hardware virtualization, the approach used by other hypervisors such as KVM, Xen, and VMware. LXC (as currently implemented using libvirt in the Compute service) is not a secure virtualization technology for multi-tenant environments (specifically, containers may affect resource quotas for other containers hosted on the same machine). Additional containment technologies, such as AppArmor, may be used to provide better isolation between containers, although this is not the case by default. For all these reasons, the choice of this virtualization technology is not recommended in production.

If your compute hosts do not have hardware support for virtualization, LXC will likely provide better performance than QEMU. In addition, if your guests must access specialized hardware, such as GPUs, this might be easier to achieve with LXC than other hypervisors.

Some OpenStack Compute features might be missing when running with LXC as the hypervisor. See the hypervisor support matrix for details.

To enable LXC, ensure the following options are set in /etc/nova/nova.conf on all hosts running the nova-compute service:

compute_driver = libvirt.LibvirtDriver

[libvirt]
virt_type = lxc

On Ubuntu, enable LXC support in OpenStack by installing the nova-compute-lxc package.
- diff --git a/doc/config-reference/compute/section_hypervisor_qemu.xml b/doc/config-reference/compute/section_hypervisor_qemu.xml deleted file mode 100644 index a7156ac223..0000000000 --- a/doc/config-reference/compute/section_hypervisor_qemu.xml +++ /dev/null @@ -1,54 +0,0 @@ - -
QEMU

From the perspective of the Compute service, the QEMU hypervisor is very similar to the KVM hypervisor. Both are controlled through libvirt, both support the same feature set, and all virtual machine images that are compatible with KVM are also compatible with QEMU. The main difference is that QEMU does not support native virtualization. Consequently, QEMU has worse performance than KVM and is a poor choice for a production deployment.

The typical use cases for QEMU are:

- Running on older hardware that lacks virtualization support.
- Running the Compute service inside of a virtual machine for development or testing purposes, where the hypervisor does not support native virtualization for guests.

To enable QEMU, add these settings to nova.conf:

compute_driver = libvirt.LibvirtDriver

[libvirt]
virt_type = qemu

For some operations you may also have to install the guestmount utility:

On Ubuntu:
# apt-get install guestmount

On Red Hat Enterprise Linux, Fedora, or CentOS:
# yum install libguestfs-tools

On openSUSE:
# zypper install guestfs-tools

The QEMU hypervisor supports the following virtual machine image formats:

- Raw
- QEMU Copy-on-write (qcow2)
- VMware virtual machine disk format (vmdk)
diff --git a/doc/config-reference/compute/section_hypervisor_vmware.xml b/doc/config-reference/compute/section_hypervisor_vmware.xml deleted file mode 100644 index d1f7aa3ec1..0000000000 --- a/doc/config-reference/compute/section_hypervisor_vmware.xml +++ /dev/null @@ -1,1050 +0,0 @@ - - -%openstack; -]> -
- VMware vSphere - -
- Introduction - OpenStack Compute supports the VMware vSphere product family - and enables access to advanced features such as vMotion, High - Availability, and Dynamic Resource Scheduling (DRS). - This section describes how to configure VMware-based virtual - machine images for launch. vSphere versions 4.1 and later are - supported. - The VMware vCenter driver enables the - nova-compute service to communicate - with a VMware vCenter server that manages one or more ESX host - clusters. The driver aggregates the ESX hosts in each cluster to - present one large hypervisor entity for each cluster to the - Compute scheduler. Because individual ESX hosts are not exposed - to the scheduler, Compute schedules to the granularity of - clusters and vCenter uses DRS to select the actual ESX host - within the cluster. When a virtual machine makes its way into a - vCenter cluster, it can use all vSphere features. - The following sections describe how to configure the VMware - vCenter driver. -
-
- High-level architecture - The following diagram shows a high-level view of the VMware - driver architecture: -
Figure: VMware driver architecture
- As the figure shows, the OpenStack Compute Scheduler sees - three hypervisors that each correspond to a cluster in vCenter. - Nova-compute contains - the VMware driver. You can run with multiple nova-compute services. While - Compute schedules at the granularity of a cluster, the VMware - driver inside nova-compute - interacts with the vCenter APIs to select an appropriate ESX host within - the cluster. Internally, vCenter uses DRS for placement. - The VMware vCenter driver also interacts with the OpenStack - Image service to copy VMDK images from the Image service back-end - store. The dotted line in the figure represents VMDK images - being copied from the OpenStack Image service to the vSphere - data store. VMDK images are cached in the data store so the copy - operation is only required the first time that the VMDK image is - used. - After OpenStack boots a VM into a vSphere cluster, the VM - becomes visible in vCenter and can access vSphere advanced - features. At the same time, the VM is visible in the OpenStack - dashboard and you can manage it as you would any other OpenStack - VM. You can perform advanced vSphere operations in vCenter while - you configure OpenStack resources such as VMs through the - OpenStack dashboard. - The figure does not show how networking fits into the - architecture. Both nova-network - and the OpenStack Networking Service are supported. For details, see - . -
-
- Configuration overview - To get started with the VMware vCenter driver, complete the - following high-level steps: - - - Configure vCenter. See . - - - Configure the VMware - vCenter driver in the nova.conf file. See . - - - Load desired VMDK images into the OpenStack Image - Service. See . - - - Configure networking with either nova-network - or the OpenStack Networking Service. See . - - -
-
- Prerequisites and limitations - Use the following list to prepare a vSphere environment that - runs with the VMware vCenter driver: - - - Copying VMDK files (vSphere 5.1 - only). In vSphere 5.1, copying large image files - (for example, 12 GB and greater) from Glance can take a long - time. To improve performance, VMware recommends that you - upgrade to VMware vCenter Server 5.1 Update 1 or later. For - more information, see the Release Notes. - - - DRS. For any cluster - that contains multiple ESX hosts, enable DRS and enable - fully automated placement. - - - Shared storage. Only - shared storage is supported and data stores must be shared - among all hosts in a cluster. It is recommended to remove - data stores not intended for OpenStack from clusters being - configured for OpenStack. - - - Clusters and data - stores. Do not use OpenStack clusters and data - stores for other purposes. If you do, OpenStack displays - incorrect usage information. - - - Networking. The - networking configuration depends on the desired networking - model. See . - - - Security groups. If you - use the VMware driver with OpenStack Networking and the NSX - plug-in, security groups are supported. If you use - nova-network, - security groups are not supported. - The NSX plug-in is the only plug-in that is - validated for vSphere. - - - VNC. The port range - 5900 - 6105 (inclusive) is automatically enabled for VNC - connections on every ESX host in all clusters under - OpenStack control. For more information about using a VNC - client to connect to virtual machine, see http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1246. - In addition to the default VNC port - numbers (5900 to 6000) specified in the above document, the - following ports are also used: 6101, 6102, and 6105. - You must modify the ESXi firewall configuration to allow - the VNC ports. Additionally, for the firewall modifications - to persist after a reboot, you must create a custom vSphere - Installation Bundle (VIB) which is then installed onto the - running ESXi host or added to a custom image profile used to - install ESXi hosts. For details about how to create a VIB - for persisting the firewall configuration modifications, see - - http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2007381. - The VIB can be downloaded from - - https://github.com/openstack-vmwareapi-team/Tools. - - - - To use multiple vCenter installations with OpenStack, each vCenter - must be assigned to a separate availability zone. This is required - as the OpenStack Block Storage VMDK driver does not currently work - across multiple vCenter installations. - - -
-
- VMware vCenter service account - OpenStack integration requires a vCenter service account with the following - minimum permissions. Apply the permissions to the - Datacenter root object, and select the Propagate to Child Objects option. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
vCenter permissions tree
All Privileges - - -
- Datastore - -
- - Allocate space -
- - Browse datastore -
- - Low level file operation -
- - Remove file -
- Extension - -
- - Register extension -
- Folder - -
- - Create folder -
- Host - -
- - Configuration -
- - - Maintenance
- - - Network configuration
- - - Storage partition configuration
- Network - -
- - Assign network -
- Resource - -
- - Assign virtual machine to resource pool -
- - Migrate powered off virtual machine -
- - Migrate powered on virtual machine -
- Virtual Machine - -
- - Configuration -
- - - Add existing disk
- - - Add new disk
- - - Add or remove device
- - - Advanced
- - - CPU count
- - - Disk change tracking
- - - Host USB device
- - - Memory
- - - Raw device
- - - Remove disk
- - - Rename
- - - Swapfile placement
- - Interaction -
- - - Configure CD media
- - - Power Off
- - - Power On
- - - Reset
- - - Suspend
- - Inventory -
- - - Create from existing
- - - Create new
- - - Move
- - - Remove
- - - Unregister
- - Provisioning -
- - - Clone virtual machine
- - - Customize
- - Sessions -
- - - Validate session
- - - View and stop sessions
- - Snapshot management -
- - - Create snapshot
- - - Remove snapshot
- vApp - -
- - Export -
- - Import -
-
-
- VMware vCenter driver - Use the VMware vCenter driver (VMwareVCDriver) to connect - OpenStack Compute with vCenter. This recommended configuration - enables access through vCenter to advanced vSphere features like - vMotion, High Availability, and Dynamic Resource Scheduling - (DRS). -
VMwareVCDriver configuration options
When you use the VMwareVCDriver (vCenter versions 5.1 and later) with OpenStack Compute, add the following VMware-specific configuration options to the nova.conf file:
[DEFAULT]
compute_driver=vmwareapi.VMwareVCDriver

[vmware]
host_ip=<vCenter host IP>
host_username=<vCenter username>
host_password=<vCenter password>
cluster_name=<vCenter cluster name>
datastore_regex=<optional datastore regex>
- vSphere vCenter versions 5.0 and earlier: You must specify the location of the WSDL files by adding the wsdl_location=http://127.0.0.1:8080/vmware/SDK/wsdl/vim25/vimService.wsdl setting to the above configuration. For more information, see vSphere 5.0 and earlier additional set up.
- Clusters: The vCenter driver can support multiple clusters. To use more than one cluster, add multiple lines in nova.conf with the appropriate cluster name. Clusters and data stores used by the vCenter driver should not contain any VMs other than those created by the driver.
- Data stores: The datastore_regex setting specifies the data stores to use with Compute. For example, a regular expression that matches names beginning with "nas" selects all the data stores that have a name starting with "nas" (see the sketch after this list). If this line is omitted, Compute uses the first data store returned by the vSphere API. It is recommended not to use this field and instead remove data stores that are not intended for OpenStack.
- Reserved host memory: The reserved host memory option value is 512 MB by default. However, VMware recommends that you set this option to 0 MB because the vCenter driver reports the effective memory available to the virtual machines.
- The vCenter driver generates instance names from the instance ID; the instance name template is ignored.
- The minimum supported vCenter version is 5.1.0. In the OpenStack Liberty release, using an older version is logged as a warning. In the OpenStack "M" release, this minimum version will be enforced.
A nova-compute service can control one or more clusters containing multiple ESX hosts, making nova-compute a critical service from a high availability perspective. Because the host that runs nova-compute can fail while vCenter and ESX continue to run, you must protect the nova-compute service against host failures.
Many nova.conf options are relevant to libvirt but do not apply to this driver.
You must complete additional configuration for environments that use vSphere 5.0 and earlier. See .
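A minimal sketch of a datastore_regex value for the "nas" example above; the exact pattern (nas.*) is an assumption, so adjust it to your own data store naming scheme:
[vmware]
datastore_regex=nas.*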
-
-
Images with VMware vSphere
The vCenter driver supports images in the VMDK format. Disks in this format can be obtained from VMware Fusion or from an ESX environment. It is also possible to convert other formats, such as qcow2, to the VMDK format using the qemu-img utility. After a VMDK disk is available, load it into the OpenStack Image service. Then, you can use it with the VMware vCenter driver. The following sections provide additional details on the supported disks and the commands used for conversion and upload.
Supported image types
Upload images to the OpenStack Image service in VMDK format. The following VMDK disk types are supported:
- VMFS Flat Disks (includes thin, thick, zeroedthick, and eagerzeroedthick). Note that once a VMFS thin disk is exported from VMFS to a non-VMFS location, like the OpenStack Image service, it becomes a preallocated flat disk. This increases the transfer time from the OpenStack Image service to the data store because the full preallocated flat disk, rather than the thin disk, must be transferred.
- Monolithic Sparse disks. Sparse disks get imported from the OpenStack Image service into ESX as thin provisioned disks. Monolithic Sparse disks can be obtained from VMware Fusion or can be created by converting from other virtual disk formats using the qemu-img utility.
The following table shows the vmware_disktype property that applies to each of the supported VMDK disk types:
OpenStack Image service disk type settings
vmware_disktype propertyVMDK disk type
sparse - Monolithic Sparse -
thin - VMFS flat, thin provisioned -
preallocated (default) - VMFS flat, - thick/zeroedthick/eagerzeroedthick -
The vmware_disktype property is set when an image is loaded into the OpenStack Image service. For example, the following command creates a Monolithic Sparse image by setting vmware_disktype to sparse:
$ glance image-create --name "ubuntu-sparse" --disk-format vmdk \
--container-format bare \
--property vmware_disktype="sparse" \
--property vmware_ostype="ubuntu64Guest" < ubuntuLTS-sparse.vmdk
Specifying thin does not provide any advantage over preallocated with the current version of the driver. Future versions might restore the thin properties of the disk after it is downloaded to a vSphere data store.
-
Convert and load images
Using the qemu-img utility, disk images in several formats (such as qcow2) can be converted to the VMDK format.
For example, the following command can be used to convert a qcow2 Ubuntu Trusty cloud image:
$ qemu-img convert -f qcow2 ~/Downloads/trusty-server-cloudimg-amd64-disk1.img \
-O vmdk trusty-server-cloudimg-amd64-disk1.vmdk
VMDK disks converted through qemu-img are always monolithic sparse VMDK disks with an IDE adapter type. Using the previous example of the Ubuntu Trusty image after the qemu-img conversion, the command to upload the VMDK disk should be something like:
$ glance image-create --name trusty-cloud \
--container-format bare --disk-format vmdk \
--property vmware_disktype="sparse" \
--property vmware_adaptertype="ide" < \
trusty-server-cloudimg-amd64-disk1.vmdk
Note that the vmware_disktype is set to sparse and the vmware_adaptertype is set to ide in the previous command.
If the image did not come from the qemu-img utility, the vmware_disktype and vmware_adaptertype might be different. To determine the image adapter type from an image file, use the following command and look for the adapter type line in the output:
$ head -20 <vmdk file name>
Assuming a preallocated disk type and an iSCSI lsiLogic adapter type, the following command uploads the VMDK disk:
$ glance image-create --name "ubuntu-thick-scsi" --disk-format vmdk \
--container-format bare \
--property vmware_adaptertype="lsiLogic" \
--property vmware_disktype="preallocated" \
--property vmware_ostype="ubuntu64Guest" < ubuntuLTS-flat.vmdk
Currently, OS boot VMDK disks with an IDE adapter type cannot be attached to a virtual SCSI controller, and likewise disks with one of the SCSI adapter types (such as busLogic, lsiLogic, lsiLogicsas, paraVirtual) cannot be attached to the IDE controller. Therefore, as the previous examples show, it is important to set the vmware_adaptertype property correctly. The default adapter type is lsiLogic, which is SCSI, so you can omit the vmware_adaptertype property if you are certain that the image adapter type is lsiLogic.
-
Tag VMware images
In a mixed hypervisor environment, OpenStack Compute uses the hypervisor_type image property to match images to the correct hypervisor type. For VMware images, set the hypervisor type to vmware. Other valid hypervisor types include: hyperv, ironic, lxc, qemu, uml, and xen. Note that qemu is used for both QEMU and KVM hypervisor types.
$ glance image-create --name "ubuntu-thick-scsi" --disk-format vmdk \
--container-format bare \
--property vmware_adaptertype="lsiLogic" \
--property vmware_disktype="preallocated" \
--property hypervisor_type="vmware" \
--property vmware_ostype="ubuntu64Guest" < ubuntuLTS-flat.vmdk
-
Optimize images
Monolithic Sparse disks are considerably faster to download but have the overhead of an additional conversion step. When imported into ESX, sparse disks get converted to VMFS flat thin provisioned disks. The download and conversion steps only affect the first launched instance that uses the sparse disk image. The converted disk image is cached, so subsequent instances that use this disk image can simply use the cached version.
To avoid the conversion step (at the cost of longer download times), consider converting sparse disks to thin provisioned or preallocated disks before loading them into the OpenStack Image service. Use one of the following tools to pre-convert sparse disks.
- vSphere CLI tools. Sometimes called the remote CLI or rCLI. Assuming that the sparse disk is made available on a data store accessible by an ESX host, the following command converts it to preallocated format:
vmkfstools --server=ip_of_some_ESX_host -i /vmfs/volumes/datastore1/sparse.vmdk /vmfs/volumes/datastore1/converted.vmdk
Note that the vifs tool from the same CLI package can be used to upload the disk to be converted. The vifs tool can also be used to download the converted disk if necessary.
- vmkfstools directly on the ESX host. If the SSH service is enabled on an ESX host, the sparse disk can be uploaded to the ESX data store through scp, and the vmkfstools local to the ESX host can be used to perform the conversion. After you log in to the host through ssh, run this command:
vmkfstools -i /vmfs/volumes/datastore1/sparse.vmdk /vmfs/volumes/datastore1/converted.vmdk
- vmware-vdiskmanager. vmware-vdiskmanager is a utility that comes bundled with VMware Fusion and VMware Workstation. The following example converts a sparse disk to preallocated format:
'/Applications/VMware Fusion.app/Contents/Library/vmware-vdiskmanager' -r sparse.vmdk -t 4 converted.vmdk
In the previous cases, the converted vmdk is actually a pair of files:
- The descriptor file converted.vmdk.
- The actual virtual disk data file converted-flat.vmdk.
The file to be uploaded to the OpenStack Image Service is converted-flat.vmdk.
-
Image handling
The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. As a result, the vCenter OpenStack Compute driver must download the VMDK via HTTP from the OpenStack Image service to a data store that is visible to the hypervisor. To optimize this process, the first time a VMDK file is used, it gets cached in the data store. A cached image is stored in a folder named after the image ID. Subsequent virtual machines that need the VMDK use the cached version and do not have to copy the file again from the OpenStack Image service.
Even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared data store. To avoid this copy, boot the image in linked_clone mode. To learn how to enable this mode, see . You can also use the vmware_linked_clone property in the OpenStack Image service to override the linked_clone mode on a per-image basis.
If you spawn a virtual machine from an ISO image with a VMDK disk, the disk is created and attached to the virtual machine as a blank disk. In that case, the vmware_linked_clone property of the image is ignored.
If multiple compute nodes are running on the same host, or have a shared file system, you can enable them to use the same cache folder on the back-end data store. To configure this, set the cache prefix option in the nova.conf file; its value is the name prefix of the folder where cached images are stored (a configuration sketch follows below).
You can automatically purge unused images after a specified period of time. To configure this, set these options in the DEFAULT section of the nova.conf file:
- A boolean option that, when set to True, specifies that unused images are removed after the duration given in the option below. The default is True.
- An option that specifies the duration in seconds after which an unused image is purged from the cache. The default is 86400 (24 hours).
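As a sketch, assuming the option names used by the vCenter driver for image caching in this release are cache_prefix (in the [vmware] section), remove_unused_base_images, and remove_unused_original_minimum_age_seconds (verify the exact names in the configuration tables referenced in this guide), the relevant nova.conf entries might look like this:
[DEFAULT]
# Purge unused cached images; default is True
remove_unused_base_images=True
# Seconds after which an unused cached image is purged; default is 86400 (24 hours)
remove_unused_original_minimum_age_seconds=86400

[vmware]
# Name prefix of the shared cache folder on the back-end data store
cache_prefix=vmdk_cache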
-
-
- Networking with VMware vSphere - The VMware driver supports networking with the nova-network service or the - OpenStack Networking Service. Depending on your installation, - complete these configuration steps before you provision - VMs: - - - - The nova-network - service with the FlatManager or FlatDHCPManager. - Create a port group with the same name as the - flat_network_bridge value in the - nova.conf file. The default value is - br100. If you specify another value, - the new value must be a valid Linux bridge identifier that - adheres to Linux bridge naming conventions. - All VM NICs are attached to this port group. - Ensure that the flat interface of the node that runs - the nova-network - service has a path to this network. - - When configuring the port binding for this port group - in vCenter, specify ephemeral for the - port binding type. For more information, see - Choosing a port binding - type in ESX/ESXi in the VMware Knowledge Base. - - - - The nova-network service with the - VlanManager. Set the - vlan_interface configuration option to - match the ESX host interface that handles VLAN-tagged VM - traffic. - OpenStack Compute automatically creates the - corresponding port groups. - - - If you are using the OpenStack Networking Service: - Before provisioning VMs, create a port group with the same - name as the vmware.integration_bridge - value in nova.conf (default is - br-int). All VM NICs are attached to - this port group for management by the OpenStack Networking - plug-in. - - -
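For reference, a minimal sketch of the nova.conf settings that correspond to the port group names described above; the values shown are the defaults mentioned in this section, so verify them against your deployment:
[DEFAULT]
# nova-network flat networking: the vCenter port group name must match this value
flat_network_bridge=br100

[vmware]
# OpenStack Networking: the vCenter port group name must match this value
integration_bridge=br-int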
-
Volumes with VMware vSphere
The VMware driver supports attaching volumes from the OpenStack Block Storage service. The VMware VMDK driver for OpenStack Block Storage is recommended and should be used for managing volumes based on vSphere data stores. For more information about the VMware VMDK driver, see VMware VMDK Driver. An iSCSI volume driver is also available, but it provides limited support and can be used only for attachments.
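As a sketch of the Block Storage side, assuming the VMDK driver class path and the vmware_* option names current in this release (confirm them in the Block Storage configuration tables), cinder.conf might contain:
[DEFAULT]
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
vmware_host_ip=<vCenter host IP>
vmware_host_username=<vCenter username>
vmware_host_password=<vCenter password>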
-
vSphere 5.0 and earlier additional set up
Users of vSphere 5.0 or earlier must host their WSDL files locally. These steps apply to vCenter 5.0 or ESXi 5.0. You can either mirror the WSDL from the vCenter or ESXi server that you intend to use, or download the SDK directly from VMware. These workaround steps fix a known issue with the WSDL that was resolved in later versions.
When setting the VMwareVCDriver configuration options, you must include the wsdl_location option. For more information, see VMwareVCDriver configuration options above.
To mirror WSDL from vCenter (or ESXi):
- Set the VMWAREAPI_IP shell variable to the IP address for your vCenter or ESXi host from which you plan to mirror files. For example:
$ export VMWAREAPI_IP=<your_vsphere_host_ip>
- Create a local file system directory to hold the WSDL files:
$ mkdir -p /opt/stack/vmware/wsdl/5.0
- Change into the new directory:
$ cd /opt/stack/vmware/wsdl/5.0
- Use your OS-specific tools to install a command-line tool that can download files, such as wget.
- Download the files to the local file cache:
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/vimService.wsdl
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/vim.wsdl
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/core-types.xsd
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/query-messagetypes.xsd
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/query-types.xsd
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/vim-messagetypes.xsd
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/vim-types.xsd
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/reflect-messagetypes.xsd
wget --no-check-certificate https://$VMWAREAPI_IP/sdk/reflect-types.xsd
Because the reflect-types.xsd and reflect-messagetypes.xsd files do not fetch properly, you must stub out these files. Use the following XML listing to replace the missing file content. The XML parser underneath Python can be very particular, and a space in the wrong place can break it. Copy the following contents and formatting carefully.
<?xml version="1.0" encoding="UTF-8"?>
<schema
   targetNamespace="urn:reflect"
   xmlns="http://www.w3.org/2001/XMLSchema"
   xmlns:xsd="http://www.w3.org/2001/XMLSchema"
   elementFormDefault="qualified">
</schema>
- Now that the files are locally present, tell the driver to look for the SOAP service WSDLs in the local file system and not on the remote vSphere server. Add the following setting to the nova.conf file for your nova-compute node:
[vmware]
wsdl_location=file:///opt/stack/vmware/wsdl/5.0/vimService.wsdl
Alternatively, download the appropriate version of the SDK from http://www.vmware.com/support/developer/vc-sdk/ and copy it to the /opt/stack/vmware directory. Make sure that the WSDL is available, for example at /opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl. You must point nova.conf at this WSDL file on the local file system by using a URL.
When using the VMwareVCDriver (vCenter) with OpenStack Compute and vSphere version 5.0 or earlier, nova.conf must include the following extra config option:
[vmware]
wsdl_location=file:///opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl
-
- Configuration reference - To customize the VMware driver, use the configuration option settings - documented in . -
-
diff --git a/doc/config-reference/compute/section_hypervisor_xen_api.xml b/doc/config-reference/compute/section_hypervisor_xen_api.xml deleted file mode 100644 index 3f9f8a7535..0000000000 --- a/doc/config-reference/compute/section_hypervisor_xen_api.xml +++ /dev/null @@ -1,307 +0,0 @@ - -
- - XenServer (and other XAPI based Xen variants) - - This section describes XAPI managed hypervisors, and how to - use them with OpenStack. - -
- Terminology -
- Xen - - A hypervisor that provides the fundamental isolation between - virtual machines. Xen is open source (GPLv2) and is managed by - XenProject.org, - a cross-industry organization and a Linux Foundation Collaborative - project. - - Xen is a component of many different products and projects. The hypervisor itself - is very similar across all these projects, but the way that it is managed can be - different, which can cause confusion if you're not clear which toolstack you are - using. Make sure you know what toolstack you - want before you get started. If you want to use Xen with libvirt in OpenStack Compute - refer to . -
-
XAPI
XAPI is one of the toolstacks that can control a Xen-based hypervisor. XAPI's role is similar to libvirt's in the KVM world. The API provided by XAPI is called XenAPI. To learn more about the provided interface, look at XenAPI Object Model Overview for definitions of XAPI-specific terms such as SR, VDI, VIF and PIF.
OpenStack has a compute driver which talks to XAPI, so all XAPI-managed servers can be used with OpenStack.
-
- XenAPI - - XenAPI is the API provided by XAPI. This name is also used by - the python library that is a client for XAPI. - -
-
XenServer
An open source virtualization platform that delivers all features needed for any server and datacenter implementation, including the Xen hypervisor and XAPI for management. For more information and product downloads, visit xenserver.org.
-
XCP
XCP is no longer supported. The XCP project recommends that all XCP users upgrade to the latest version of XenServer by visiting xenserver.org.
-
- XenServer-core - - This is a method for building the core packages in a XenServer - installation on an existing RPM-based system. Initial support - for this configuration (notably running Compute services in domain 0) - was added in Havana. XenServer-core for Debian/Ubuntu is built - from the main branch and, therefore, is continuously up to date. - -
-
- Kronos - - This is a project initiated to provide the ability to install - XAPI toolstack onto an existing Debian-based deployment. - For more information, visit the Xen wiki - - wiki.xenproject.org/wiki/Project_Kronos - . - -
-
- Privileged and unprivileged domains - - A Xen host runs a number of virtual machines, VMs, or domains - (the terms are synonymous on Xen). One of these is in charge of - running the rest of the system, and is known as domain 0, or - dom0. It is the first domain to boot after Xen, and owns the - storage and networking hardware, the device drivers, and the - primary control software. Any other VM is unprivileged, and is - known as a domU or guest. All customer VMs are unprivileged, - but you should note that on Xen, the OpenStack Compute service - (nova-compute) - also runs in a domU. This gives a level of security isolation - between the privileged system software and the OpenStack - software (much of which is customer-facing). This architecture - is described in more detail later. - -
-
- Paravirtualized versus hardware virtualized domains - - A Xen virtual machine can be paravirtualized (PV) or hardware - virtualized (HVM). This refers to the interaction between Xen, - domain 0, and the guest VM's kernel. PV guests are aware of - the fact that they are virtualized and will co-operate with Xen - and domain 0; this gives them better performance - characteristics. HVM guests are not aware of their environment, - and the hardware has to pretend that they are running on an - unvirtualized machine. HVM guests do not need to modify the - guest operating system, which is essential when running - Windows. - - - In OpenStack, customer VMs may run in either PV or HVM mode. - However, the OpenStack domU (that's the one running - nova-compute) must be - running in PV mode. - -
-
-
- - XenAPI deployment architecture - - - A basic OpenStack deployment on a XAPI-managed server, assuming - that the network provider is nova-network, looks like this: - - - - - - - - - - Key things to note: - - - - The hypervisor: Xen - - - - - Domain 0: runs XAPI and some small pieces from - OpenStack, the XAPI plug-ins. - - - - - OpenStack VM: The - Compute - service runs in a paravirtualized virtual machine, on - the host under management. Each host runs a local - instance of - Compute. - It is also running an instance of nova-network. - - - - - OpenStack Compute uses the XenAPI Python library to - talk to XAPI, and it uses the Management Network to - reach from the OpenStack VM to Domain 0. - - - - - - Some notes on the networking: - - - - The above diagram assumes FlatDHCP networking. - - - - - There are three main OpenStack networks: - - - - Management network: RabbitMQ, MySQL, - inter-host communication, and compute-XAPI - communication. Please note that the VM - images are downloaded by the XenAPI - plug-ins, so make sure that the OpenStack - Image service is accessible through this - network. It usually means binding those - services to the management interface. - - - - - Tenant network: controlled by - nova-network, this is used for tenant - traffic. - - - - - Public network: floating IPs, public API - endpoints. - - - - - - - - The networks shown here must be connected to the - corresponding physical networks within the data center. - In the simplest case, three individual physical network - cards could be used. It is also possible to use VLANs - to separate these networks. Please note, that the - selected configuration must be in line with the - networking model selected for the cloud. (In case of - VLAN networking, the physical channels have to be able - to forward the tagged traffic.) - - - - -
-
- Further reading - - Here are some of the resources available to learn more - about Xen: - - - - Citrix XenServer official documentation: - - http://docs.vmd.citrix.com/XenServer - - - - - - What is Xen? by XenProject.org: - - XenProject.org > Users > Cloud - - - - - - Xen Hypervisor project: - - http://www.xenproject.org/developers/teams/hypervisor.html - - - - - - Xapi project: - - http://www.xenproject.org/developers/teams/xapi.html - - - - - - Further XenServer and OpenStack information: - - http://wiki.openstack.org/XenServer - - - - - -
- - -
diff --git a/doc/config-reference/compute/section_hypervisor_xen_libvirt.xml b/doc/config-reference/compute/section_hypervisor_xen_libvirt.xml deleted file mode 100644 index cac7a150c9..0000000000 --- a/doc/config-reference/compute/section_hypervisor_xen_libvirt.xml +++ /dev/null @@ -1,223 +0,0 @@ - -
- - Xen via Libvirt - OpenStack Compute supports the Xen Project Hypervisor (or Xen). Xen can be integrated with - OpenStack Compute via the libvirt - toolstack or via the - XAPI - toolstack. This - section describes how to set up OpenStack Compute with Xen and libvirt. For information on - how to set up Xen with XAPI refer to . -
Installing Xen with Libvirt
At this stage we recommend using the baseline that we use for the Xen Project OpenStack CI Loop, which contains the most recent stability fixes to both Xen and Libvirt.
Xen 4.5.1 (or newer) and Libvirt 1.2.15 (or newer) contain the most recent OpenStack improvements for Xen. The necessary Xen changes have also been backported to the Xen 4.4.3 stable branch (not yet released at this stage). Please check with the Linux and FreeBSD distributions that you intend to use as Dom 0 whether the relevant versions of Xen and Libvirt are available as installable packages.
The latest releases of Xen and libvirt packages that fulfil the above minimum requirements for the various openSUSE distributions can always be found and installed from the Open Build Service Virtualization project. To install these latest packages, add the Virtualization repository to your software management stack and get the newest packages from there. More information about the latest Xen and Libvirt packages is available here and here.
Alternatively, it is possible to use the Ubuntu LTS 14.04 Xen package 4.4.1-0ubuntu0.14.04.4 (Xen 4.4.1) and apply the patches outlined here. You can also use the Ubuntu LTS 14.04 libvirt package 1.2.2 libvirt_1.2.2-0ubuntu13.1.7 as a baseline and update it to libvirt version 1.2.15, or 1.2.14 with the patches outlined here applied. Note that this will require rebuilding these packages partly from source.
For further information and the latest developments, you may want to consult the Xen Project's mailing lists for OpenStack related issues and questions.
-
- Configuring Xen with Libvirt - To enable Xen via libvirt, ensure the following options are set in - /etc/nova/nova.conf on all hosts running the nova-compute - service.compute_driver = libvirt.LibvirtDriver - -[libvirt] -virt_type = xen -
-
Additional configuration options
Use the following as a guideline for configuring Xen for use in OpenStack:
- Dom0 Memory: Set it between 1 GB and 4 GB by adding the following parameter to the Xen Boot Options in the grub.conf file:
dom0_mem=1024M
Note that the above memory limits are suggestions and should be based on the available compute host resources. For large hosts that will run many hundreds of instances, the chosen values may need to be higher. The location of the grub.conf file depends on the host Linux distribution that you are using. Please refer to the distro documentation for more details (see Dom 0 for more resources).
- Dom0 vcpus: Set the virtual CPUs to 4 and employ CPU pinning by adding the following parameters to the Xen Boot Options in the grub.conf file:
dom0_max_vcpus=4 dom0_vcpus_pin
Note that the above virtual CPU limits are suggestions and should be based on the available compute host resources. For large hosts that will run many hundreds of instances, the suggested values may need to be higher.
- PV vs HVM guests: A Xen virtual machine can be paravirtualized (PV) or hardware virtualized (HVM). The virtualization mode determines the interaction between Xen, Dom 0, and the guest VM's kernel. PV guests are aware of the fact that they are virtualized and will co-operate with Xen and Dom 0. The choice of virtualization mode determines performance characteristics. For an overview of Xen virtualization modes, see Xen Guest Types.
In OpenStack, customer VMs may run in either PV or HVM mode. The mode is a property of the operating system image used by the VM, and is changed by adjusting the image metadata stored in the glance image service. The image metadata can be changed using the nova or glance commands.
To choose one of the HVM modes (HVM, HVM with PV Drivers, or PVHVM), use one of the following two commands to set the vm_mode property to hvm:
$ nova image-meta img-uuid set vm_mode=hvm
$ glance image-update img-uuid --property vm_mode=hvm
To choose PV mode, which is supported by NetBSD, FreeBSD and Linux, use one of the following two commands:
$ nova image-meta img-uuid set vm_mode=xen
$ glance image-update img-uuid --property vm_mode=xen
The default virtualization mode in nova is PV mode.
- Image Formats: Xen supports raw, qcow2 and vhd image formats. For more information on image formats, refer to the OpenStack Virtual Image Guide and the Storage Options Guide on the Xen Project Wiki.
- Image Metadata: In addition to the vm_mode property discussed above, the hypervisor_type property is another important component of the image metadata, especially if your cloud contains mixed hypervisor compute nodes. Setting the hypervisor_type property allows the nova scheduler to select a compute node running the specified hypervisor when launching instances of the image. Image metadata such as vm_mode, hypervisor_type, architecture, and others can be set when importing the image to glance. The metadata can also be changed using the nova or glance commands:
$ nova image-meta img-uuid set hypervisor_type=xen vm_mode=hvm
$ glance image-update img-uuid --property hypervisor_type=xen --property vm_mode=hvm
For more information on image metadata, refer to the OpenStack Virtual Image Guide.
- - - - To customize the libvirt driver, use the configuration option settings documented in - . - -
-
Troubleshoot Xen with Libvirt
Important Log Files: When an instance fails to start, or when you come across other issues, you should first consult the following log files:
- /var/log/nova/compute.log (for more information refer to ).
- /var/log/libvirt/libxl/libxl-driver.log
- /var/log/xen/qemu-dm-${instancename}.log
- /var/log/xen/xen-hotplug.log
- /var/log/xen/console/guest-${instancename} (to enable it, see Enabling Guest Console Logs)
- Host Console Logs (read Enabling and Retrieving Host Console Logs).
If you need further help, you can ask questions on the mailing lists xen-users@ and wg-openstack@, or raise a bug against Xen.
-
Known issues
Xen via libvirt is currently only supported with nova networking. A number of bugs are currently being worked on to make sure that Xen via libvirt will also work with Neutron.
-
- Additional information and resources - The following section contains links to other useful resources - - - wiki.xenproject.org/wiki/OpenStack - OpenStack Documentation on the Xen Project wiki - - - wiki.xenproject.org/wiki/OpenStack_CI_Loop_for_Xen-Libvirt - Information about the - Xen Project OpenStack CI Loop - - - wiki.xenproject.org/wiki/OpenStack_via_DevStack - How to set up OpenStack via DevStack - - - Mailing lists for OpenStack related issues and questions - This list is dedicated - to coordinating bug fixes and issues across Xen, libvirt and OpenStack and the CI loop. - - -
-
diff --git a/doc/config-reference/compute/section_nova-conf.xml b/doc/config-reference/compute/section_nova-conf.xml deleted file mode 100644 index 0a3c5bfd31..0000000000 --- a/doc/config-reference/compute/section_nova-conf.xml +++ /dev/null @@ -1,244 +0,0 @@ - -
Overview of nova.conf
The nova.conf configuration file uses the INI file format, as explained in . You can use a particular configuration file by passing the --config-file (nova.conf) parameter when you run one of the nova-* services. This parameter reads configuration option definitions from the specified configuration file, which might be useful for debugging or performance tuning. For a list of configuration options, see the tables in this guide.
To learn more about the nova.conf configuration file, review the general purpose configuration options documented in .
Do not specify quotes around Nova options.
Sections
Configuration options are grouped by section. The Compute configuration file supports the following sections:
- [DEFAULT]: Contains most configuration options. If the documentation for a configuration option does not specify its section, assume that it appears in this section.
- [baremetal]: Configures the baremetal hypervisor driver.
- [cells]: Configures cells functionality. For details, see .
- [conductor]: Configures the nova-conductor service.
- [database]: Configures the database that Compute uses.
- [glance]: Configures how to access the Image service.
- [hyperv]: Configures the Hyper-V hypervisor driver.
- [image_file_url]: Configures additional filesystems to access the Image Service.
- [keymgr]: Configures the key manager.
- [keystone_authtoken]: Configures authorization via the Identity service.
- [libvirt]: Configures the hypervisor drivers using the Libvirt library: KVM, LXC, Qemu, UML, Xen.
- [matchmaker_redis]: Configures a Redis server.
- [matchmaker_ring]: Configures a matchmaker ring.
- [metrics]: Configures weights for the metrics weigher.
- [neutron]: Configures Networking specific options.
- [osapi_v3]: Configures the OpenStack Compute API v3.
- [rdp]: Configures RDP proxying.
- [serial_console]: Configures the serial console.
- [spice]: Configures virtual consoles using SPICE.
- [ssl]: Configures certificate authority settings using SSL.
- [trusted_computing]: Configures the trusted computing pools functionality and how to connect to a remote attestation service.
- [upgrade_levels]: Configures version locking on the RPC (message queue) communications between the various Compute services to allow live upgrading an OpenStack installation.
- [vmware]: Configures the VMware hypervisor driver.
- [xenserver]: Configures the XenServer hypervisor driver.
- [zookeeper]: Configures the ZooKeeper ServiceGroup driver.
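A minimal illustrative fragment showing how options are grouped by section; the option values are only examples (compute_driver as used by the libvirt driver, and virt_type set to kvm), not a recommended configuration:
[DEFAULT]
# Options whose documentation names no section belong here
compute_driver=libvirt.LibvirtDriver

[libvirt]
# Options read by the libvirt hypervisor driver
virt_type=kvm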
diff --git a/doc/config-reference/compute/section_nova-log-files.xml b/doc/config-reference/compute/section_nova-log-files.xml deleted file mode 100644 index fcf1473c13..0000000000 --- a/doc/config-reference/compute/section_nova-log-files.xml +++ /dev/null @@ -1,132 +0,0 @@ - -
- Compute log files - The corresponding log file of each Compute service - is stored in the /var/log/nova/ - directory of the host on which each service runs. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Log files used by Compute services
- Log file - - Service name (CentOS/Fedora/openSUSE/Red Hat Enterprise - Linux/SUSE Linux Enterprise) - - Service name (Ubuntu/Debian) -
- api.log - - openstack-nova-api - - nova-api -
cert.log - The X509 certificate service (openstack-nova-cert/nova-cert) is only required for the EC2 API of the Compute service. - openstack-nova-cert - nova-cert
- compute.log - - openstack-nova-compute - - nova-compute -
- conductor.log - - openstack-nova-conductor - - nova-conductor -
- consoleauth.log - - openstack-nova-consoleauth - - nova-consoleauth -
- network.log - The nova network service - (openstack-nova-network/nova-network) - only runs in deployments that are not configured to use - the Networking service (neutron). - - - openstack-nova-network - - nova-network -
- nova-manage.log - - nova-manage - - nova-manage -
- scheduler.log - - openstack-nova-scheduler - - nova-scheduler -
- -
diff --git a/doc/config-reference/compute/section_rpc.xml b/doc/config-reference/compute/section_rpc.xml deleted file mode 100644 index d680732ec1..0000000000 --- a/doc/config-reference/compute/section_rpc.xml +++ /dev/null @@ -1,97 +0,0 @@ - -
Configure the Oslo RPC messaging system
OpenStack projects use AMQP, an open standard for messaging middleware. OpenStack services that run on multiple servers use this messaging middleware to talk to each other. OpenStack Oslo RPC supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ.
Configure RabbitMQ
OpenStack Oslo RPC uses RabbitMQ by default. Use these options to configure the RabbitMQ message system. The rpc_backend option is not required as long as RabbitMQ is the default messaging system. However, if it is included in the configuration, you must set it to rabbit:
rpc_backend=rabbit
You can use these additional options to configure the RabbitMQ messaging system. You can configure messaging communication for different installation scenarios, tune retries for RabbitMQ, and define the size of the RPC thread pool. To monitor notifications through RabbitMQ, you must set the notification driver option to nova.openstack.common.notifier.rpc_notifier in the nova.conf file. The default for sending usage data is sixty seconds plus a random number of seconds from zero to sixty.
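A minimal sketch of these two settings in nova.conf, assuming the notification driver option is named notification_driver (verify the exact name against the configuration tables in this guide):
[DEFAULT]
rpc_backend=rabbit
# Assumed option name; enables monitoring of notifications through RabbitMQ
notification_driver=nova.openstack.common.notifier.rpc_notifier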
-
Configure Qpid
Use these options to configure the Qpid messaging system for OpenStack Oslo RPC. Qpid is not the default messaging system, so you must enable it by setting the rpc_backend option in the nova.conf file:
rpc_backend=qpid
The Qpid driver has been deprecated. The driver is planned to be removed during the 'M' development cycle.
This critical option points the compute nodes to the Qpid broker (server). Set qpid_hostname to the host name where the broker runs in the nova.conf file.
The --qpid_hostname parameter accepts a host name or IP address value.
qpid_hostname=hostname.example.com
If the Qpid broker listens on a port other than the AMQP default of 5672, you must set the qpid_port option to that value:
qpid_port=12345
If you configure the Qpid broker to require authentication, you must add a user name and password to the configuration:
qpid_username=username
qpid_password=password
By default, TCP is used as the transport. To enable SSL, set the qpid_protocol option:
qpid_protocol=ssl
This table lists additional options that you use to configure the Qpid messaging driver for OpenStack Oslo RPC. These options are used infrequently.
-
- Configure ZeroMQ - Use these options to configure the - ZeroMQ messaging system for - OpenStack Oslo RPC. ZeroMQ is not the - default messaging system, so you must enable it by setting the - option in the - nova.conf file. - -
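A sketch, assuming the ZeroMQ driver is selected through the same rpc_backend option and that its value is zmq (confirm the exact value for your release):
[DEFAULT]
rpc_backend=zmq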
-
- Configure messaging - Use these options to configure the - RabbitMQ and - Qpid messaging drivers. - - -
-
diff --git a/doc/config-reference/compute/section_xapi-ami-setup.xml b/doc/config-reference/compute/section_xapi-ami-setup.xml deleted file mode 100644 index 98fda6f544..0000000000 --- a/doc/config-reference/compute/section_xapi-ami-setup.xml +++ /dev/null @@ -1,27 +0,0 @@ - -
Prepare for AMI type images
To support AMI type images in your OpenStack installation, you must create the /boot/guest directory on dom0. One of the OpenStack XAPI plug-ins will extract the kernel and ramdisk from AKI and ARI images and put them in that directory.
OpenStack maintains the contents of this directory and its size should not increase during normal operation. However, in case of power failures or accidental shutdowns, some files might be left over. To prevent these files from filling up dom0's filesystem, set up this directory as a symlink that points to a subdirectory of the local SR.
Run these commands in dom0 to achieve this setup:
# LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal)
# LOCALPATH="/var/run/sr-mount/$LOCAL_SR/os-guest-kernels"
# mkdir -p "$LOCALPATH"
# ln -s "$LOCALPATH" /boot/guest
diff --git a/doc/config-reference/compute/section_xapi-install-plugins.xml b/doc/config-reference/compute/section_xapi-install-plugins.xml deleted file mode 100644 index 2b55037174..0000000000 --- a/doc/config-reference/compute/section_xapi-install-plugins.xml +++ /dev/null @@ -1,55 +0,0 @@ - -
Install XAPI plug-ins
When you use a XAPI managed hypervisor, you can install a Python script (or any executable) on the host side, and execute it through XenAPI. These scripts are called plug-ins. The OpenStack related XAPI plug-ins live in OpenStack Compute's code repository. These plug-ins have to be copied to dom0's filesystem, to the appropriate directory, where XAPI can find them. It is important to ensure that the version of the plug-ins is in line with the OpenStack Compute installation you are using.
The plug-ins should typically be copied from the Nova installation running in the Compute's DomU, but if you want to download the latest version, the following procedure can be used.
- Manually installing the plug-ins - - - Create temporary files/directories: - $ NOVA_ZIPBALL=$(mktemp) -$ NOVA_SOURCES=$(mktemp -d) - - - - Get the source from GitHub. The example assumes the master - branch is used, and the XenServer host is accessible - as xenserver. Match those parameters to your setup. - - $ NOVA_URL=https://github.com/openstack/nova/archive/master.zip -$ wget -qO "$NOVA_ZIPBALL" "$NOVA_URL" -$ unzip "$NOVA_ZIPBALL" -d "$NOVA_SOURCES" - - - Copy the plug-ins to the hypervisor: - $ PLUGINPATH=$(find $NOVA_SOURCES -path '*/xapi.d/plugins' -type d -print) -$ tar -czf - -C "$PLUGINPATH" ./ | -> ssh root@xenserver tar -xozf - -C /etc/xapi.d/plugins/ - - - Remove temporary files/directories: - $ rm "$NOVA_ZIPBALL" -$ rm -rf "$NOVA_SOURCES" - - -
-
diff --git a/doc/config-reference/compute/section_xapi-install.xml b/doc/config-reference/compute/section_xapi-install.xml deleted file mode 100644 index 1ba6018266..0000000000 --- a/doc/config-reference/compute/section_xapi-install.xml +++ /dev/null @@ -1,117 +0,0 @@ - -
- Install XenServer - - Before you can run OpenStack with XenServer, you must install the - hypervisor on - - an appropriate server - - . - - - - Xen is a type 1 hypervisor: When your server starts, Xen is the - first software that runs. Consequently, you must install XenServer - before you install the operating system where you want to run - OpenStack code. You then install - nova-compute into a - dedicated virtual machine on the host. - - - - Use the following link to download XenServer's installation media: - - - - - http://xenserver.org/open-source-virtualization-download.html - - - - - - - When you install many servers, you might find it easier to perform - - PXE boot installations - - . You can also package any post-installation changes that you want to - make to your XenServer by following the instructions of - - creating your own XenServer supplemental pack - - . - - - - Make sure you use the EXT type of storage repository (SR). Features - that require access to VHD files (such as copy on write, snapshot - and migration) do not work when you use the LVM SR. Storage - repository (SR) is a XAPI-specific term relating to the physical - storage where virtual disks are stored. - - - On the XenServer installation screen, choose the - XenDesktop Optimized - option. If you use an answer file, make sure you use - srtype="ext" - in the - installation - tag of the answer file. - - -
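For answer-file based installations, the EXT storage repository type mentioned above is selected in the installation tag; a minimal fragment might look like the following (the rest of the answer file is omitted and must still be valid for your environment):
<installation srtype="ext">
  <!-- remaining answer file elements go here -->
</installation>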
- Post-installation steps - - The following steps need to be completed after the hypervisor's - installation: - - - - - For resize and migrate functionality, enable password-less - SSH authentication and set up the - /images - directory on dom0. - - - - - Install the XAPI plug-ins. - - - - - To support AMI type images, you must set up - /boot/guest - symlink/directory in dom0. - - - - - Create a paravirtualized virtual machine that can run - nova-compute. - - - - - Install and configure - nova-compute - in the above virtual machine. - - - - - - -
-
diff --git a/doc/config-reference/compute/section_xapi-resize-setup.xml b/doc/config-reference/compute/section_xapi-resize-setup.xml deleted file mode 100644 index 8469b6ce1c..0000000000 --- a/doc/config-reference/compute/section_xapi-resize-setup.xml +++ /dev/null @@ -1,45 +0,0 @@ - -
Modify dom0 for resize/migration support
To resize servers with XenServer, you must:
- Establish a root trust between all hypervisor nodes of your deployment. To do so, generate an ssh key-pair with the ssh-keygen command. Ensure that the authorized_keys file on each of your dom0s (located at /root/.ssh/authorized_keys) contains the public key (located in /root/.ssh/id_rsa.pub) of every other node. A sketch of this key setup follows this procedure.
- Provide a /images mount point to the dom0 for your hypervisor. dom0 space is at a premium, so creating a directory in dom0 is potentially dangerous and likely to fail, especially when you resize large servers. The least you can do is to symlink /images to your local storage SR. The following instructions work for an English-based installation of XenServer and in the case of an ext3-based SR (with which the resize functionality is known to work correctly).
# LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal)
# IMG_DIR="/var/run/sr-mount/$LOCAL_SR/images"
# mkdir -p "$IMG_DIR"
# ln -s "$IMG_DIR" /images
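For the root trust described in the first step, a minimal sketch of the key setup, assuming RSA keys and the default root key locations (the <other-dom0> host name is a placeholder):
# ssh-keygen -t rsa
# cat /root/.ssh/id_rsa.pub | ssh root@<other-dom0> 'cat >> /root/.ssh/authorized_keys'
Repeat the second command for each of the other dom0s in the deployment.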
diff --git a/doc/config-reference/conf-changes/README.txt b/doc/config-reference/conf-changes/README.txt deleted file mode 100644 index d81a0e544d..0000000000 --- a/doc/config-reference/conf-changes/README.txt +++ /dev/null @@ -1,3 +0,0 @@ -Do not manually edit the files in this folder. They are automatically generated -and your changes will be overwritten. The tool to update the files lives in the -openstack-doc-tools repository. diff --git a/doc/config-reference/conf-changes/ceilometer.xml b/doc/config-reference/conf-changes/ceilometer.xml deleted file mode 100644 index 906844f8c3..0000000000 --- a/doc/config-reference/conf-changes/ceilometer.xml +++ /dev/null @@ -1,344 +0,0 @@ - -
- - New, updated, and deprecated options in Liberty for Telemetry - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
New options
Option = default value(Type) Help string
[DEFAULT] batch_polled_samples = True(BoolOpt) To reduce polling agent load, samples are sent to the notification agent in a batch. To gain higher throughput at the cost of load set this to False.
[DEFAULT] ceilometer_control_exchange = ceilometer(StrOpt) Exchange name for ceilometer notifications.
[DEFAULT] dns_control_exchange = central(StrOpt) Exchange name for DNS notifications.
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] magnum_control_exchange = magnum(StrOpt) Exchange name for Magnum notifications.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] pipeline_polling_interval = 20(IntOpt) Polling interval for pipeline file configuration in seconds.
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] refresh_event_pipeline_cfg = False(BoolOpt) Refresh Event Pipeline configuration on-the-fly.
[DEFAULT] refresh_pipeline_cfg = False(BoolOpt) Refresh Pipeline configuration on-the-fly.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[alarm] alarm_max_actions = -1(IntOpt) Maximum count of actions for each state of an alarm, non-positive number means no limit.
[api] aodh_is_enabled = None(BoolOpt) Set True to redirect alarms URLs to aodh. Default autodetection by querying keystone.
[api] aodh_url = None(StrOpt) The endpoint of Aodh to redirect alarms URLs to Aodh API. Default autodetection by querying keystone.
[api] default_api_return_limit = 100(IntOpt) Default maximum number of items returned by API request.
[api] gnocchi_is_enabled = None(BoolOpt) Set True to disable resource/meter/sample URLs. Default autodetection by querying keystone.
[api] workers = 1(IntOpt) Number of workers for api, default value is 1.
[collector] enable_rpc = False(BoolOpt) Enable the RPC functionality of collector. This functionality is now deprecated in favour of notifier publisher and queues.
[collector] workers = 1(IntOpt) Number of workers for collector service. default value is 1.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[database] alarm_history_time_to_live = -1(IntOpt) Number of seconds that alarm histories are kept in the database for (<= 0 means forever).
[dispatcher_gnocchi] archive_policy = low(StrOpt) The archive policy to use when the dispatcher create a new metric.
[dispatcher_gnocchi] archive_policy_file = gnocchi_archive_policy_map.yaml(StrOpt) The Yaml file that defines per metric archive policies.
[dispatcher_gnocchi] filter_project = gnocchi(StrOpt) Gnocchi project used to filter out samples generated by Gnocchi service activity
[dispatcher_gnocchi] filter_service_activity = True(BoolOpt) Filter out samples generated by Gnocchi service activity
[dispatcher_gnocchi] resources_definition_file = gnocchi_resources.yaml(StrOpt) The Yaml file that defines mapping between samples and gnocchi resources/metrics
[dispatcher_gnocchi] url = http://localhost:8041(StrOpt) URL to Gnocchi.
[hardware] meter_definitions_file = snmp.yaml(StrOpt) Configuration file for defining hardware snmp meters.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[meter] meter_definitions_cfg_file = meters.yaml(StrOpt) Configuration file for defining meter notifications.
[notification] pipeline_processing_queues = 10(IntOpt) Number of queues to parallelize workload across. This value should be larger than the number of active notification agents for optimal results.
[notification] workers = 1(IntOpt) Number of workers for notification service, default value is 1.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[vmware] ca_file = None(StrOpt) CA bundle file to use in verifying the vCenter server certificate.
[vmware] insecure = False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
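For orientation, the Gnocchi dispatcher and notification options listed above are set in the Telemetry service configuration file. A minimal sketch follows (the file name ceilometer.conf and the values shown are illustrative assumptions, not recommendations)::

    [dispatcher_gnocchi]
    # Send samples to a local Gnocchi API using the default "low" archive policy.
    url = http://localhost:8041
    archive_policy = low
    filter_service_activity = True

    [notification]
    # Keep the queue count above the number of active notification agents.
    pipeline_processing_queues = 10
    workers = 1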
New default values
Option: previous default value → new default value
[DEFAULT] default_log_levels: amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN → amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] logging_exception_prefix: %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s → %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker: local → redis
[DEFAULT] use_syslog_rfc_format: False → True
[DEFAULT] verbose: False → True
[matchmaker_redis] password: None → (empty)
[notification] disable_non_metric_meters: False → True
[oslo_messaging_rabbit] heartbeat_timeout_threshold: 0 → 60
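Deployments that depended on the earlier behaviour can pin the old values explicitly after upgrading; for example (a sketch, not a recommendation)::

    [DEFAULT]
    # Liberty enables verbose logging by default; restore the previous default if needed.
    verbose = False

    [oslo_messaging_rabbit]
    # Heartbeats are now on by default (60 s); 0 restores the previous disabled behaviour.
    heartbeat_timeout_threshold = 0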
Deprecated options
Deprecated option → new option
[DEFAULT] api_workers → [api] workers
[DEFAULT] notification_workers → [notification] workers
[DEFAULT] database_connection → None
[DEFAULT] collector_workers → [collector] workers
[DEFAULT] rpc_thread_pool_size → [DEFAULT] executor_thread_pool_size
[DEFAULT] log_format → None
[DEFAULT] use_syslog → None
-
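Where the deprecated names above are still in use, the values move to the new groups; for example, the worker counts (the value 2 is purely illustrative)::

    # Deprecated: [DEFAULT] api_workers and [DEFAULT] notification_workers
    [api]
    workers = 2

    [notification]
    workers = 2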
diff --git a/doc/config-reference/conf-changes/cinder.xml b/doc/config-reference/conf-changes/cinder.xml deleted file mode 100644 index a9f8f6e13e..0000000000 --- a/doc/config-reference/conf-changes/cinder.xml +++ /dev/null @@ -1,895 +0,0 @@ - -
New, updated, and deprecated options in Liberty for OpenStack Block Storage
New options
Option = default value | (Type) Help string
[DEFAULT] allow_availability_zone_fallback = False(BoolOpt) If the requested Cinder availability zone is unavailable, fall back to the value of default_availability_zone, then storage_availability_zone, instead of failing.
[DEFAULT] backup_posix_path = $state_path/backup(StrOpt) Path specifying where to store backups.
[DEFAULT] backup_service_inithost_offload = False(BoolOpt) Offload pending backup delete during backup service startup.
[DEFAULT] backup_swift_ca_cert_file = None(StrOpt) Location of the CA certificate file to use for swift client requests.
[DEFAULT] blockbridge_api_host = None(StrOpt) IP address/hostname of Blockbridge API.
[DEFAULT] blockbridge_api_port = None(IntOpt) Override HTTPS port to connect to Blockbridge API server.
[DEFAULT] blockbridge_auth_password = None(StrOpt) Blockbridge API password (for auth scheme 'password')
[DEFAULT] blockbridge_auth_scheme = token(StrOpt) Blockbridge API authentication scheme (token or password)
[DEFAULT] blockbridge_auth_token = None(StrOpt) Blockbridge API token (for auth scheme 'token')
[DEFAULT] blockbridge_auth_user = None(StrOpt) Blockbridge API user (for auth scheme 'password')
[DEFAULT] blockbridge_default_pool = None(StrOpt) Default pool name if unspecified.
[DEFAULT] blockbridge_pools = {'OpenStack': '+openstack'}(DictOpt) Defines the set of exposed pools and their associated backend query strings
[DEFAULT] cb_auth_group = None(StrOpt) This corresponds to the discovery authentication group in CloudByte storage. Chap users are added to this group. Driver uses the first user found for this group. Default value is None.
[DEFAULT] cb_confirm_volume_delete_retries = 3(IntOpt) Will confirm a successful volume deletion in CloudByte storage by making this many number of attempts.
[DEFAULT] cb_confirm_volume_delete_retry_interval = 5(IntOpt) A retry value in seconds. Will be used by the driver to check if volume deletion was successful in CloudByte storage.
[DEFAULT] cinder_internal_tenant_project_id = None(StrOpt) ID of the project which will be used as the Cinder internal tenant.
[DEFAULT] cinder_internal_tenant_user_id = None(StrOpt) ID of the user to be used in volume operations as the Cinder internal tenant.
[DEFAULT] dell_sc_verify_cert = False(BoolOpt) Enable HTTPS SC certificate verification.
[DEFAULT] dothill_api_protocol = https(StrOpt) DotHill API interface protocol.
[DEFAULT] dothill_backend_name = A(StrOpt) Pool or Vdisk name to use for volume creation.
[DEFAULT] dothill_backend_type = virtual(StrOpt) linear (for Vdisk) or virtual (for Pool).
[DEFAULT] dothill_iscsi_ips = (ListOpt) List of comma-separated target iSCSI IP addresses.
[DEFAULT] dothill_verify_certificate = False(BoolOpt) Whether to verify DotHill array SSL certificate.
[DEFAULT] dothill_verify_certificate_path = None(StrOpt) DotHill array SSL certificate path.
[DEFAULT] drbdmanage_redundancy = 1(StrOpt) Number of nodes that should replicate the data.
[DEFAULT] driver_ssl_cert_verify = False(BoolOpt) If set to True the http client will validate the SSL certificate of the backend endpoint.
[DEFAULT] enable_force_upload = False(BoolOpt) Enables the Force option on upload_to_image. This enables running upload_volume on in-use volumes for backends that support it.
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] flashsystem_iscsi_portid = 0(IntOpt) Default iSCSI Port ID of FlashSystem. (Default port is 0.)
[DEFAULT] glusterfs_backup_mount_point = $state_path/backup_mount(StrOpt) Base dir containing mount point for gluster share.
[DEFAULT] glusterfs_backup_share = None(StrOpt) GlusterFS share in <hostname|ipv4addr|ipv6addr>:<gluster_vol_name> format. Eg: 1.2.3.4:backup_vol
[DEFAULT] hgst_net = Net 1 (IPv4)(StrOpt) Space network name to use for data transfer
[DEFAULT] hgst_redundancy = 0(StrOpt) Should spaces be redundantly stored (1/0)
[DEFAULT] hgst_space_group = disk(StrOpt) Group to own created spaces
[DEFAULT] hgst_space_mode = 0600(StrOpt) UNIX mode for created spaces
[DEFAULT] hgst_space_user = root(StrOpt) User to own created spaces
[DEFAULT] hgst_storage_servers = os:gbd0(StrOpt) Comma separated list of Space storage servers:devices. ex: os1_stor:gbd0,os2_stor:gbd0
[DEFAULT] hitachi_horcm_resource_lock_timeout = 600(IntOpt) Timeout until a resource lock is released, in seconds. The value must be between 0 and 7200.
[DEFAULT] hpmsa_api_protocol = https(StrOpt) HPMSA API interface protocol.
[DEFAULT] hpmsa_backend_name = A(StrOpt) Pool or Vdisk name to use for volume creation.
[DEFAULT] hpmsa_backend_type = virtual(StrOpt) linear (for Vdisk) or virtual (for Pool).
[DEFAULT] hpmsa_iscsi_ips = (ListOpt) List of comma-separated target iSCSI IP addresses.
[DEFAULT] hpmsa_verify_certificate = False(BoolOpt) Whether to verify HPMSA array SSL certificate.
[DEFAULT] hpmsa_verify_certificate_path = None(StrOpt) HPMSA array SSL certificate path.
[DEFAULT] hpxp_async_copy_check_interval = 10(IntOpt) Interval to check copy asynchronously
[DEFAULT] hpxp_compute_target_ports = None(ListOpt) Target port names of compute node for host group or iSCSI target
[DEFAULT] hpxp_copy_check_interval = 3(IntOpt) Interval to check copy
[DEFAULT] hpxp_copy_speed = 3(IntOpt) Copy speed of storage system
[DEFAULT] hpxp_default_copy_method = FULL(StrOpt) Default copy method of storage system. There are two valid values: "FULL" specifies that a full copy; "THIN" specifies that a thin copy. Default value is "FULL"
[DEFAULT] hpxp_group_request = False(BoolOpt) Request for creating host group or iSCSI target
[DEFAULT] hpxp_horcm_add_conf = True(BoolOpt) Add to HORCM configuration
[DEFAULT] hpxp_horcm_name_only_discovery = False(BoolOpt) Only discover a specific name of host group or iSCSI target
[DEFAULT] hpxp_horcm_numbers = 200, 201(ListOpt) Instance numbers for HORCM
[DEFAULT] hpxp_horcm_resource_name = meta_resource(StrOpt) Resource group name of storage system for HORCM
[DEFAULT] hpxp_horcm_user = None(StrOpt) Username of storage system for HORCM
[DEFAULT] hpxp_ldev_range = None(StrOpt) Logical device range of storage system
[DEFAULT] hpxp_pool = None(StrOpt) Pool of storage system
[DEFAULT] hpxp_storage_cli = None(StrOpt) Type of storage command line interface
[DEFAULT] hpxp_storage_id = None(StrOpt) ID of storage system
[DEFAULT] hpxp_target_ports = None(ListOpt) Target port names for host group or iSCSI target
[DEFAULT] hpxp_thin_pool = None(StrOpt) Thin pool of storage system
[DEFAULT] hpxp_zoning_request = False(BoolOpt) Request for FC Zone creating host group
[DEFAULT] ignore_pool_full_threshold = False(BoolOpt) Force LUN creation even if the full threshold of pool is reached.
[DEFAULT] image_upload_use_cinder_backend = False(BoolOpt) If set to True, upload-to-image in raw format will create a cloned volume and register its location to the image service, instead of uploading the volume content. The cinder backend and locations support must be enabled in the image service, and glance_api_version must be set to 2.
[DEFAULT] image_upload_use_internal_tenant = False(BoolOpt) If set to True, the image volume created by upload-to-image will be placed in the internal tenant. Otherwise, the image volume is created in the current context's tenant.
[DEFAULT] image_volume_cache_enabled = False(BoolOpt) Enable the image volume cache for this backend.
[DEFAULT] image_volume_cache_max_count = 0(IntOpt) Max number of entries allowed in the image volume cache. 0 => unlimited.
[DEFAULT] image_volume_cache_max_size_gb = 0(IntOpt) Max size of the image volume cache for this backend in GB. 0 => unlimited.
[DEFAULT] infortrend_cli_max_retries = 5(IntOpt) Maximum retry time for cli. Default is 5.
[DEFAULT] infortrend_cli_path = /opt/bin/Infortrend/raidcmd_ESDS10.jar(StrOpt) The Infortrend CLI absolute path. By default, it is at /opt/bin/Infortrend/raidcmd_ESDS10.jar
[DEFAULT] infortrend_cli_timeout = 30(IntOpt) Default timeout for CLI copy operations in minutes. Support: migrate volume, create cloned volume and create volume from snapshot. By Default, it is 30 minutes.
[DEFAULT] infortrend_pools_name = (StrOpt) Infortrend raid pool name list. It is separated with comma.
[DEFAULT] infortrend_provisioning = full(StrOpt) Let the volume use specific provisioning. By default, it is the full provisioning. The supported options are full or thin.
[DEFAULT] infortrend_slots_a_channels_id = 0,1,2,3,4,5,6,7(StrOpt) Infortrend raid channel ID list on Slot A for OpenStack usage. It is separated with comma. By default, it is the channel 0~7.
[DEFAULT] infortrend_slots_b_channels_id = 0,1,2,3,4,5,6,7(StrOpt) Infortrend raid channel ID list on Slot B for OpenStack usage. It is separated with comma. By default, it is the channel 0~7.
[DEFAULT] infortrend_tiering = 0(StrOpt) Let the volume use specific tiering level. By default, it is the level 0. The supported levels are 0,2,3,4.
[DEFAULT] io_port_list = *(StrOpt) Comma separated iSCSI or FC ports to be used in Nova or Cinder.
[DEFAULT] iscsi_target_flags = (StrOpt) Sets the target-specific flags for the iSCSI target. Only used for tgtadm to specify backing device flags using bsoflags option. The specified string is passed as is to the underlying tool.
[DEFAULT] lenovo_api_protocol = https(StrOpt) Lenovo api interface protocol.
[DEFAULT] lenovo_backend_name = A(StrOpt) Pool or Vdisk name to use for volume creation.
[DEFAULT] lenovo_backend_type = virtual(StrOpt) linear (for VDisk) or virtual (for Pool).
[DEFAULT] lenovo_iscsi_ips = (ListOpt) List of comma-separated target iSCSI IP addresses.
[DEFAULT] lenovo_verify_certificate = False(BoolOpt) Whether to verify Lenovo array SSL certificate.
[DEFAULT] lenovo_verify_certificate_path = None(StrOpt) Lenovo array SSL certificate path.
[DEFAULT] managed_replication_target = True(BoolOpt) There are two types of target configurations managed (replicate to another configured backend) or unmanaged (replicate to a device not managed by Cinder).
[DEFAULT] management_ips = (StrOpt) List of Management IP addresses (separated by commas)
[DEFAULT] nas_volume_prov_type = thin(StrOpt) Provisioning type that will be used when creating volumes.
[DEFAULT] netapp_enable_multiattach = False(BoolOpt) This option specifies whether the driver should allow operations that require multiple attachments to a volume. An example would be live migration of servers that have volumes attached. When enabled, this backend is limited to 256 total volumes in order to guarantee volumes can be accessed by more than one host.
[DEFAULT] netapp_host_type = None(StrOpt) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts.
[DEFAULT] netapp_lun_ostype = None(StrOpt) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created.
[DEFAULT] netapp_lun_space_reservation = enabled(StrOpt) This option determines if storage space is reserved for LUN allocation. If enabled, LUNs are thick provisioned. If space reservation is disabled, storage space is allocated on demand.
[DEFAULT] netapp_pool_name_search_pattern = (.+)(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
[DEFAULT] os_privileged_user_auth_url = None(StrOpt) Auth URL associated with the OpenStack privileged account.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] per_volume_size_limit = -1(IntOpt) Max size allowed per volume, in gigabytes
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] query_volume_filters = name, status, metadata, availability_zone(ListOpt) Volume filter options which non-admin user could use to query volumes. Default values are: ['name', 'status', 'metadata', 'availability_zone']
[DEFAULT] rados_connection_interval = 5(IntOpt) Interval value (in seconds) between connection retries to ceph cluster.
[DEFAULT] rados_connection_retries = 3(IntOpt) Number of retries if connection to ceph cluster failed.
[DEFAULT] rbd_cluster_name = ceph(StrOpt) The name of ceph cluster
[DEFAULT] replication_devices = None(ListOpt) List of k/v pairs representing a replication target for this backend device. For unmanaged the format is: {'key-1'='val1' 'key-2'='val2'...},{...} and for managed devices its simply a list of valid configured backend_names that the driver supports replicating to: backend-a,bakcend-b...
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] sf_enable_volume_mapping = True(BoolOpt) Create an internal mapping of volume IDs and account. Optimizes lookups and performance at the expense of memory, very large deployments may want to consider setting to False.
[DEFAULT] sheepdog_store_address = 127.0.0.1(StrOpt) IP address of sheep daemon.
[DEFAULT] sheepdog_store_port = 7000(IntOpt) Port of sheep daemon.
[DEFAULT] sio_force_delete = False(BoolOpt) Whether to allow force delete.
[DEFAULT] sio_protection_domain_id = None(StrOpt) Protection domain id.
[DEFAULT] sio_protection_domain_name = None(StrOpt) Protection domain name.
[DEFAULT] sio_rest_server_port = 443(StrOpt) REST server port.
[DEFAULT] sio_round_volume_capacity = True(BoolOpt) Whether to round volume capacity.
[DEFAULT] sio_server_certificate_path = None(StrOpt) Server certificate path.
[DEFAULT] sio_storage_pool_id = None(StrOpt) Storage pool id.
[DEFAULT] sio_storage_pool_name = None(StrOpt) Storage pool name.
[DEFAULT] sio_storage_pools = None(StrOpt) Storage pools.
[DEFAULT] sio_unmap_volume_before_deletion = False(BoolOpt) Whether to unmap volume before deletion.
[DEFAULT] sio_verify_server_certificate = False(BoolOpt) Whether to verify server certificate.
[DEFAULT] storage_vnx_pool_names = None(StrOpt) Comma-separated list of storage pool names to be used.
[DEFAULT] tintri_api_version = v310(StrOpt) API version for the storage system
[DEFAULT] tintri_server_hostname = None(StrOpt) The hostname (or IP address) for the storage system
[DEFAULT] tintri_server_password = None(StrOpt) Password for the storage system
[DEFAULT] tintri_server_username = None(StrOpt) User name for the storage system
[DEFAULT] trace_flags = None(ListOpt) List of options that control which trace info is written to the DEBUG log level to assist developers. Valid values are method and api.
[DEFAULT] violin_request_timeout = 300(IntOpt) Global backend request timeout, in seconds.
[DEFAULT] vmware_ca_file = None(StrOpt) CA bundle file to use in verifying the vCenter server certificate.
[DEFAULT] vmware_cluster_name = None(MultiStrOpt) Name of a vCenter compute cluster where volumes should be created.
[DEFAULT] vmware_insecure = False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "vmware_ca_file" is set.
[DEFAULT] vzstorage_mount_options = None(ListOpt) Mount options passed to the vzstorage client. See section of the pstorage-mount man page for details.
[DEFAULT] vzstorage_mount_point_base = $state_path/mnt(StrOpt) Base dir containing mount points for vzstorage shares.
[DEFAULT] vzstorage_shares_config = /etc/cinder/vzstorage_shares(StrOpt) File with the list of available vzstorage shares.
[DEFAULT] vzstorage_sparsed_volumes = True(BoolOpt) Create volumes as sparsed files which take no space rather than regular files when using raw format, in which case volume creation takes lot of time.
[DEFAULT] vzstorage_used_ratio = 0.95(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination.
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] xtremio_array_busy_retry_count = 5(IntOpt) Number of retries in case array is busy
[DEFAULT] xtremio_array_busy_retry_interval = 5(IntOpt) Interval between retries in case array is busy
[DEFAULT] zfssa_cache_directory = os-cinder-cache(StrOpt) Name of directory inside zfssa_nfs_share where cache volumes are stored.
[DEFAULT] zfssa_cache_project = os-cinder-cache(StrOpt) Name of ZFSSA project where cache volumes are stored.
[DEFAULT] zfssa_enable_local_cache = True(BoolOpt) Flag to enable local caching: True, False.
[DEFAULT] zfssa_initiator = (StrOpt) iSCSI initiator IQNs. (comma separated)
[DEFAULT] zfssa_initiator_config = (StrOpt) iSCSI initiators configuration.
[DEFAULT] zfssa_initiator_group = (StrOpt) iSCSI initiator group.
[DEFAULT] zfssa_initiator_password = (StrOpt) Secret of the iSCSI initiator CHAP user.
[DEFAULT] zfssa_initiator_user = (StrOpt) iSCSI initiator CHAP user (name).
[DEFAULT] zfssa_lun_compression = off(StrOpt) Data compression.
[DEFAULT] zfssa_lun_logbias = latency(StrOpt) Synchronous write bias.
[DEFAULT] zfssa_lun_sparse = False(BoolOpt) Flag to enable sparse (thin-provisioned): True, False.
[DEFAULT] zfssa_lun_volblocksize = 8k(StrOpt) Block size.
[DEFAULT] zfssa_pool = None(StrOpt) Storage pool name.
[DEFAULT] zfssa_project = None(StrOpt) Project name.
[DEFAULT] zfssa_replication_ip = (StrOpt) IP address used for replication data. (maybe the same as data ip)
[DEFAULT] zfssa_target_group = tgt-grp(StrOpt) iSCSI target group name.
[DEFAULT] zfssa_target_interfaces = None(StrOpt) Network interfaces of iSCSI targets. (comma separated)
[DEFAULT] zfssa_target_password = (StrOpt) Secret of the iSCSI target CHAP user.
[DEFAULT] zfssa_target_portal = None(StrOpt) iSCSI target portal (Data-IP:Port, w.x.y.z:3260).
[DEFAULT] zfssa_target_user = (StrOpt) iSCSI target CHAP user (name).
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy] policy_default_rule = default(StrOpt) Default rule. Enforced when a requested rule is not found.
[oslo_policy] policy_dirs = ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
[oslo_policy] policy_file = policy.json(StrOpt) The JSON file that defines policies.
[oslo_versionedobjects] fatal_exception_format_errors = False(BoolOpt) Make exception message format errors fatal
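Several of the new Block Storage options are meant to be used together. For instance, the per-backend image volume cache options are commonly combined with the new internal tenant settings; a sketch follows (the backend section name and the IDs are placeholders)::

    [DEFAULT]
    # Identity of the Cinder internal tenant used for internal volume operations.
    cinder_internal_tenant_project_id = PROJECT_ID
    cinder_internal_tenant_user_id = USER_ID

    [lvm-backend]
    image_volume_cache_enabled = True
    image_volume_cache_max_count = 50
    image_volume_cache_max_size_gb = 200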
New default values
Option: previous default value → new default value
[DEFAULT] auth_strategy: noauth → keystone
[DEFAULT] default_log_levels: amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN → amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] logging_exception_prefix: %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s → %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker: local → redis
[DEFAULT] storwize_svc_npiv_compatibility_mode: False → True
[DEFAULT] use_syslog_rfc_format: False → True
[DEFAULT] verbose: False → True
[DEFAULT] vmware_volume_folder: cinder-volumes → Volumes
[DEFAULT] volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver → cinder.volume.drivers.lvm.LVMVolumeDriver
[matchmaker_redis] password: None → (empty)
[oslo_messaging_rabbit] heartbeat_timeout_threshold: 0 → 60
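Because auth_strategy and volume_driver now default to different values, it can be worth stating them explicitly so the intent survives future default changes; for example (the values shown are simply the new Liberty defaults)::

    [DEFAULT]
    auth_strategy = keystone
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver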
Deprecated options
Deprecated option → new option
[DEFAULT] use_syslog → None
[DEFAULT] osapi_max_request_body_size → [oslo_middleware] max_request_body_size
[DEFAULT] eqlx_chap_password → [DEFAULT] chap_password
[DEFAULT] datera_api_token → None
[DEFAULT] glusterfs_sparsed_volumes → [DEFAULT] nas_volume_prov_type
[DEFAULT] glusterfs_qcow2_volumes → [DEFAULT] nas_volume_prov_type
[DEFAULT] eqlx_use_chap → [DEFAULT] use_chap_auth
[DEFAULT] rpc_thread_pool_size → [DEFAULT] executor_thread_pool_size
[DEFAULT] enable_v1_api → None
[DEFAULT] netapp_volume_list → [DEFAULT] netapp_pool_name_search_pattern
[DEFAULT] netapp_storage_pools → [DEFAULT] netapp_pool_name_search_pattern
[DEFAULT] host → [DEFAULT] backend_host
[DEFAULT] netapp_eseries_host_type → [DEFAULT] netapp_host_type
[DEFAULT] eqlx_chap_login → [DEFAULT] chap_username
[DEFAULT] log_format → None
[DEFAULT] storage_vnx_pool_name → [DEFAULT] storage_vnx_pool_names
-
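As an example of the renames above, a Dell EqualLogic backend section using CHAP switches from the eqlx_* names to the generic ones (the section name and credentials are placeholders)::

    [eqlx-backend]
    # Formerly eqlx_use_chap, eqlx_chap_login, and eqlx_chap_password.
    use_chap_auth = True
    chap_username = CHAP_USER
    chap_password = CHAP_SECRET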
diff --git a/doc/config-reference/conf-changes/glance.xml b/doc/config-reference/conf-changes/glance.xml deleted file mode 100644 index b097487559..0000000000 --- a/doc/config-reference/conf-changes/glance.xml +++ /dev/null @@ -1,279 +0,0 @@ - -
New, updated, and deprecated options in Liberty for OpenStack Image service
New options
Option = default value | (Type) Help string
[DEFAULT] enable_v3_api = False(BoolOpt) Deploy the v3 OpenStack Objects API.
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] max_request_id_length = 64(IntOpt) Limits request ID length.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] scrub_pool_size = 1(IntOpt) The size of thread pool to be used for scrubbing images. The default is one, which signifies serial scrubbing. Any value above one indicates the max number of images that may be scrubbed in parallel.
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[glance_store] rados_connect_timeout = 0(IntOpt) Timeout value (in seconds) used when connecting to ceph cluster. If value <= 0, no timeout is set and default librados value is used.
[glance_store] s3_store_enable_proxy = False(BoolOpt) Enable the use of a proxy.
[glance_store] s3_store_proxy_host = None(StrOpt) Address or hostname for the proxy server.
[glance_store] s3_store_proxy_password = None(StrOpt) The password to use when connecting over a proxy.
[glance_store] s3_store_proxy_port = 8080(IntOpt) The port to use when connecting over a proxy.
[glance_store] s3_store_proxy_user = None(StrOpt) The username to connect to the proxy.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_middleware] max_request_body_size = 114688(IntOpt) The maximum body size for each request, in bytes.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
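The new S3 proxy options hang off the s3_store_enable_proxy toggle; a minimal sketch for glance-api.conf follows (host, port, and credentials are placeholders)::

    [glance_store]
    s3_store_enable_proxy = True
    s3_store_proxy_host = proxy.example.com
    s3_store_proxy_port = 8080
    s3_store_proxy_user = PROXY_USER
    s3_store_proxy_password = PROXY_PASSWORD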
New default values
Option: previous default value → new default value
[DEFAULT] allowed_rpc_exception_modules: openstack.common.exception, glance.common.exception, exceptions → glance.common.exception, exceptions
[DEFAULT] client_socket_timeout: 0 → 900
[DEFAULT] default_log_levels: amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN → amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] digest_algorithm: sha1 → sha256
[DEFAULT] host: localhost → 127.0.0.1
[DEFAULT] logging_exception_prefix: %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s → %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker: local → redis
[DEFAULT] use_syslog_rfc_format: False → True
[DEFAULT] verbose: False → True
[matchmaker_redis] password: None → (empty)
[oslo_messaging_rabbit] heartbeat_timeout_threshold: 0 → 60
Deprecated options
Deprecated option → new option
[glance_store] vmware_datacenter_path → None
[DEFAULT] log_format → None
[DEFAULT] use_syslog → None
[DEFAULT] rpc_thread_pool_size → [DEFAULT] executor_thread_pool_size
[glance_store] vmware_datastore_name → None
-
diff --git a/doc/config-reference/conf-changes/heat.xml b/doc/config-reference/conf-changes/heat.xml deleted file mode 100644 index 54f522d71a..0000000000 --- a/doc/config-reference/conf-changes/heat.xml +++ /dev/null @@ -1,334 +0,0 @@ - -
New, updated, and deprecated options in Liberty for Orchestration
New options
Option = default value | (Type) Help string
[DEFAULT] encrypt_parameters_and_properties = False(BoolOpt) Encrypt template parameters that were marked as hidden and also all the resource properties before storing them in database.
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] hidden_stack_tags = data-processing-cluster(ListOpt) Stacks containing these tag names will be hidden. Multiple tags should be given in a comma-delimited list (eg. hidden_stack_tags=hide_me,me_too).
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cache] backend = dogpile.cache.null(StrOpt) Dogpile.cache backend module. It is recommended that Memcache with pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend.
[cache] backend_argument = [](MultiStrOpt) Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: "<argname>:<value>".
[cache] config_prefix = cache.oslo(StrOpt) Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name.
[cache] debug_cache_backend = False(BoolOpt) Extra debugging from the cache backend (cache keys, get/set/delete/etc calls). This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to false.
[cache] enabled = False(BoolOpt) Global toggle for caching.
[cache] expiration_time = 600(IntOpt) Default TTL, in seconds, for any cached item in the dogpile.cache region. This applies to any cached method that doesn't have an explicit cache expiration time defined for it.
[cache] memcache_dead_retry = 300(IntOpt) Number of seconds memcached server is considered dead before it is tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
[cache] memcache_pool_connection_get_timeout = 10(IntOpt) Number of seconds that an operation will wait to get a memcache client connection.
[cache] memcache_pool_maxsize = 10(IntOpt) Max total number of open connections to every memcached server. (oslo_cache.memcache_pool backend only).
[cache] memcache_pool_unused_timeout = 60(IntOpt) Number of seconds a connection to memcached is held unused in the pool before it is closed. (oslo_cache.memcache_pool backend only).
[cache] memcache_servers = localhost:11211(ListOpt) Memcache servers in the format of "host:port". (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
[cache] memcache_socket_timeout = 3(IntOpt) Timeout in seconds for every call to a server. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
[cache] proxies = (ListOpt) Proxy classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior.
[clients_keystone] auth_uri = (StrOpt) Unversioned keystone url in format like http://0.0.0.0:5000.
[constraint_validation_cache] caching = True(BoolOpt) Toggle to enable/disable caching when Orchestration Engine validates property constraints of stack.During property validation with constraints Orchestration Engine caches requests to other OpenStack services. Please note that the global toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use this feature.
[constraint_validation_cache] expiration_time = 60(IntOpt) TTL, in seconds, for any cached item in the dogpile.cache region used for caching of validation constraints.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[heat_api] tcp_keepidle = 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
[heat_api_cfn] tcp_keepidle = 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
[heat_api_cloudwatch] tcp_keepidle = 600(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy] policy_default_rule = default(StrOpt) Default rule. Enforced when a requested rule is not found.
[oslo_policy] policy_dirs = ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
[oslo_policy] policy_file = policy.json(StrOpt) The JSON file that defines policies.
[oslo_versionedobjects] fatal_exception_format_errors = False(BoolOpt) Make exception message format errors fatal
[trustee] auth_plugin = None(StrOpt) Name of the plugin to load
[trustee] auth_section = None(StrOpt) Config Section from which to load plugin specific options
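The new [cache] and [constraint_validation_cache] groups work together: constraint validation caching only takes effect when the global cache toggle is enabled. A minimal memcached-backed sketch for heat.conf (the server address is an example)::

    [cache]
    enabled = True
    backend = oslo_cache.memcache_pool
    memcache_servers = 192.0.2.10:11211
    expiration_time = 600

    [constraint_validation_cache]
    # Requires enabled = True in the [cache] group above.
    caching = True
    expiration_time = 60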
New default values
Option: previous default value → new default value
[DEFAULT] default_log_levels: amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN → amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] enable_cloud_watch_lite: True → False
[DEFAULT] heat_waitcondition_server_url: None
[DEFAULT] logging_exception_prefix: %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s → %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker: local → redis
[DEFAULT] use_syslog_rfc_format: False → True
[DEFAULT] verbose: False → True
[heat_api] workers: 0 → 8
[matchmaker_redis] password: None → (empty)
[oslo_messaging_rabbit] heartbeat_timeout_threshold: 0 → 60
Deprecated options
Deprecated option → new option
[DEFAULT] rpc_thread_pool_size → [DEFAULT] executor_thread_pool_size
[DEFAULT] log_format → None
[DEFAULT] use_syslog → None
-
diff --git a/doc/config-reference/conf-changes/ironic.xml b/doc/config-reference/conf-changes/ironic.xml deleted file mode 100644 index 47ff43b153..0000000000 --- a/doc/config-reference/conf-changes/ironic.xml +++ /dev/null @@ -1,356 +0,0 @@ - -
New, updated, and deprecated options in Liberty for Bare metal service
New options
Option = default value | (Type) Help string
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[agent] manage_agent_boot = True(BoolOpt) Whether Ironic will manage booting of the agent ramdisk. If set to False, you will need to configure your mechanism to allow booting the agent ramdisk.
[agent] memory_consumed_by_agent = 0(IntOpt) The memory size in MiB consumed by agent when it is booted on a bare metal node. This is used for checking if the image can be downloaded and deployed on the bare metal node after booting agent ramdisk. This may be set according to the memory consumed by the agent ramdisk image.
[agent] post_deploy_get_power_state_retries = 6(IntOpt) Number of times to retry getting power state to check if bare metal node has been powered off after a soft power off.
[agent] post_deploy_get_power_state_retry_interval = 5(IntOpt) Amount of time (in seconds) to wait between polling power state after trigger soft poweroff.
[api] public_endpoint = None(StrOpt) Public URL to use when building the links to the API resources (for example, "https://ironic.rocks:6384"). If None the links will be built using the request's host URL. If the API is operating behind a proxy, you will want to change this to represent the proxy's URL. Defaults to None.
[cimc] action_interval = 10(IntOpt) Amount of time in seconds to wait in between power operations
[cimc] max_retry = 6(IntOpt) Number of times a power operation needs to be retried
[cisco_ucs] action_interval = 5(IntOpt) Amount of time in seconds to wait in between power operations
[cisco_ucs] max_retry = 6(IntOpt) Number of times a power operation needs to be retried
[conductor] clean_callback_timeout = 1800(IntOpt) Timeout (seconds) to wait for a callback from the ramdisk doing the cleaning. If the timeout is reached the node will be put in the "clean failed" provision state. Set to 0 to disable timeout.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[deploy] erase_devices_iterations = 1(IntOpt) Number of iterations to be run for erasing devices.
[deploy] erase_devices_priority = None(IntOpt) Priority to run in-band erase devices via the Ironic Python Agent ramdisk. If unset, will use the priority set in the ramdisk (defaults to 10 for the GenericHardwareManager). If set to 0, will not run during cleaning.
[deploy] http_root = /httpboot(StrOpt) ironic-conductor node's HTTP root path.
[deploy] http_url = None(StrOpt) ironic-conductor node's HTTP server URL. Example: http://192.1.2.3:8080
[drac] client_retry_count = 5(IntOpt) In case there is a communication failure, the DRAC client is going to resend the request as many times as defined in this setting.
[drac] client_retry_delay = 5(IntOpt) In case there is a communication failure, the DRAC client is going to wait for as many seconds as defined in this setting before resending the request.
[iboot] max_retry = 3(IntOpt) Maximum retries for iBoot operations
[iboot] retry_interval = 1(IntOpt) Time between retry attempts for iBoot operations
[ilo] use_web_server_for_images = False(BoolOpt) Set this to True to use http web server to host floppy images and generated boot ISO. This requires http_root and http_url to be configured in the [deploy] section of the config file. If this is set to False, then Ironic will use Swift to host the floppy images and generated boot_iso.
[inspector] enabled = False(BoolOpt) whether to enable inspection using ironic-inspector
[inspector] service_url = None(StrOpt) ironic-inspector HTTP endpoint. If this is not set, the ironic-inspector client default (http://127.0.0.1:5050) will be used.
[inspector] status_check_period = 60(IntOpt) period (in seconds) to check status of nodes on inspection
[irmc] remote_image_server = None(StrOpt) IP of remote image server
[irmc] remote_image_share_name = share(StrOpt) share name of remote_image_server
[irmc] remote_image_share_root = /remote_image_share_root(StrOpt) Ironic conductor node's "NFS" or "CIFS" root path
[irmc] remote_image_share_type = CIFS(StrOpt) Share type of virtual media, either "NFS" or "CIFS"
[irmc] remote_image_user_domain = (StrOpt) Domain name of remote_image_user_name
[irmc] remote_image_user_name = None(StrOpt) User name of remote_image_server
[irmc] remote_image_user_password = None(StrOpt) Password of remote_image_user_name
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_middleware] max_request_body_size = 114688(IntOpt) The maximum body size for each request, in bytes.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_versionedobjects] fatal_exception_format_errors = False(BoolOpt) Make exception message format errors fatal
New default values
Option | Previous default value | New default value
[DEFAULT] default_log_levels | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] tempdir | None | /tmp
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[matchmaker_redis] password | None | (empty string)
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
Deprecated options
Deprecated option | New Option
[agent] agent_pxe_append_params | None
[agent] agent_erase_devices_priority | [deploy] erase_devices_priority
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
[DEFAULT] log_format | None
[agent] agent_pxe_config_template | None
[DEFAULT] use_syslog | None
[agent] manage_tftp | [agent] manage_agent_boot
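The Bare Metal options above are set in ironic.conf in standard oslo.config INI syntax. A minimal illustrative fragment combining the new [deploy] and [inspector] options (the URLs below are placeholders based on the help text, not defaults)::

    [deploy]
    # Serve deploy images from the conductor's local HTTP server.
    http_root = /httpboot
    http_url = http://192.1.2.3:8080

    [inspector]
    # Enable in-band hardware inspection through ironic-inspector.
    enabled = True
    # If unset, the ironic-inspector client default (http://127.0.0.1:5050) is used.
    service_url = http://192.1.2.3:5050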
diff --git a/doc/config-reference/conf-changes/keystone.xml b/doc/config-reference/conf-changes/keystone.xml deleted file mode 100644 index 3724b81b5d..0000000000 --- a/doc/config-reference/conf-changes/keystone.xml +++ /dev/null @@ -1,336 +0,0 @@
New, updated, and deprecated options in Liberty for OpenStack Identity
New options
Option = default value(Type) Help string
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] host = 127.0.0.1(StrOpt) Host to locate redis.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[endpoint_policy] enabled = True(BoolOpt) Enable endpoint_policy functionality.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[tokenless_auth] issuer_attribute = SSL_CLIENT_I_DN(StrOpt) The issuer attribute that is served as an IdP ID for the X.509 tokenless authorization along with the protocol to look up its corresponding mapping. It is the environment variable in the WSGI environment that references to the issuer of the client certificate.
[tokenless_auth] protocol = x509(StrOpt) The protocol name for the X.509 tokenless authorization along with the option issuer_attribute below can look up its corresponding mapping.
[tokenless_auth] trusted_issuer = [](MultiStrOpt) The list of trusted issuers to further filter the certificates that are allowed to participate in the X.509 tokenless authorization. If the option is absent then no certificates will be allowed. The naming format for the attributes of a Distinguished Name(DN) must be separated by a comma and contain no spaces. This configuration option may be repeated for multiple values. For example: trusted_issuer=CN=john,OU=keystone,O=openstack trusted_issuer=CN=mary,OU=eng,O=abc
New default values
Option | Previous default value | New default value
[DEFAULT] crypt_strength | 40000 | 10000
[DEFAULT] default_log_levels | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[auth] external | keystone.auth.plugins.external.DefaultDomain | None
[auth] oauth1 | keystone.auth.plugins.oauth1.OAuth | None
[auth] password | keystone.auth.plugins.password.Password | None
[auth] token | keystone.auth.plugins.token.Token | None
[catalog] driver | keystone.catalog.backends.sql.Catalog | sql
[credential] driver | keystone.credential.backends.sql.Credential | sql
[domain_config] driver | keystone.resource.config_backends.sql.DomainConfig | sql
[endpoint_filter] driver | keystone.contrib.endpoint_filter.backends.sql.EndpointFilter | sql
[endpoint_policy] driver | keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy | sql
[federation] driver | keystone.contrib.federation.backends.sql.Federation | sql
[identity] driver | keystone.identity.backends.sql.Identity | sql
[identity_mapping] driver | keystone.identity.mapping_backends.sql.Mapping | sql
[identity_mapping] generator | keystone.identity.id_generators.sha256.Generator | sha256
[ldap] user_attribute_ignore | default_project_id, tenants | default_project_id
[matchmaker_redis] password | None | (empty string)
[oauth1] driver | keystone.contrib.oauth1.backends.sql.OAuth1 | sql
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
[policy] driver | keystone.policy.backends.sql.Policy | sql
[revoke] driver | keystone.contrib.revoke.backends.sql.Revoke | sql
[token] driver | keystone.token.persistence.backends.sql.Token | sql
[token] provider | keystone.token.providers.uuid.Provider | uuid
[trust] driver | keystone.trust.backends.sql.Trust | sql
Deprecated options
Deprecated option | New Option
[DEFAULT] use_syslog | None
[DEFAULT] log_format | None
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
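As an illustration of the new short driver names and the [tokenless_auth] options listed above, a keystone.conf fragment might look as follows (the trusted_issuer DNs are the examples given in the help text, not site-specific values)::

    [token]
    # Liberty accepts the entry-point short name in place of the full class path.
    provider = uuid
    driver = sql

    [tokenless_auth]
    protocol = x509
    issuer_attribute = SSL_CLIENT_I_DN
    trusted_issuer = CN=john,OU=keystone,O=openstack
    trusted_issuer = CN=mary,OU=eng,O=abc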
diff --git a/doc/config-reference/conf-changes/manila.xml b/doc/config-reference/conf-changes/manila.xml deleted file mode 100644 index 18675065d5..0000000000 --- a/doc/config-reference/conf-changes/manila.xml +++ /dev/null @@ -1,435 +0,0 @@
New, updated, and deprecated options in Liberty for Shared File Systems service
New options
Option = default value(Type) Help string
[DEFAULT] client_socket_timeout = 900(IntOpt) Timeout for client connections socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever.
[DEFAULT] emc_nas_root_dir = None(StrOpt) The root directory where shares will be located.
[DEFAULT] enable_periodic_hooks = False(BoolOpt) Whether to enable periodic hooks or not.
[DEFAULT] enable_post_hooks = False(BoolOpt) Whether to enable post hooks or not.
[DEFAULT] enable_pre_hooks = False(BoolOpt) Whether to enable pre hooks or not.
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] glusterfs_share_layout = None(StrOpt) Specifies GlusterFS share layout, that is, the method of associating backing GlusterFS resources to shares.
[DEFAULT] hds_hnas_cluster_admin_ip0 = None(StrOpt) The IP of the cluster's admin node. Only set in HNAS multinode clusters.
[DEFAULT] hds_hnas_evs_id = None(StrOpt) Specify which EVS this backend is assigned to.
[DEFAULT] hds_hnas_evs_ip = None(StrOpt) Specify IP for mounting shares.
[DEFAULT] hds_hnas_file_system_name = None(StrOpt) Specify file-system name for creating shares.
[DEFAULT] hds_hnas_ip = None(StrOpt) HNAS management interface IP for communication between Manila controller and HNAS.
[DEFAULT] hds_hnas_password = None(StrOpt) HNAS user password. Required only if private key is not provided.
[DEFAULT] hds_hnas_ssh_private_key = None(StrOpt) RSA/DSA private key value used to connect into HNAS. Required only if password is not provided.
[DEFAULT] hds_hnas_stalled_job_timeout = 30(IntOpt) The time (in seconds) to wait for stalled HNAS jobs before aborting.
[DEFAULT] hds_hnas_user = None(StrOpt) HNAS user name (Base64 string) used to perform tasks such as creating file-systems and network interfaces.
[DEFAULT] hook_drivers = (ListOpt) Driver(s) to perform some additional actions before and after share driver actions and on a periodic basis. Default is [].
[DEFAULT] max_over_subscription_ratio = 20.0(FloatOpt) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is invalid.
[DEFAULT] max_time_to_extend_volume = 180(IntOpt) Maximum time to wait for extending cinder volume.
[DEFAULT] migration_create_delete_share_timeout = 300(IntOpt) Timeout for creating and deleting share instances when performing share migration (seconds).
[DEFAULT] migration_data_copy_node_ip = None(StrOpt) The IP of the node responsible for copying data during migration, such as the data copy service node, reachable by the backend.
[DEFAULT] migration_ignore_files = lost+found(ListOpt) List of files and folders to be ignored when migrating shares. Items should be names (not including any path).
[DEFAULT] migration_mounting_backend_ip = None(StrOpt) Backend IP in admin network to use for mounting shares during migration.
[DEFAULT] migration_protocol_mount_command = None(StrOpt) The command for mounting shares for this backend. Must specify the executable and all necessary parameters for the protocol supported. It is advisable to separate protocols per backend.
[DEFAULT] migration_readonly_support = True(BoolOpt) Specify whether read only access mode is supported in this backend.
[DEFAULT] migration_tmp_location = /tmp/(StrOpt) Temporary path to create and mount shares during migration.
[DEFAULT] migration_wait_access_rules_timeout = 90(IntOpt) Time to wait for access rules to be allowed/denied on backends when migrating shares using generic approach (seconds).
[DEFAULT] nova_api_microversion = 2.10(StrOpt) Version of Nova API to be used.
[DEFAULT] osapi_share_workers = 1(IntOpt) Number of workers for OpenStack Share API service.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] periodic_hooks_interval = 300.0(FloatOpt) Interval in seconds between execution of periodic hooks. Used when option 'enable_periodic_hooks' is set to True. Default is 300.
[DEFAULT] pool_weight_multiplier = 1.0(FloatOpt) Multiplier used for weighing pools which have existing share servers. Negative numbers mean to spread vs stack.
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] suppress_post_hooks_errors = False(BoolOpt) Whether to suppress post hook errors (allow driver's results to pass through) or not.
[DEFAULT] suppress_pre_hooks_errors = False(BoolOpt) Whether to suppress pre hook errors (allow driver perform actions) or not.
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] winrm_cert_key_pem_path = ~/.ssl/key.pem(StrOpt) Path to the x509 certificate key.
[DEFAULT] winrm_cert_pem_path = ~/.ssl/cert.pem(StrOpt) Path to the x509 certificate used for accessing the service instance.
[DEFAULT] winrm_conn_timeout = 60(IntOpt) WinRM connection timeout.
[DEFAULT] winrm_operation_timeout = 60(IntOpt) WinRM operation timeout.
[DEFAULT] winrm_retry_count = 3(IntOpt) WinRM retry count.
[DEFAULT] winrm_retry_interval = 5(IntOpt) WinRM retry interval in seconds
[DEFAULT] winrm_use_cert_based_auth = False(BoolOpt) Use x509 certificates in order to authenticate to the service instance.
[DEFAULT] wsgi_keep_alive = True(BoolOpt) If False, closes the client socket connection explicitly. It is set to True by default to maintain backward compatibility. The recommended setting is False.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_middleware] max_request_body_size = 114688(IntOpt) The maximum body size for each request, in bytes.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy] policy_default_rule = default(StrOpt) Default rule. Enforced when a requested rule is not found.
[oslo_policy] policy_dirs = ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
[oslo_policy] policy_file = policy.json(StrOpt) The JSON file that defines policies.
New default values
Option | Previous default value | New default value
[DEFAULT] default_log_levels | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] enable_v1_api | True | False
[DEFAULT] enable_v2_api | True | False
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] osapi_share_listen | 0.0.0.0 | ::
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] scheduler_default_filters | AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter | AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter, ConsistencyGroupFilter
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[matchmaker_redis] password | None | (empty string)
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
Deprecated options
Deprecated option | New Option
[DEFAULT] glusterfs_native_server_password | [DEFAULT] glusterfs_server_password
[DEFAULT] sql_max_retries | [database] max_retries
[DEFAULT] sql_retry_interval | [database] retry_interval
[DEFAULT] use_syslog | None
[DEFAULT] osapi_max_request_body_size | [oslo_middleware] max_request_body_size
[DEFAULT] glusterfs_native_path_to_private_key | [DEFAULT] glusterfs_path_to_private_key
[DEFAULT] sql_idle_timeout | [database] idle_timeout
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
[DEFAULT] db_backend | [database] backend
[DEFAULT] log_format | None
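A hedged manila.conf sketch showing how a few of the new Liberty options above fit together (the data-copy node IP is a placeholder, not a default)::

    [DEFAULT]
    # Thin provisioning: provisioned capacity may reach 20x the physical capacity.
    max_over_subscription_ratio = 20.0
    # Generic share migration helpers.
    migration_tmp_location = /tmp/
    migration_data_copy_node_ip = 203.0.113.10
    migration_readonly_support = True
    # Run periodic hooks every 300 seconds.
    enable_periodic_hooks = True
    periodic_hooks_interval = 300.0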
diff --git a/doc/config-reference/conf-changes/neutron.xml b/doc/config-reference/conf-changes/neutron.xml deleted file mode 100644 index fefc54e1f4..0000000000 --- a/doc/config-reference/conf-changes/neutron.xml +++ /dev/null @@ -1,402 +0,0 @@
New, updated, and deprecated options in Liberty for OpenStack Networking
New options
Option = default value(Type) Help string
[DEFAULT] dns_domain = openstacklocal(StrOpt) Domain to use for building the hostnames
[DEFAULT] dnsmasq_base_log_dir = None(StrOpt) Base log dir for dnsmasq logging. The log contains DHCP and DNS log information and is useful for debugging issues with either DHCP or DNS. If this option is null, dnsmasq logging is disabled.
[DEFAULT] enable_new_agents = True(BoolOpt) Agent starts with admin_state_up=False when enable_new_agents=False. In that case, the user's resources will not be scheduled automatically to the agent until the admin changes admin_state_up to True.
[DEFAULT] enable_snat_by_default = True(BoolOpt) Define the default value of enable_snat if not provided in external_gateway_info.
[DEFAULT] endpoint_url = None(StrOpt) Neutron endpoint URL, if not set will use endpoint from the keystone catalog along with endpoint_type
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] force_metadata = False(BoolOpt) Force to use DHCP to get Metadata on all networks.
[DEFAULT] ipam_driver = None(StrOpt) IPAM driver to use.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] pd_confs = $state_path/pd(StrOpt) Location to store IPv6 PD files.
[DEFAULT] pd_dhcp_driver = dibbler(StrOpt) Service to handle DHCPv6 Prefix delegation.
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] prefix_delegation_driver = dibbler(StrOpt) Driver used for ipv6 prefix delegation. This needs to be an entry point defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for entry points included with the neutron source.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] vendor_pen = 8888(StrOpt) A decimal value as Vendor's Registered Private Enterprise Number as required by RFC3315 DUID-EN.
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[AGENT] agent_type = Open vSwitch agent(StrOpt) Selects the Agent Type reported
[AGENT] drop_flows_on_start = False(BoolOpt) Reset flow table on start. Setting this to True will cause brief traffic interruption.
[AGENT] log_agent_heartbeats = False(BoolOpt) Log agent heartbeats
[AGENT] tunnel_csum = False(BoolOpt) Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/VXLAN tunnel.
[LINUX_BRIDGE] bridge_mappings = (ListOpt) List of <physical_network>:<physical_bridge>
[OVS] datapath_type = system(StrOpt) OVS datapath to use.
[OVS] of_connect_timeout = 30(IntOpt) Timeout in seconds to wait for the local switch connecting the controller. Used only for 'native' driver.
[OVS] of_interface = ovs-ofctl(StrOpt) OpenFlow interface to use.
[OVS] of_listen_address = 127.0.0.1(IPOpt) Address to listen on for OpenFlow connections. Used only for 'native' driver.
[OVS] of_listen_port = 6633(IntOpt) Port to listen on for OpenFlow connections. Used only for 'native' driver.
[OVS] of_request_timeout = 10(IntOpt) Timeout in seconds to wait for a single OpenFlow request. Used only for 'native' driver.
[QUOTAS] quota_rbac_entry = 10(IntOpt) Default number of RBAC entries allowed per tenant. A negative value means unlimited.
[QUOTAS] track_quota_usage = True(BoolOpt) Keep track of current resource quota usage in the database. Plugins which do not leverage the neutron database should set this flag to False.
[agent] extensions = (ListOpt) Extensions list to use
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[ml2] external_network_type = None(StrOpt) Default network type for external networks when no provider attributes are specified. By default it is None, which means that if provider attributes are not specified while creating external networks then they will have the same type as tenant networks. Allowed values for external_network_type config option depend on the network type values configured in type_drivers config option.
[ml2_type_geneve] max_header_size = 50(IntOpt) Geneve encapsulation header size is dynamic; this value is used to calculate the maximum MTU for the driver. This is the sum of the sizes of the outer ETH + IP + UDP + GENEVE headers.
[ml2_type_geneve] vni_ranges = (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of Geneve VNI IDs that are available for tenant network allocation
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_policy] policy_default_rule = default(StrOpt) Default rule. Enforced when a requested rule is not found.
[oslo_policy] policy_dirs = ['policy.d'](MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored.
[oslo_policy] policy_file = policy.json(StrOpt) The JSON file that defines policies.
[oslo_versionedobjects] fatal_exception_format_errors = False(BoolOpt) Make exception message format errors fatal
[qos] notification_drivers = message_queue(ListOpt) Drivers list to use to send the update notification
New default values
Option | Previous default value | New default value
[DEFAULT] api_workers | 0 | None
[DEFAULT] default_log_levels | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] dhcp_delete_namespaces | False | True
[DEFAULT] endpoint_type | publicURL | adminURL
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] network_scheduler_driver | neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler | neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
[DEFAULT] router_delete_namespaces | False | True
[DEFAULT] router_scheduler_driver | neutron.scheduler.l3_agent_scheduler.ChanceScheduler | neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
[DEFAULT] rpc_workers | 0 | 1
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[AGENT] prevent_arp_spoofing | False | True
[QUOTAS] quota_driver | neutron.db.quota_db.DbQuotaDriver | neutron.db.quota.driver.DbQuotaDriver
[matchmaker_redis] password | None | (empty string)
[ml2] type_drivers | local, flat, vlan, gre, vxlan | local, flat, vlan, gre, vxlan, geneve
[ml2_sriov] agent_required | False | True
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
Deprecated options
Deprecated option | New Option
[DEFAULT] use_syslog | None
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
[ml2_sriov] agent_required | None
[DEFAULT] log_format | None
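Geneve shows up above both as a new ML2 type driver default and as the new [ml2_type_geneve] group; an illustrative ml2_conf.ini fragment (the VNI range and tenant network type are example choices, not defaults)::

    [ml2]
    type_drivers = local,flat,vlan,gre,vxlan,geneve
    tenant_network_types = geneve

    [ml2_type_geneve]
    # Comma-separated <vni_min>:<vni_max> tuples for tenant network allocation.
    vni_ranges = 1:1000
    max_header_size = 50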
diff --git a/doc/config-reference/conf-changes/nova.xml b/doc/config-reference/conf-changes/nova.xml deleted file mode 100644 index 2bdc36e169..0000000000 --- a/doc/config-reference/conf-changes/nova.xml +++ /dev/null @@ -1,478 +0,0 @@
New, updated, and deprecated options in Liberty for OpenStack Compute
New options
Option = default value(Type) Help string
[DEFAULT] console_allowed_origins = (ListOpt) Allowed Origin header hostnames for access to console proxy servers
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] max_concurrent_live_migrations = 1(IntOpt) Maximum number of live migrations to run concurrently. This limit is enforced to avoid outbound live migrations overwhelming the host/network and causing failures. It is not recommended that you change this unless you are very sure that doing so is safe and stable in your environment.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] secure_proxy_ssl_header = None(StrOpt) The HTTP header used to determine the scheme for the original request, even if it was removed by an SSL terminating proxy. Typical value is "HTTP_X_FORWARDED_PROTO".
[DEFAULT] update_resources_interval = 0(IntOpt) Interval in seconds for updating compute resources. A number less than 0 means to disable the task completely. Leaving this at the default of 0 will cause this to run at the default periodic interval. Setting it to any positive value will cause it to run at approximately that number of seconds.
[DEFAULT] use_rootwrap_daemon = False(BoolOpt) Start and use a daemon that can run the commands that need to be run with root privileges. This option is usually enabled on nodes that run nova compute processes
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[hyperv] power_state_check_timeframe = 60(IntOpt) The timeframe to be checked for instance power state changes.
[hyperv] power_state_event_polling_interval = 2(IntOpt) Instance power state change event polling frequency.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[libvirt] live_migration_completion_timeout = 800(IntOpt) Time to wait, in seconds, for migration to successfully complete transferring data before aborting the operation. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB. Should usually be larger than downtime delay * downtime steps. Set to 0 to disable timeouts.
[libvirt] live_migration_downtime = 500(IntOpt) Maximum permitted downtime, in milliseconds, for live migration switchover. Will be rounded up to a minimum of 100ms. Use a large value if guest liveness is unimportant.
[libvirt] live_migration_downtime_delay = 75(IntOpt) Time to wait, in seconds, between each step increase of the migration downtime. Minimum delay is 10 seconds. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB per device
[libvirt] live_migration_downtime_steps = 10(IntOpt) Number of incremental steps to reach max downtime value. Will be rounded up to a minimum of 3 steps
[libvirt] live_migration_progress_timeout = 150(IntOpt) Time to wait, in seconds, for migration to make forward progress in transferring data before aborting the operation. Set to 0 to disable timeouts.
[libvirt] remote_filesystem_transport = ssh(StrOpt) Use ssh or rsync transport for creating, copying, removing files on the remote host.
[mks] enabled = False(BoolOpt) Enable MKS related features
[mks] mksproxy_base_url = http://127.0.0.1:6090/(StrOpt) Location of MKS web console proxy, in the form "http://127.0.0.1:6090/"
[osapi_v21] enabled = True(BoolOpt) DEPRECATED: Whether the V2.1 API is enabled or not. This option will be removed in the near future.
[osapi_v21] extensions_blacklist = (ListOpt) DEPRECATED: A list of v2.1 API extensions to never load. Specify the extension aliases here. This option will be removed in the near future. After that point you have to run all of the API.
[osapi_v21] extensions_whitelist = (ListOpt) DEPRECATED: If the list is not empty then a v2.1 API extension will only be loaded if it exists in this list. Specify the extension aliases here. This option will be removed in the near future. After that point you have to run all of the API.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - the first one with the payload, and a second one to ensure the other side has finished sending the payload. We are going to remove this behaviour in the N release, but we must keep backward compatibility at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with new installations or for testing. Please note that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[oslo_versionedobjects] fatal_exception_format_errors = False(BoolOpt) Make exception message format errors fatal
[vmware] ca_file = None(StrOpt) Specify a CA bundle file to use in verifying the vCenter server certificate.
[vmware] console_delay_seconds = None(IntOpt) Set this value if affected by an increased network latency causing repeated characters when typing in a remote console.
[vmware] insecure = False(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "ca_file" is set.
[vmware] serial_port_proxy_uri = None(StrOpt) Identifies a proxy service that provides network access to the serial_port_service_uri. This option is ignored if serial_port_service_uri is not specified.
[vmware] serial_port_service_uri = None(StrOpt) Identifies the remote system that serial port traffic will be sent to. If this is not set, no serial ports will be added to the created VMs.
[vnc] enabled = True(BoolOpt) Enable VNC related features
[vnc] keymap = en-us(StrOpt) Keymap for VNC
[vnc] novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html(StrOpt) Location of VNC console proxy, in the form "http://127.0.0.1:6080/vnc_auto.html"
[vnc] vncserver_listen = 127.0.0.1(StrOpt) IP address on which instance vncservers should listen
[vnc] vncserver_proxyclient_address = 127.0.0.1(StrOpt) The address to which proxy clients (like nova-xvpvncproxy) should connect
[vnc] xvpvncproxy_base_url = http://127.0.0.1:6081/console(StrOpt) Location of nova xvp VNC console proxy, in the form "http://127.0.0.1:6081/console"
[workarounds] handle_virt_lifecycle_events = True(BoolOpt) Whether or not to handle events raised from the compute driver's 'emit_event' method. These are lifecycle events raised from compute drivers that implement the method. An example of a lifecycle event is an instance starting or stopping. If the instance is going through task state changes due to an API operation, like resize, the events are ignored. However, this is an advanced feature which allows the hypervisor to signal to the compute service that an unexpected state change has occurred in an instance and the instance can be shutdown automatically - which can inherently race in reboot operations or when the compute service or host is rebooted, either planned or due to an unexpected outage. Care should be taken when using this and sync_power_state_interval is negative since then if any instances are out of sync between the hypervisor and the Nova database they will have to be synchronized manually. See https://bugs.launchpad.net/bugs/1444630
New default values
Option | Previous default value | New default value
[DEFAULT] compute_available_monitors | ['nova.compute.monitors.all_monitors'] | None
[DEFAULT] cpu_allocation_ratio | 16.0 | 0.0
[DEFAULT] default_log_levels | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] osapi_compute_extension | ['nova.api.openstack.compute.contrib.standard_extensions'] | ['nova.api.openstack.compute.legacy_v2.contrib.standard_extensions']
[DEFAULT] ram_allocation_ratio | 1.5 | 0.0
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] scheduler_default_filters | RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter | RetryFilter, AvailabilityZoneFilter, RamFilter, DiskFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[cells] mute_weight_multiplier | -10.0 | -10000.0
[libvirt] remove_unused_kernels | False | True
[matchmaker_redis] password | None | (empty string)
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
Deprecated options
Deprecated option | New Option
[DEFAULT] network_device_mtu | None
[DEFAULT] vnc_keymap | [vnc] keymap
[osapi_v21] extensions_whitelist | None
[ironic] admin_auth_token | None
[DEFAULT] vnc_enabled | [vnc] enabled
[DEFAULT] use_syslog | None
[DEFAULT] xvpvncproxy_base_url | [vnc] xvpvncproxy_base_url
[ironic] client_log_level | None
[neutron] admin_username | None
[DEFAULT] ssl_ca_file | [ssl] ca_file
[neutron] auth_strategy | None
[osapi_v21] enabled | None
[DEFAULT] novncproxy_base_url | [vnc] novncproxy_base_url
[DEFAULT] compute_available_monitors | None
[neutron] admin_user_id | None
[neutron] admin_tenant_id | None
[DEFAULT] ssl_cert_file | [ssl] cert_file
[DEFAULT] log_format | None
[DEFAULT] vncserver_proxyclient_address | [vnc] vncserver_proxyclient_address
[osapi_v21] extensions_blacklist | None
[workarounds] destroy_after_evacuate | None
[neutron] admin_tenant_name | None
[DEFAULT] osapi_compute_ext_list | None
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
[DEFAULT] vncserver_listen | [vnc] vncserver_listen
[neutron] admin_password | None
[DEFAULT] share_dhcp_address | None
[DEFAULT] ssl_key_file | [ssl] key_file
[libvirt] remove_unused_kernels | None
[neutron] admin_auth_url | None
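Several of the deprecated VNC options moved unchanged from [DEFAULT] into the new [vnc] section. A minimal nova.conf sketch of that migration; the listen address and proxy URL are placeholders, not values from this table:

# Deprecated style
[DEFAULT]
vnc_enabled = True
vncserver_listen = 0.0.0.0
novncproxy_base_url = http://controller:6080/vnc_auto.html

# Replacement style
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
novncproxy_base_url = http://controller:6080/vnc_auto.html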
diff --git a/doc/config-reference/conf-changes/sahara.xml b/doc/config-reference/conf-changes/sahara.xml deleted file mode 100644 index fc25ad51db..0000000000 --- a/doc/config-reference/conf-changes/sahara.xml +++ /dev/null @@ -1,284 +0,0 @@ - -
New, updated, and deprecated options in Liberty for Data Processing service
New options
Option = default value | (Type) Help string
[DEFAULT] cluster_operation_trust_expiration_hours = 24(IntOpt) Defines the period of time (in hours) after which trusts created to allow sahara to create or scale a cluster will expire. Note that this value should be significantly larger than the value of the cleanup_time_for_incomplete_clusters configuration key if use of the cluster cleanup feature is desired (the trust must last at least as long as a cluster could validly take to stall in its creation, plus the timeout value set in that key, plus one hour for the period of the cleanup job).
[DEFAULT] default_ntp_server = pool.ntp.org(StrOpt) Default ntp server for time sync
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] heat_stack_tags = data-processing-cluster(ListOpt) List of tags to be used during operating with stack.
[DEFAULT] host = 127.0.0.1(StrOpt) Host to locate redis.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cinder] endpoint_type = internalURL(StrOpt) Endpoint type for cinder client requests
[cors] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[cors.subdomain] allow_credentials = True(BoolOpt) Indicate that the actual request can include user credentials
[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which header field names may be used during the actual request.
[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS(ListOpt) Indicate which methods can be used during the actual request.
[cors.subdomain] allowed_origin = None(StrOpt) Indicate whether this resource may be shared with the domain received in the requests "origin" header.
[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers.
[cors.subdomain] max_age = 3600(IntOpt) Maximum cache age of CORS preflight requests.
[heat] endpoint_type = internalURL(StrOpt) Endpoint type for heat client requests
[keystone] endpoint_type = internalURL(StrOpt) Endpoint type for keystone client requests
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[manila] api_insecure = True(BoolOpt) Allow to perform insecure SSL requests to manila.
[manila] api_version = 1(IntOpt) Version of the manila API to use.
[manila] ca_file = None(StrOpt) Location of ca certificates file to use for manila client requests.
[neutron] endpoint_type = internalURL(StrOpt) Endpoint type for neutron client requests
[nova] endpoint_type = internalURL(StrOpt) Endpoint type for nova client requests
[object_store_access] public_identity_ca_file = None(StrOpt) Location of ca certificate file to use for identity client requests via public endpoint
[object_store_access] public_object_store_ca_file = None(StrOpt) Location of ca certificate file to use for object-store client requests via public endpoint
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy.
[retries] retries_number = 5(IntOpt) Number of times to retry the request to client before failing
[retries] retry_after = 10(IntOpt) Time between the retries to client (in seconds).
[swift] endpoint_type = internalURL(StrOpt) Endpoint type for swift client requests
New default values
Option | Previous default value | New default value
[DEFAULT] default_log_levels | amqplib=WARN, qpid.messaging=INFO, stevedore=INFO, eventlet.wsgi.server=WARN, sqlalchemy=WARN, boto=WARN, suds=INFO, keystone=INFO, paramiko=WARN, requests=WARN, iso8601=WARN, oslo_messaging=INFO | amqplib=WARN, qpid.messaging=INFO, stevedore=INFO, eventlet.wsgi.server=WARN, sqlalchemy=WARN, boto=WARN, suds=INFO, keystone=INFO, paramiko=WARN, requests=WARN, iso8601=WARN, oslo_messaging=INFO, neutronclient=INFO
[DEFAULT] infrastructure_engine | direct | heat
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[matchmaker_redis] password | None |
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
Deprecated options
Deprecated option | New Option
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
[DEFAULT] log_format | None
[DEFAULT] use_syslog | None
diff --git a/doc/config-reference/conf-changes/swift.xml b/doc/config-reference/conf-changes/swift.xml deleted file mode 100644 index c7ca3d45e7..0000000000 --- a/doc/config-reference/conf-changes/swift.xml +++ /dev/null @@ -1,6 +0,0 @@ - -
New, updated, and deprecated options in Liberty for OpenStack Object Storage
There are no new, updated, or deprecated options in Liberty for OpenStack Object Storage.
diff --git a/doc/config-reference/conf-changes/trove.xml b/doc/config-reference/conf-changes/trove.xml deleted file mode 100644 index 3e28cb55da..0000000000 --- a/doc/config-reference/conf-changes/trove.xml +++ /dev/null @@ -1,461 +0,0 @@ - -
New, updated, and deprecated options in Liberty for Database service
New options
Option = default value | (Type) Help string
[DEFAULT] executor_thread_pool_size = 64(IntOpt) Size of executor thread pool.
[DEFAULT] exists_notification_interval = 3600(IntOpt) Seconds to wait between pushing events.
[DEFAULT] nova_proxy_admin_tenant_id = (StrOpt) Admin tenant ID used to connect to Nova.
[DEFAULT] password = (StrOpt) Password for Redis server (optional).
[DEFAULT] port = 6379(IntOpt) Use this port to connect to redis host.
[DEFAULT] rpc_conn_pool_size = 30(IntOpt) Size of RPC connection pool.
[DEFAULT] rpc_poll_timeout = 1(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired.
[DEFAULT] rpc_zmq_all_req_rep = True(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT.
[DEFAULT] rpc_zmq_concurrency = eventlet(StrOpt) Type of concurrency used. Either "native" or "eventlet"
[DEFAULT] timeout_wait_for_service = 120(IntOpt) Maximum time (in seconds) to wait for a service to become alive.
[DEFAULT] watch_log_file = False(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set.
[DEFAULT] zmq_use_broker = True(BoolOpt) Shows whether zmq-messaging uses broker or not.
[cassandra] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for cassandra.
[couchbase] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for couchbase.
[couchdb] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for couchdb.
[db2] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for db2.
[keystone_authtoken] region_name = None(StrOpt) The region in which the identity server can be found.
[mariadb] backup_incremental_strategy = {'InnoBackupEx': 'InnoBackupExIncremental'}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
[mariadb] backup_namespace = trove.guestagent.strategies.backup.mysql_impl(StrOpt) Namespace to load backup strategies from.
[mariadb] backup_strategy = InnoBackupEx(StrOpt) Default strategy to perform backups.
[mariadb] device_path = /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
[mariadb] mount_point = /var/lib/mysql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
[mariadb] replication_namespace = trove.guestagent.strategies.replication.mysql_binlog(StrOpt) Namespace to load replication strategies from.
[mariadb] replication_strategy = MysqlBinlogReplication(StrOpt) Default strategy for replication.
[mariadb] restore_namespace = trove.guestagent.strategies.restore.mysql_impl(StrOpt) Namespace to load restore strategies from.
[mariadb] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for mysql.
[mariadb] root_on_create = False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
[mariadb] tcp_ports = 3306(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
[mariadb] udp_ports = (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
[mariadb] usage_timeout = 400(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
[mariadb] volume_support = True(BoolOpt) Whether to provision a Cinder volume for datadir.
[mongodb] add_members_timeout = 300(IntOpt) Maximum time to wait (in seconds) for a replica set initialization process to complete.
[mongodb] configsvr_port = 27019(IntOpt) Port for instances running as config servers.
[mongodb] ignore_dbs = admin, local, config(ListOpt) Databases to exclude when listing databases.
[mongodb] ignore_users = admin.os_admin, admin.root(ListOpt) Users to exclude when listing users.
[mongodb] mongodb_port = 27017(IntOpt) Port for mongod and mongos instances.
[mongodb] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for mongodb.
[mysql] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for mysql.
[oslo_messaging_amqp] password = (StrOpt) Password for message broker authentication
[oslo_messaging_amqp] sasl_config_dir = (StrOpt) Path to directory that contains the SASL configuration
[oslo_messaging_amqp] sasl_config_name = (StrOpt) Name of configuration file (without .conf suffix)
[oslo_messaging_amqp] sasl_mechanisms = (StrOpt) Space separated list of acceptable SASL mechanisms
[oslo_messaging_amqp] username = (StrOpt) User name for message broker authentication
[oslo_messaging_qpid] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[oslo_messaging_rabbit] kombu_reconnect_timeout = 60(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout.
[oslo_messaging_rabbit] send_single_reply = False(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release.
[percona] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for percona.
[postgresql] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for postgresql.
[pxc] api_strategy = trove.common.strategies.cluster.experimental.pxc.api.PXCAPIStrategy(StrOpt) Class that implements datastore-specific API logic.
[pxc] backup_incremental_strategy = {'InnoBackupEx': 'InnoBackupExIncremental'}(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup.
[pxc] backup_namespace = trove.guestagent.strategies.backup.mysql_impl(StrOpt) Namespace to load backup strategies from.
[pxc] backup_strategy = InnoBackupEx(StrOpt) Default strategy to perform backups.
[pxc] cluster_support = True(BoolOpt) Enable clusters to be created and managed.
[pxc] device_path = /dev/vdb(StrOpt) Device path for volume if volume support is enabled.
[pxc] guestagent_strategy = trove.common.strategies.cluster.experimental.pxc.guestagent.PXCGuestAgentStrategy(StrOpt) Class that implements datastore-specific Guest Agent API logic.
[pxc] ignore_users = os_admin, root, clusterrepuser(ListOpt) Users to exclude when listing users.
[pxc] min_cluster_member_count = 3(IntOpt) Minimum number of members in PXC cluster.
[pxc] mount_point = /var/lib/mysql(StrOpt) Filesystem path for mounting volumes if volume support is enabled.
[pxc] replication_namespace = trove.guestagent.strategies.replication.mysql_gtid(StrOpt) Namespace to load replication strategies from.
[pxc] replication_strategy = MysqlGTIDReplication(StrOpt) Default strategy for replication.
[pxc] replication_user = slave_user(StrOpt) Userid for replication slave.
[pxc] restore_namespace = trove.guestagent.strategies.restore.mysql_impl(StrOpt) Namespace to load restore strategies from.
[pxc] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for pxc.
[pxc] root_on_create = False(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field.
[pxc] taskmanager_strategy = trove.common.strategies.cluster.experimental.pxc.taskmanager.PXCTaskManagerStrategy(StrOpt) Class that implements datastore-specific task manager logic.
[pxc] tcp_ports = 3306, 4444, 4567, 4568(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
[pxc] udp_ports = (ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True).
[pxc] usage_timeout = 450(IntOpt) Maximum time (in seconds) to wait for a Guest to become active.
[pxc] volume_support = True(BoolOpt) Whether to provision a Cinder volume for datadir.
[redis] api_strategy = trove.common.strategies.cluster.experimental.redis.api.RedisAPIStrategy(StrOpt) Class that implements datastore-specific API logic.
[redis] cluster_support = True(BoolOpt) Enable clusters to be created and managed.
[redis] guestagent_strategy = trove.common.strategies.cluster.experimental.redis.guestagent.RedisGuestAgentStrategy(StrOpt) Class that implements datastore-specific Guest Agent API logic.
[redis] replication_namespace = trove.guestagent.strategies.replication.experimental.redis_sync(StrOpt) Namespace to load replication strategies from.
[redis] root_controller = trove.extensions.common.service.DefaultRootController(StrOpt) Root controller implementation for redis.
[redis] taskmanager_strategy = trove.common.strategies.cluster.experimental.redis.taskmanager.RedisTaskManagerStrategy(StrOpt) Class that implements datastore-specific task manager logic.
[vertica] root_controller = trove.extensions.vertica.service.VerticaRootController(StrOpt) Root controller implementation for Vertica.
New default values
Option | Previous default value | New default value
[DEFAULT] cluster_usage_timeout | 675 | 36000
[DEFAULT] default_log_levels | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN | amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN
[DEFAULT] ignore_dbs | lost+found, #mysql50#lost+found, mysql, information_schema | mysql, information_schema, performance_schema
[DEFAULT] logging_exception_prefix | %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s | %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
[DEFAULT] notification_service_id | {'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed'} | {'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'percona': 'fd1723f5-68d2-409c-994f-a4a197892a17', 'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'pxc': '75a628c3-f81b-4ffb-b10a-4087c26bc854', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed', 'mariadb': '7a4f82cc-10d2-4bc6-aadc-d9aacc2a3cb5', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae'}
[DEFAULT] report_interval | 10 | 30
[DEFAULT] rpc_zmq_matchmaker | local | redis
[DEFAULT] usage_timeout | 600 | 900
[DEFAULT] use_syslog_rfc_format | False | True
[DEFAULT] verbose | False | True
[matchmaker_redis] password | None |
[mongodb] backup_namespace | None | trove.guestagent.strategies.backup.experimental.mongo_impl
[mongodb] backup_strategy | None | MongoDump
[mongodb] restore_namespace | None | trove.guestagent.strategies.restore.experimental.mongo_impl
[oslo_messaging_rabbit] heartbeat_timeout_threshold | 0 | 60
[redis] backup_namespace | None | trove.guestagent.strategies.backup.experimental.redis_impl
[redis] backup_strategy | None | RedisBackup
[redis] replication_strategy | None | RedisSyncReplication
[redis] restore_namespace | None | trove.guestagent.strategies.restore.experimental.redis_impl
[redis] tcp_ports | 6379 | 6379, 16379
[redis] volume_support | False | True
Deprecated options
Deprecated option | New Option
[DEFAULT] use_syslog | None
[DEFAULT] rpc_thread_pool_size | [DEFAULT] executor_thread_pool_size
[DEFAULT] log_format | None
diff --git a/doc/config-reference/dashboard/section_dashboard-log-files.xml b/doc/config-reference/dashboard/section_dashboard-log-files.xml deleted file mode 100644 index 97f2f54ea7..0000000000 --- a/doc/config-reference/dashboard/section_dashboard-log-files.xml +++ /dev/null @@ -1,38 +0,0 @@ - -
Dashboard log files
The dashboard is served to users through the Apache web server (httpd). As a result, dashboard-related logs appear in files in the /var/log/httpd or /var/log/apache2 directory on the system where the dashboard is hosted. The following table describes these files:
Dashboard/httpd log files
Log file | Description
access_log | Logs all attempts to access the web server.
error_log | Logs all unsuccessful attempts to access the web server, along with the reason that each attempt failed.
diff --git a/doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml b/doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml deleted file mode 100644 index 621b552e4e..0000000000 --- a/doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml +++ /dev/null @@ -1,34 +0,0 @@ - -
Additional sample configuration files
Find the following files in /etc/openstack-dashboard.

keystone_policy.json
The keystone_policy.json file defines additional access controls for the dashboard that apply to the Identity service. The keystone_policy.json file must match the Identity service /etc/keystone/policy.json policy file.

nova_policy.json
The nova_policy.json file defines additional access controls for the dashboard that apply to the Compute service. The nova_policy.json file must match the Compute /etc/nova/policy.json policy file.
diff --git a/doc/config-reference/database-service/section-databaseservice-db.xml b/doc/config-reference/database-service/section-databaseservice-db.xml deleted file mode 100644 index 68fe24fad8..0000000000 --- a/doc/config-reference/database-service/section-databaseservice-db.xml +++ /dev/null @@ -1,23 +0,0 @@ -
Configure the database
Use the following options to configure the databases used by the Database service:
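A minimal sketch of the corresponding setting in trove.conf, assuming a MySQL database; the connection URL, host name, and password are placeholders, not values from the original section:

[database]
# SQLAlchemy connection URL for the Database service's own state database
connection = mysql://trove:TROVE_DBPASS@controller/trove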
diff --git a/doc/config-reference/database-service/section-databaseservice-rpc.xml b/doc/config-reference/database-service/section-databaseservice-rpc.xml deleted file mode 100644 index 28f1470c7f..0000000000 --- a/doc/config-reference/database-service/section-databaseservice-rpc.xml +++ /dev/null @@ -1,48 +0,0 @@ -
Configure the RPC messaging system
OpenStack projects use an open standard for messaging middleware known as AMQP. This messaging middleware enables the OpenStack services that run on multiple servers to talk to each other. OpenStack Trove RPC supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ.

Configure RabbitMQ
Use these options to configure the RabbitMQ messaging system:

Configure Qpid
Use these options to configure the Qpid messaging system:

Configure ZeroMQ
Use these options to configure the ZeroMQ messaging system:

Configure messaging
Use these common options to configure the RabbitMQ, Qpid, and ZeroMQ messaging drivers; a minimal RabbitMQ example appears after this section.
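As an illustration only, a minimal RabbitMQ sketch for trove.conf; the broker host and credentials are placeholders and are not part of the original section:

[DEFAULT]
rpc_backend = rabbit

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS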
diff --git a/doc/config-reference/identity/section_keystone-sample-conf-files.xml b/doc/config-reference/identity/section_keystone-sample-conf-files.xml deleted file mode 100644 index 86502a36d9..0000000000 --- a/doc/config-reference/identity/section_keystone-sample-conf-files.xml +++ /dev/null @@ -1,44 +0,0 @@ - -
Identity service sample configuration files
You can find the files described in this section in the /etc/keystone directory.

keystone.conf
Use the keystone.conf file to configure most Identity service options:

keystone-paste.ini
Use the keystone-paste.ini file to configure the Web Server Gateway Interface (WSGI) middleware pipeline for the Identity service.

logging.conf
You can specify a special logging configuration file in the keystone.conf configuration file, for example, /etc/keystone/logging.conf. For details, see the Python logging module documentation.
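As a hedged illustration (not taken from the original file), keystone.conf can point at that logging configuration through the oslo.log option log_config_append:

[DEFAULT]
# Load the full logging configuration from this file instead of the
# individual log options in keystone.conf
log_config_append = /etc/keystone/logging.conf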
policy.json
Use the policy.json file to define additional access controls that apply to the Identity service.
diff --git a/doc/config-reference/image-service/section_image-service-ISO-support.xml b/doc/config-reference/image-service/section_image-service-ISO-support.xml deleted file mode 100644 index cda64e612c..0000000000 --- a/doc/config-reference/image-service/section_image-service-ISO-support.xml +++ /dev/null @@ -1,30 +0,0 @@ - -
Support for ISO images
You can load ISO images into the Image service and subsequently boot an ISO image using Compute.

To load an ISO image to an Image service data store
In the Image service, run the following command:
$ glance image-create --name "ubuntu-14.04.2-server-amd64.iso" \
  --copy-from http://releases.ubuntu.com/14.04.2/ubuntu-14.04.2-server-amd64.iso \
  --is-public True --container-format bare --disk-format iso
In this command, the --name value is the name of the ISO image after it is loaded to the Image service, and the --copy-from URL points to the source ISO image.
Optionally, to confirm the upload in the Image service (glance), run this command:
$ glance image-list
diff --git a/doc/config-reference/image-service/section_image-service-api.xml b/doc/config-reference/image-service/section_image-service-api.xml deleted file mode 100644 index 8a9b8f4661..0000000000 --- a/doc/config-reference/image-service/section_image-service-api.xml +++ /dev/null @@ -1,22 +0,0 @@ -
Configure the API
The Image service has two APIs: the user-facing API, and the registry API, which is for internal requests that require access to the database.
Both of the APIs currently have two major versions, v1 and v2. It is possible to run either or both versions by setting appropriate values for enable_v1_api, enable_v2_api, enable_v1_registry, and enable_v2_registry. If the v2 API is used, running glance-registry is optional, as v2 of glance-api can connect directly to the database.
Tables of all the options used to configure the APIs, including enabling SSL and modifying WSGI settings, are found below.
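For example, a minimal glance-api.conf sketch that serves only the v2 API without a separate registry; treat the values as an illustration rather than recommended settings:

[DEFAULT]
# v2 can talk to the database directly, so the registry is not needed
enable_v1_api = False
enable_v2_api = True
enable_v1_registry = False
enable_v2_registry = False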
diff --git a/doc/config-reference/image-service/section_image-service-backend-vmware.xml b/doc/config-reference/image-service/section_image-service-backend-vmware.xml deleted file mode 100644 index c7aab15ec0..0000000000 --- a/doc/config-reference/image-service/section_image-service-backend-vmware.xml +++ /dev/null @@ -1,117 +0,0 @@ - -
Configure vCenter data stores for the Image service back end
To use vCenter data stores for the Image service back end, you must update the glance-api.conf file, as follows:
- Add data store parameters to the VMware Datastore Store Options section.
- Specify vSphere as the back end.
Any data stores configured for the Image service must also be configured for the Compute service.
You can specify vCenter data stores directly by using the data store name or Storage Policy Based Management (SPBM), which requires vCenter Server 5.5 or later. If you intend to use multiple data stores for the back end, use the SPBM feature.
In the glance_store section, set the default_store option to vsphere, as shown in this code sample:
[glance_store]
# Which back end scheme should Glance use by default if not specified
# in a request to add a new image to Glance? Known schemes are determined
# by the known_stores option below.
# Default: 'file'
default_store = vsphere
The following table describes the parameters in the VMware Datastore Store Options section.
The following block of text shows a sample configuration:
# ============ VMware Datastore Store Options =====================
# ESX/ESXi or vCenter Server target system.
# The server value can be an IP address or a DNS name
# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
vmware_server_host = 192.168.0.10

# Server username (string value)
vmware_server_username = ADMINISTRATOR

# Server password (string value)
vmware_server_password = password

# Inventory path to a datacenter (string value)
# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
# should be `ha-datacenter`.
vmware_datacenter_path = DATACENTER

# Datastore associated with the datacenter (string value)
vmware_datastore_name = datastore1

# PBM service WSDL file location URL. e.g.
# file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this
# will disable storage policy based placement of images.
# (string value)
#vmware_pbm_wsdl_location =

# The PBM policy. If `pbm_wsdl_location` is set, a PBM policy needs
# to be specified. This policy will be used to select the datastore
# in which the images will be stored.
#vmware_pbm_policy =

# The interval used for polling remote tasks
# invoked on VMware ESX/VC server in seconds (integer value)
vmware_task_poll_interval = 5

# Absolute path of the folder containing the images in the datastore
# (string value)
vmware_store_image_dir = /openstack_glance

# Allow to perform insecure SSL requests to the target system (boolean value)
vmware_api_insecure = False
Configure vCenter data stores for the back end
You can specify a vCenter data store for the back end by setting the vmware_datastore_name parameter value to the vCenter name of the data store. This configuration limits the back end to a single data store.
To configure a single data store:
- If present, comment or delete the vmware_pbm_wsdl_location and vmware_pbm_policy parameters.
- Uncomment and define the vmware_datastore_name parameter with the name of the vCenter data store.
- Complete the other vCenter configuration parameters as appropriate.
diff --git a/doc/config-reference/image-service/section_image-service-backends.xml b/doc/config-reference/image-service/section_image-service-backends.xml deleted file mode 100644 index 9a3257f40d..0000000000 --- a/doc/config-reference/image-service/section_image-service-backends.xml +++ /dev/null @@ -1,44 +0,0 @@ - -
Configure back ends
The Image service supports several back ends for storing virtual machine images:
- OpenStack Block Storage (cinder)
- A directory on a local file system
- GridFS
- Ceph RBD
- Amazon S3
- Sheepdog
- OpenStack Object Storage (swift)
- VMware ESX
The following tables detail the options available for each.
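As a hedged sketch of how a back end is selected, the following [glance_store] excerpt from glance-api.conf enables the file system store; the store list and data directory are illustrative, not values from the original section:

[glance_store]
# Comma-separated list of enabled store back ends
stores = file,http
# Store used when an image is added without an explicit scheme
default_store = file
# Directory used by the file system store
filesystem_store_datadir = /var/lib/glance/images/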
diff --git a/doc/config-reference/image-service/section_image-service-rpc.xml b/doc/config-reference/image-service/section_image-service-rpc.xml deleted file mode 100644 index 086bbc08b6..0000000000 --- a/doc/config-reference/image-service/section_image-service-rpc.xml +++ /dev/null @@ -1,21 +0,0 @@ -
Configure the RPC messaging system
OpenStack projects use an open standard for messaging middleware known as AMQP. This messaging middleware enables the OpenStack services that run on multiple servers to talk to each other. The OpenStack common library project, oslo, supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ.
The following tables contain settings to configure the messaging middleware for the Image service:
diff --git a/doc/config-reference/image-service/section_image-service-sample-configuration-files.xml b/doc/config-reference/image-service/section_image-service-sample-configuration-files.xml deleted file mode 100644 index ad2c8b8a0c..0000000000 --- a/doc/config-reference/image-service/section_image-service-sample-configuration-files.xml +++ /dev/null @@ -1,61 +0,0 @@ -
Image service sample configuration files
You can find the files that are described in this section in the /etc/glance/ directory.

glance-api.conf
The configuration file for the Image service API is found in the glance-api.conf file. This file must be modified after installation.

glance-registry.conf
Configuration for the Image service's registry, which stores the metadata about images, is found in the glance-registry.conf file. This file must be modified after installation.

glance-api-paste.ini
Configuration for the Image service's API middleware pipeline is found in the glance-api-paste.ini file. You should not need to modify this file.

glance-manage.conf
The Image service's custom logging options are found in the glance-manage.conf file. Options set in glance-manage.conf will override options of the same section and name set in glance-registry.conf and glance-api.conf. Similarly, options in glance-api.conf will override options set in glance-registry.conf.
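A small hedged illustration of that precedence; the option and values are hypothetical, chosen only to show which file wins:

# glance-registry.conf
[DEFAULT]
debug = False

# glance-manage.conf (overrides the same section and option name above)
[DEFAULT]
debug = True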
glance-registry-paste.ini
The Image service's middleware pipeline for its registry is found in the glance-registry-paste.ini file.

glance-scrubber.conf
glance-scrubber is a utility for the Image service that cleans up images that have been deleted; its configuration is stored in the glance-scrubber.conf file.

policy.json
The /etc/glance/policy.json file defines additional access controls that apply to the Image service.
diff --git a/doc/config-reference/locale/config-reference.pot b/doc/config-reference/locale/config-reference.pot deleted file mode 100644 index f3612f5d62..0000000000 --- a/doc/config-reference/locale/config-reference.pot +++ /dev/null @@ -1,20469 +0,0 @@ -msgid "" -msgstr "" -"Project-Id-Version: PACKAGE VERSION\n" -"POT-Creation-Date: 2015-12-19 06:14+0000\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" -"Language-Team: LANGUAGE \n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#: ./doc/config-reference/ch_config-overview.xml:7(title) -msgid "OpenStack configuration overview" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:8(para) -msgid "OpenStack is a collection of open source project components that enable setting up cloud services. Each component uses similar configuration techniques and a common framework for INI file options." -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:13(para) -msgid "This guide pulls together multiple references and configuration options for the following OpenStack components:" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:17(para) -msgid "Bare metal service" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:18(para) -msgid "OpenStack Block Storage" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:19(para) -msgid "OpenStack Compute" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:20(para) -msgid "OpenStack dashboard" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:21(para) -msgid "Database service for OpenStack" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:22(para) ./doc/config-reference/ch_dataprocessingserviceconfigure.xml:7(title) -msgid "Data processing service" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:23(para) -msgid "OpenStack Identity" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:24(para) -msgid "OpenStack Image service" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:25(para) -msgid "OpenStack Networking" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:26(para) -msgid "OpenStack Object Storage" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:27(para) ./doc/config-reference/ch_orchestrationconfigure.xml:7(title) -msgid "Orchestration" -msgstr "" - -#: ./doc/config-reference/ch_config-overview.xml:28(para) ./doc/config-reference/ch_telemetryconfigure.xml:7(title) -msgid "Telemetry" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:7(title) ./doc/config-reference/networking/section_networking-options-reference.xml:38(title) ./doc/config-reference/networking/section_networking-options-reference.xml:44(title) -msgid "Compute" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:8(para) -msgid "The OpenStack Compute service is a cloud computing fabric controller, which is the main part of an IaaS system. You can use OpenStack Compute to host and manage cloud computing systems. This section describes the OpenStack Compute configuration options." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:13(para) -msgid "To configure your Compute installation, you must define configuration options in these files:" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:17(para) -msgid "nova.conf. Contains most of the Compute configuration options. Resides in the /etc/nova directory." 
-msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:22(para) -msgid "api-paste.ini. Defines Compute limits. Resides in the /etc/nova directory." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:27(para) -msgid "Related Image service and Identity service management configuration files." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:33(title) -msgid "Configure logging" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:34(para) -msgid "You can use nova.conf file to configure where Compute logs events, the level of logging, and log formats." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:37(para) -msgid "To customize log formats for OpenStack Compute, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:42(title) -msgid "Configure authentication and authorization" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:43(para) -msgid "There are different methods of authentication for the OpenStack Compute project, including no authentication. The preferred system is the OpenStack Identity service, code-named Keystone." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:47(para) -msgid "To customize authorization settings for Compute, use the configuration options documented in ." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:50(para) -msgid "To customize certificate authority settings for Compute, use the configuration options documented in ." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:53(para) -msgid "To customize Compute and the Identity service to use LDAP as a backend, refer to the configuration options documented in ." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:59(title) -msgid "Configure resize" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:60(para) -msgid "Resize (or Server resize) is the ability to change the flavor of a server, thus allowing it to upscale or downscale according to user needs. For this feature to work properly, you might need to configure some underlying virt layers." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:65(title) ./doc/config-reference/compute/section_hypervisor_kvm.xml:8(title) -msgid "KVM" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:66(para) -msgid "Resize on KVM is implemented currently by transferring the images between compute nodes over ssh. For KVM you need hostnames to resolve properly and passwordless ssh access between your compute hosts. Direct access from one compute host to another is needed to copy the VM file across." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:71(para) -msgid "Cloud end users can find out how to resize a server by reading the OpenStack End User Guide." -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:77(title) ./doc/config-reference/compute/section_hypervisor_xen_api.xml:59(title) -msgid "XenServer" -msgstr "" - -#: ./doc/config-reference/ch_computeconfigure.xml:78(para) -msgid "To get resize to work with XenServer (and XCP), you need to establish a root trust between all hypervisor nodes and provide an /image mount point to your hypervisors dom0." -msgstr "" - -#: ./doc/config-reference/ch_orchestrationconfigure.xml:8(para) -msgid "The Orchestration service is designed to manage the lifecycle of infrastructure and applications within OpenStack clouds. 
Its various agents and services are configured in the /etc/heat/heat.conf file." -msgstr "" - -#: ./doc/config-reference/ch_orchestrationconfigure.xml:12(para) -msgid "To install Orchestration, see the OpenStack Installation Guide for your distribution (docs.openstack.org)." -msgstr "" - -#: ./doc/config-reference/ch_orchestrationconfigure.xml:16(para) -msgid "The following tables provide a comprehensive list of the Orchestration configuration options." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:7(title) -msgid "Object Storage" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:8(para) -msgid "OpenStack Object Storage uses multiple configuration files for multiple services and background daemons, and to manage server configurations. Default configuration options appear in the [DEFAULT] section. You can override the default values by setting values in the other sections." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:18(title) -msgid "Object server configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:19(para) -msgid "Find an example object server configuration at etc/object-server.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:22(para) ./doc/config-reference/ch_objectstorageconfigure.xml:51(para) ./doc/config-reference/ch_objectstorageconfigure.xml:76(para) ./doc/config-reference/ch_objectstorageconfigure.xml:109(para) ./doc/config-reference/ch_objectstorageconfigure.xml:126(para) ./doc/config-reference/ch_objectstorageconfigure.xml:152(para) ./doc/config-reference/ch_objectstorageconfigure.xml:181(para) ./doc/config-reference/ch_objectstorageconfigure.xml:226(para) ./doc/config-reference/ch_objectstorageconfigure.xml:235(para) -msgid "The available configuration options are:" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:42(title) -msgid "Sample object server configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:47(title) -msgid "Object expirer configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:48(para) -msgid "Find an example object expirer configuration at etc/object-expirer.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:67(title) -msgid "Sample object expirer configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:72(title) -msgid "Container server configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:73(para) -msgid "Find an example container server configuration at etc/container-server.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:99(title) -msgid "Sample container server configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:105(title) -msgid "Container sync realms configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:106(para) -msgid "Find an example container sync realms configuration at etc/container-sync-realms.conf-sample in the source code repository." 
-msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:117(title) -msgid "Sample container sync realms configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:122(title) -msgid "Container reconciler configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:123(para) -msgid "Find an example container sync realms configuration at etc/container-reconciler.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:142(title) -msgid "Sample container sync reconciler configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:148(title) -msgid "Account server configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:149(para) -msgid "Find an example account server configuration at etc/account-server.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:172(title) -msgid "Sample account server configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:177(title) -msgid "Proxy server configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:178(para) -msgid "Find an example proxy server configuration at etc/proxy-server.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:217(title) -msgid "Sample proxy server configuration file" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:222(title) -msgid "Proxy server memcache configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:223(para) -msgid "Find an example memcache configuration for the proxy server at etc/memcache.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:231(title) -msgid "Rsyncd configuration" -msgstr "" - -#: ./doc/config-reference/ch_objectstorageconfigure.xml:232(para) -msgid "Find an example rsyncd configuration at etc/rsyncd.conf-sample in the source code repository." -msgstr "" - -#: ./doc/config-reference/ch_databaseserviceconfigure.xml:7(title) -msgid "Database service" -msgstr "" - -#: ./doc/config-reference/ch_databaseserviceconfigure.xml:8(para) -msgid "The Database service provides a scalable and reliable Cloud Database-as-a-Service functionality for both relational and non-relational database engines." -msgstr "" - -#: ./doc/config-reference/ch_databaseserviceconfigure.xml:12(para) -msgid "The following tables provide a comprehensive list of the Database service configuration options." 
-msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:6(caption) -msgid "Default ports that OpenStack components use" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:12(th) -msgid "OpenStack service" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:13(th) -msgid "Default ports" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:14(th) -msgid "Port type" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:19(literal) -msgid "cinder" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:19(td) -msgid "Block Storage ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:20(td) -msgid "8776" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:21(td) ./doc/config-reference/table_default-ports-primary-services.xml:26(td) ./doc/config-reference/table_default-ports-primary-services.xml:61(td) ./doc/config-reference/table_default-ports-primary-services.xml:77(td) ./doc/config-reference/table_default-ports-primary-services.xml:87(td) ./doc/config-reference/table_default-ports-primary-services.xml:98(td) ./doc/config-reference/table_default-ports-primary-services.xml:115(td) -msgid "publicurl and adminurl" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:24(literal) -msgid "nova" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:24(td) -msgid "Compute () endpoints" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:25(td) -msgid "8774" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:29(literal) ./doc/config-reference/compute/section_nova-log-files.xml:38(td) -msgid "nova-api" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:29(td) -msgid "Compute API ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:30(td) -msgid "8773, 8775" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:34(td) -msgid "Compute ports for access to virtual machine consoles" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:36(td) -msgid "5900-5999" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:41(systemitem) -msgid "openstack-nova-novncproxy" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:40(td) -msgid "Compute VNC proxy for browsers ( )" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:42(td) -msgid "6080" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:47(systemitem) -msgid "openstack-nova-xvpvncproxy" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:46(td) -msgid "Compute VNC proxy for traditional VNC clients ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:48(td) -msgid "6081" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:52(td) -msgid "Proxy port for HTML5 console used by Compute service" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:54(td) -msgid "6082" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:58(literal) -msgid "sahara" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:58(td) -msgid "Data processing 
service () endpoint" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:60(td) -msgid "8386" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:64(literal) ./doc/config-reference/conf-changes/cinder.xml:767(td) -msgid "keystone" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:64(td) -msgid "Identity service () administrative endpoint" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:66(td) -msgid "35357" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:67(td) -msgid "adminurl" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:70(td) -msgid "Identity service public endpoint" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:71(td) -msgid "5000" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:72(td) -msgid "publicurl" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:75(literal) -msgid "glance" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:75(td) -msgid "Image service () API" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:76(td) -msgid "9292" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:80(td) -msgid "Image service registry" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:81(td) -msgid "9191" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:85(literal) -msgid "neutron" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:85(td) -msgid "Networking ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:86(td) -msgid "9696" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:90(literal) ./doc/config-reference/table_default-ports-peripheral-services.xml:30(literal) -msgid "swift" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:90(td) -msgid "Object Storage ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:91(td) -msgid "6000, 6001, 6002" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:95(literal) ./doc/config-reference/conf-changes/sahara.xml:228(td) -msgid "heat" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:95(td) -msgid "Orchestration () endpoint" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:97(td) -msgid "8004" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:102(literal) -msgid "openstack-heat-api-cfn" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:101(td) -msgid "Orchestration AWS CloudFormation-compatible API ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:103(td) -msgid "8000" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:108(literal) -msgid "openstack-heat-api-cloudwatch" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:107(td) -msgid "Orchestration AWS CloudWatch-compatible API ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:109(td) -msgid "8003" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:113(literal) -msgid "ceilometer" -msgstr "" - -#: 
./doc/config-reference/table_default-ports-primary-services.xml:113(td) -msgid "Telemetry ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:114(td) -msgid "8777" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:118(literal) -msgid "murano" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:118(td) -msgid "Application Catalog ()" -msgstr "" - -#: ./doc/config-reference/table_default-ports-primary-services.xml:119(td) -msgid "8082" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:6(caption) -msgid "Default ports that secondary services related to OpenStack components use" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:13(th) -msgid "Service" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:14(th) -msgid "Default port" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:15(th) -msgid "Used by" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:20(td) -msgid "HTTP" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:21(td) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:352(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:409(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:488(replaceable) -msgid "80" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:22(literal) -msgid "Horizon" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:22(td) -msgid "OpenStack dashboard () when it is not configured to use secure access." -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:27(td) -msgid "HTTP alternate" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:28(td) -msgid "8080" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:29(td) -msgid "OpenStack Object Storage () service." -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:33(td) -msgid "HTTPS" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:34(td) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:199(replaceable) -msgid "443" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:35(td) -msgid "Any OpenStack service that is enabled for SSL, especially secure-access dashboard." -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:39(td) -msgid "rsync" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:40(td) -msgid "873" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:41(td) -msgid "OpenStack Object Storage. Required." -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:44(td) -msgid "iSCSI target" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:45(td) -msgid "3260" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:46(td) -msgid "OpenStack Block Storage. Required." 
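A minimal sketch of how the port tables above can be turned into firewall rules, assuming an iptables-based controller node with a default-deny INPUT chain; the selection of ports (Identity 5000/35357, Compute 8774, Image 9292, Networking 9696, Block Storage 8776, dashboard 80, Object Storage proxy 8080) is taken from the tables, and the rules themselves are only an illustration:

    # permit the primary OpenStack API endpoints (assumed default ports)
    iptables -A INPUT -p tcp -m multiport --dports 5000,35357,8774,9292,9696,8776 -j ACCEPT
    # permit the dashboard over HTTP and the Object Storage proxy
    iptables -A INPUT -p tcp -m multiport --dports 80,8080 -j ACCEPT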
-msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:49(td) -msgid "MySQL database service" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:50(td) -msgid "3306" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:51(td) -msgid "Most OpenStack components." -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:54(td) -msgid "Message Broker (AMQP traffic)" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:55(td) -msgid "5672" -msgstr "" - -#: ./doc/config-reference/table_default-ports-peripheral-services.xml:56(td) -msgid "OpenStack Block Storage, Networking, Orchestration, and Compute." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:10(title) -msgid "The policy.json file" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:12(para) -msgid "Each OpenStack service, Identity, Compute, Networking and so on, has its own role-based access policies. They determine which user can access which objects in which way, and are defined in the service's policy.json file." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:17(para) -msgid "Whenever an API call to an OpenStack service is made, the service's policy engine uses the appropriate policy definitions to determine if the call can be accepted. Any changes to policy.json are effective immediately, which allows new policies to be implemented while the service is running." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:23(para) -msgid "A policy.json file is a text file in JSON (Javascript Object Notation) format. Each policy is defined by a one-line statement in the form \"<target>\" : \"<rule>\"." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:27(para) -msgid "The policy target, also named \"action\", represents an API call like \"start an instance\" or \"attach a volume\"." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:32(para) -msgid "Action names are usually qualified. Example: OpenStack Compute features API calls to list instances, volumes and networks. In /etc/nova/policy.json, these APIs are represented by compute:get_all, volume:get_all and network:get_all, respectively." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:38(para) -msgid "The mapping between API calls and actions is not generally documented." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:40(para) -msgid "The policy rule determines under which circumstances the API call is permitted. Usually this involves the user who makes the call (hereafter named the \"API user\") and often the object on which the API call operates. A typical rule checks if the API user is the object's owner." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:45(title) -msgid "Modifying the policy" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:46(para) -msgid "While recipes for editing policy.json files are found on blogs, modifying the policy can have unexpected side effects and is not encouraged." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:54(title) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:130(title) -msgid "Examples" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:56(para) -msgid "A simple rule might look like this: The target is \"compute:get_all\", the \"list all instances\" API of the Compute service. The rule is an empty string meaning \"always\". 
This policy allows anybody to list instances." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:64(para) -msgid "You can also decline permission to use an API: The exclamation mark stands for \"never\" or \"nobody\", which effectively disables the Compute API \"shelve an instance\"." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:71(para) -msgid "Many APIs can only be called by admin users. This can be expressed by the rule \"role:admin\". The following policy ensures that only administrators can create new users in the Identity database: " -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:79(para) -msgid "You can limit APIs to any role. For example, the Orchestration service defines a role named heat_stack_user. Whoever has this role isn't allowed to create stacks: This rule makes use of the boolean operator not. More complex rules can be built using operators and, or and parentheses." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:88(para) -msgid "You can define aliases for rules: The policy engine understands that \"deny_stack_user\" is not an API and consequently interprets it as an alias. The stack creation policy above can then be written as: This is taken verbatim from /etc/heat/policy.json." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:100(para) -msgid "Rules can compare API attributes to object attributes. For example: states that only the owner of an instance can start it up. The user_id string before the colon is an API attribute, namely the user ID of the API user. It is compared with the user ID of the object (in this case, an instance); more precisely, it is compared with the user_id field of that object in the database. If the two values are equal, permission is granted." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:112(para) -msgid "An admin user always has permission to call APIs. This is how /etc/keystone/policy.json makes this policy explicit: The first line defines an alias for \"user is an admin user\". The is_admin flag is only used when setting up the Identity service for the first time. It indicates that the user has admin privileges granted by the service token (--os-token parameter of the command line client)." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:128(para) -msgid "The second line creates an alias for \"user owns the object\" by comparing the API's user ID with the object's user ID." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:132(para) -msgid "Line 3 defines a third alias admin_or_owner, combining the two first aliases with the Boolean operator or." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:136(para) -msgid "Line 4 sets up the policy that a password can only be modified by its owner or an admin user." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:141(para) -msgid "As a final example, let's examine a more complex rule: This rule determines who can use the Identity API \"delete EC2 credential\". Here, boolean operators and parentheses combine three simpler rules. admin_required and owner are the same aliases as in the previous example. user_id:%(target.credential.user_id)s compares the API user with the user ID of the credential object associated with the target." 
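A composite policy.json sketch corresponding to the rules described above. It mixes entries from the Compute, Orchestration, and Identity examples purely for illustration (a real policy.json covers one service), and the "shelve" target name is assumed rather than taken from a shipped policy file:

    {
        "compute:get_all": "",
        "compute:shelve": "!",
        "identity:create_user": "role:admin",
        "deny_stack_user": "not role:heat_stack_user",
        "stacks:create": "rule:deny_stack_user",
        "compute:start": "user_id:%(user_id)s",
        "admin_required": "role:admin or is_admin:1",
        "owner": "user_id:%(user_id)s",
        "admin_or_owner": "rule:admin_required or rule:owner",
        "identity:change_password": "rule:admin_or_owner",
        "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)"
    }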
-msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:158(title) -msgid "Syntax" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:160(para) -msgid "A policy.json file consists of policies and aliases of the form target:rule or alias:definition, separated by commas and enclosed in curly braces: " -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:175(para) -msgid "Targets are APIs and are written \"service:API\" or simply \"API\". For example, \"compute:create\" or \"add_image\"." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:179(para) -msgid "Rules determine whether the API call is allowed." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:183(para) -msgid "always true. The action is always permitted. This can be written as \"\" (empty string), [], or \"@\"." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:185(para) -msgid "always false. The action is never permitted. Written as \"!\"." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:186(para) -msgid "a special check" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:187(para) -msgid "a comparison of two values" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:188(para) -msgid "boolean expressions based on simpler rules" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:180(para) -msgid "Rules can be: " -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:195(para) -msgid "<role>:<role name>, a test whether the API credentials contain this role." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:197(para) -msgid "<rule>:<rule name>, the definition of an alias." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:199(para) -msgid "http:<target URL>, which delegates the check to a remote server. The API is authorized when the server returns True." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:192(para) -msgid "Special checks are " -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:205(para) -msgid "Developers can define additional special checks." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:207(para) -msgid "Two values are compared in the following way: " -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:215(para) -msgid "constants: Strings, numbers, true, false" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:216(para) -msgid "API attributes" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:217(para) -msgid "target object attributes" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:218(para) -msgid "the flag is_admin" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:212(para) -msgid "Possible values are " -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:222(para) -msgid "API attributes can be project_id, user_id or domain_id." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:225(para) -msgid "Target object attributes are fields from the object description in the database. For example in the case of the \"compute:start\" API, the object is the instance to be started. The policy for starting instances could use the %(project_id)sattribute, that is the project that owns the instance. The trailing s indicates this is a string." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:231(para) -msgid "is_admin indicates that administrative privileges are granted via the admin token mechanism (the --os-token option of the command). 
The admin token allows initialisation of the identity database before the admin role exists." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:237(para) -msgid "The alias construct exists for convenience. An alias is a short name for a complex or hard-to-understand rule. It is defined in the same way as a policy: Once an alias is defined, use the rule keyword to use it in a policy rule." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:248(title) -msgid "Older syntax" -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:250(para) -msgid "You may encounter older policy.json files that feature a different syntax, where JavaScript arrays are used instead of boolean operators. For example, the EC2 credentials rule above would have been written as follows: The rule is an array of arrays. The innermost arrays are or'ed together, whereas elements inside the innermost arrays are and'ed." -msgstr "" - -#: ./doc/config-reference/app_policy_json.xml:261(para) -msgid "While the old syntax is still supported, we recommend using the newer, more intuitive syntax." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:7(title) -msgid "Identity service" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:8(para) -msgid "This chapter details the OpenStack Identity service configuration options. For installation prerequisites and step-by-step walkthroughs, see the OpenStack Installation Guide for your distribution (docs.openstack.org) and Cloud Administrator Guide." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:16(title) -msgid "Caching layer" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:17(para) -msgid "Identity supports a caching layer that is above the configurable subsystems, such as token or assignment. The majority of the caching configuration options are set in the [cache] section. However, each section that has the capability to be cached usually has a option that will toggle caching for that specific section. By default, caching is globally disabled. Options are as follows:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:28(para) -msgid "Current functional backends are:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:30(para) -msgid "dogpile.cache.memcached - Memcached backend using the standard python-memcached library" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:32(para) -msgid "dogpile.cache.pylibmc - Memcached backend using the pylibmc library" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:34(para) -msgid "dogpile.cache.bmemcached - Memcached using python-binary-memcached library." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:36(para) -msgid "dogpile.cache.redis - Redis backend" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:37(para) -msgid "dogpile.cache.dbm - Local DBM file backend" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:38(para) -msgid "dogpile.cache.memory - In-memory cache, not suitable for use outside of testing as it does not clean up its internal cache on cache expiration and does not share cache between processes. This means that caching and cache invalidation will not be consistent or reliable." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:44(para) -msgid "dogpile.cache.mongo - MongoDB as a caching backend."
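As a sketch of the older list-based syntax described above, the EC2 credential rule could have appeared as follows; the innermost lists are OR'ed together, and the items inside each innermost list are AND'ed:

    "identity:ec2_delete_credential": [["rule:admin_required"], ["rule:owner", "user_id:%(target.credential.user_id)s"]]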
-msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:51(title) -msgid "Identity service configuration file" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:52(para) -msgid "The Identity service is configured in the /etc/keystone/keystone.conf file." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:54(para) -msgid "The following tables provide a comprehensive list of the Identity service options." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:96(title) -msgid "Domain-specific configuration" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:97(para) -msgid "The Identity service supports domain-specific Identity drivers which allow a domain to have its own LDAP or SQL back end. By default, domain-specific drivers are disabled." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:101(para) -msgid "Domain-specific Identity configuration options can be stored in domain-specific configuration files, or in the Identity SQL database using API REST calls." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:105(para) -msgid "Storing and managing configuration options in a SQL database is experimental in Kilo." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:109(title) -msgid "Enable drivers for domain-specific configuration files" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:110(para) ./doc/config-reference/ch_identityconfigure.xml:153(para) -msgid "To enable domain-specific drivers, set these options in the /etc/keystone/keystone.conf file:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:116(para) -msgid "When you enable domain-specific drivers, Identity looks in the directory for configuration files that are named as keystone.DOMAIN_NAME.conf. Any domain without a domain-specific configuration file uses options in the primary configuration file." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:124(title) -msgid "Domain-specific configuration file" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:125(para) -msgid "Any options that you define in the domain-specific configuration file override options in the /etc/keystone/keystone.conf configuration file." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:128(para) -msgid "Domains configured for the service user or project use the Identity API v3 to retrieve the service token." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:130(para) -msgid "To configure the domain for the service user, set the following options in the [DEFAULT] section of the /etc/keystone/domains/keystone.DOMAIN_NAME.conf file:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:134(replaceable) -msgid "USER_DOMAIN_ID" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:135(replaceable) -msgid "USER_DOMAIN_NAME" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:136(para) -msgid "Replace USER_DOMAIN_ID with the Identity service account user domain ID, and USER_DOMAIN_NAME with the Identity service account user domain name." 
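A keystone.conf sketch combining the caching options discussed above with the domain-specific driver switches; the memcached backend and URL are assumed values chosen for illustration:

    [cache]
    enabled = true
    backend = dogpile.cache.memcached
    backend_argument = url:127.0.0.1:11211

    [token]
    caching = true

    [identity]
    domain_specific_drivers_enabled = true
    domain_config_dir = /etc/keystone/domains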
-msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:140(para) -msgid "To configure the domain for the project, set the following options in the [DEFAULT] section of the /etc/keystone/domains/keystone.DOMAIN_NAME.conf file:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:144(replaceable) -msgid "PROJECT_DOMAIN_ID" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:145(replaceable) -msgid "PROJECT_DOMAIN_NAME" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:146(para) -msgid "Replace PROJECT_DOMAIN_ID with the Identity service account project domain ID, and PROJECT_DOMAIN_NAME with the Identity service account project domain name." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:152(title) -msgid "Enable drivers for storing configuration options in SQL database" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:158(para) -msgid "Any domain-specific configuration options specified through the Identity v3 API will override domain-specific configuration files in the /etc/keystone/domains directory." -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:163(title) -msgid "Migrate domain-specific configuration files to the SQL database" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:164(para) -msgid "You can use the command to migrate configuration options in domain-specific configuration files to the SQL database:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:168(para) -msgid "To upload options from a specific domain-configuration file, specify the domain name:" -msgstr "" - -#: ./doc/config-reference/ch_identityconfigure.xml:170(replaceable) -msgid "DOMAIN_NAME" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:7(title) -msgid "Firewalls and default ports" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:8(para) -msgid "On some deployments, such as ones where restrictive firewalls are in place, you might need to manually configure a firewall to permit OpenStack service traffic." -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:11(para) -msgid "To manually configure a firewall, you must permit traffic through the ports that each OpenStack service uses. This table lists the default ports that each OpenStack service uses:" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:16(para) -msgid "To function properly, some OpenStack components depend on other, non-OpenStack services. For example, the OpenStack dashboard uses HTTP for non-secure communication. In this case, you must configure the firewall to allow traffic to and from HTTP." -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:21(para) -msgid "This table lists the ports that other OpenStack components use:" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:24(para) -msgid "On some deployments, the default port used by a service may fall within the defined local port range of a host. 
To check a host's local port range:" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:28(para) -msgid "If a service's default port falls within this range, run the following program to check if the port has already been assigned to another application:" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:31(replaceable) ./doc/config-reference/compute/section_compute-cells.xml:321(replaceable) -msgid "PORT" -msgstr "" - -#: ./doc/config-reference/app_firewalls-ports.xml:32(para) -msgid "Configure the service to use a different port if the default port is already being used by another application." -msgstr "" - -#: ./doc/config-reference/ch_networkingconfigure.xml:7(title) -msgid "Networking" -msgstr "" - -#: ./doc/config-reference/ch_networkingconfigure.xml:8(para) -msgid "This chapter explains the OpenStack Networking configuration options. For installation prerequisites, steps, and use cases, see the OpenStack Installation Guide for your distribution (docs.openstack.org) and Cloud Administrator Guide." -msgstr "" - -#: ./doc/config-reference/ch_telemetryconfigure.xml:8(para) -msgid "The Telemetry service collects measurements within OpenStack. Its various agents and services are configured in the /etc/ceilometer/ceilometer.conf file." -msgstr "" - -#: ./doc/config-reference/ch_telemetryconfigure.xml:11(para) -msgid "To install Telemetry, see the OpenStack Installation Guide for your distribution (docs.openstack.org)." -msgstr "" - -#: ./doc/config-reference/ch_blockstorageconfigure.xml:7(title) -msgid "Block Storage" -msgstr "" - -#: ./doc/config-reference/ch_blockstorageconfigure.xml:8(para) -msgid "The OpenStack Block Storage service works with many different storage drivers that you can configure by using these instructions." -msgstr "" - -#: ./doc/config-reference/ch_dataprocessingserviceconfigure.xml:8(para) -msgid "The Data processing service (sahara) provides a scalable data-processing stack and associated management interfaces." -msgstr "" - -#: ./doc/config-reference/ch_dataprocessingserviceconfigure.xml:13(para) -msgid "The following tables provide a comprehensive list of the Data processing service configuration options." -msgstr "" - -#: ./doc/config-reference/ch_imageservice.xml:7(title) -msgid "Image service" -msgstr "" - -#: ./doc/config-reference/ch_imageservice.xml:8(para) -msgid "Compute relies on an external image service to store virtual machine images and maintain a catalog of available images. By default, Compute is configured to use the OpenStack Image service (glance), which is currently the only supported image service." -msgstr "" - -#: ./doc/config-reference/ch_imageservice.xml:15(para) -msgid "If your installation requires euca2ools to register new images, you must run the nova-objectstore service. This service provides an Amazon S3 front-end for Glance, which is required by euca2ools." -msgstr "" - -#: ./doc/config-reference/ch_imageservice.xml:20(para) -msgid "To customize the Compute Service, use the configuration option settings documented in and ." -msgstr "" - -#: ./doc/config-reference/ch_imageservice.xml:24(para) -msgid "You can modify many options in the OpenStack Image service. The following tables provide a comprehensive list." 
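For the local-port-range and port-in-use checks mentioned in the firewall appendix above, one possible command sequence (PORT is the placeholder used in that appendix; other tools such as ss or lsof work equally well):

    $ cat /proc/sys/net/ipv4/ip_local_port_range
    $ netstat -tlnp | grep PORT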
-msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:8(title) ./doc/config-reference/bk-config-ref.xml:13(titleabbrev) -msgid "OpenStack Configuration Reference" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:21(orgname) ./doc/config-reference/bk-config-ref.xml:28(holder) -msgid "OpenStack Foundation" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:25(year) -msgid "2013" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:26(year) -msgid "2014" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:27(year) -msgid "2015" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:30(productname) -msgid "OpenStack" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:31(releaseinfo) -msgid "mitaka" -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:35(remark) -msgid "Copyright details are filled in by the template." -msgstr "" - -#: ./doc/config-reference/bk-config-ref.xml:40(para) -msgid "This document is for system administrators who want to look up configuration options. It contains lists of configuration options available with OpenStack and uses auto-generation to generate options and the descriptions from the code for each project. It includes sample configuration files." -msgstr "" - -#: ./doc/config-reference/ch_baremetalconfigure.xml:7(title) -msgid "Bare metal" -msgstr "" - -#: ./doc/config-reference/ch_baremetalconfigure.xml:8(para) -msgid "The Bare metal service is capable of managing and provisioning physical machines. The configuration file of this module is /etc/ironic/ironic.conf." -msgstr "" - -#: ./doc/config-reference/ch_baremetalconfigure.xml:11(para) -msgid "The following tables provide a comprehensive list of the Bare metal service configuration options." -msgstr "" - -#: ./doc/config-reference/ch_dashboardconfigure.xml:7(title) -msgid "Dashboard" -msgstr "" - -#: ./doc/config-reference/ch_dashboardconfigure.xml:8(para) -msgid "This chapter describes how to configure the OpenStack dashboard with Apache web server." -msgstr "" - -#: ./doc/config-reference/ch_sharedfilesystemsconfigure.xml:7(title) -msgid "Shared File Systems" -msgstr "" - -#: ./doc/config-reference/ch_sharedfilesystemsconfigure.xml:8(para) -msgid "The Shared File Systems service works with many different drivers that you can configure by using these instructions." -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/compute/section_compute-scheduler.xml:130(None) -msgid "@@image: '../../common/figures/filteringWorkflow1.png'; md5=c144af5cbdee1bd17a7bde0bea5b5fe7" -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/compute/section_compute-scheduler.xml:849(None) -msgid "@@image: '../../common/figures/nova-weighting-hosts.png'; md5=000eab4cf0deb1da2e692e023065a6ae" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:12(title) -msgid "Scheduling" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:13(para) -msgid "Compute uses the nova-scheduler service to determine how to dispatch compute requests. For example, the nova-scheduler service determines on which host a VM should launch. In the context of filters, the term host means a physical node that has a nova-compute service running on it. 
You can configure the scheduler through a variety of options." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:22(para) -msgid "Compute is configured with the following default scheduler options in the /etc/nova/nova.conf file:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:30(para) -msgid "By default, the is configured as a filter scheduler, as described in the next section. In the default configuration, this scheduler considers hosts that meet all the following criteria:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:36(para) -msgid "Have not been attempted for scheduling purposes (RetryFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:40(para) -msgid "Are in the requested availability zone (AvailabilityZoneFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:44(para) -msgid "Have sufficient RAM available (RamFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:48(para) -msgid "Have sufficient disk space available for root and ephemeral storage (DiskFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:52(para) -msgid "Can service the request (ComputeFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:56(para) -msgid "Satisfy the extra specs associated with the instance type (ComputeCapabilitiesFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:61(para) -msgid "Satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties (ImagePropertiesFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:67(para) -msgid "Are on a different host than other instances of a group (if requested) (ServerGroupAntiAffinityFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:73(para) -msgid "Are in a set of group hosts (if requested) (ServerGroupAffinityFilter)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:78(para) -msgid "The scheduler caches its list of available hosts; use the option to specify how often the list is updated." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:82(para) -msgid "Do not configure to be much smaller than ; otherwise, hosts appear to be dead while the host list is being cached." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:88(para) -msgid "For information about the volume scheduler, see the Block Storage section of OpenStack Cloud Administrator Guide." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:93(para) -msgid "The scheduler chooses a new host when an instance is migrated." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:95(para) -msgid "When evacuating instances from a host, the scheduler service honors the target host defined by the administrator on the evacuate command. If a target is not defined by the administrator, the scheduler determines the target host. For information about instance evacuation, see Evacuate instances section of the OpenStack Cloud Administrator Guide." 
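A nova.conf sketch of the scheduler defaults that produce the behaviour listed above; the driver class and filter list match the defaults described in this section, and the exact option spelling reflects this release:

    [DEFAULT]
    scheduler_driver = nova.scheduler.filter_scheduler.FilterScheduler
    scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter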
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:105(title) -msgid "Filter scheduler" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:106(para) -msgid "The filter scheduler (nova.scheduler.filter_scheduler.FilterScheduler) is the default scheduler for scheduling virtual machine instances. It supports filtering and weighting to make informed decisions on where a new instance should be created." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:115(title) -msgid "Filters" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:116(para) -msgid "When the filter scheduler receives a request for a resource, it first applies filters to determine which hosts are eligible for consideration when dispatching a resource. Filters are binary: either a host is accepted by the filter, or it is rejected. Hosts that are accepted by the filter are then processed by a different algorithm to decide which hosts to use for that request, described in the Weights section." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:125(title) -msgid "Filtering" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:134(para) -msgid "The configuration option in nova.conf provides the Compute service with the list of the filters that are used by the scheduler. The default setting specifies all of the filter that are included with the Compute service:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:141(para) -msgid "This configuration option can be specified multiple times. For example, if you implemented your own custom filter in Python called myfilter.MyFilter and you wanted to use both the built-in filters and your custom filter, your nova.conf file would contain:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:150(para) -msgid "The scheduler_default_filters configuration option in nova.conf defines the list of filters that are applied by the nova-scheduler service. The default filters are:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:157(para) -msgid "The following sections describe the available filters." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:160(title) -msgid "AggregateCoreFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:161(para) -msgid "Filters host by CPU core numbers with a per-aggregate cpu_allocation_ratio value. If the per-aggregate value is not found, the value falls back to the global setting. If the host is in more than one aggregate and more than one value is found, the minimum value will be used. For information about how to use this filter, see . See also ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:173(title) -msgid "AggregateDiskFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:174(para) -msgid "Filters host by disk allocation with a per-aggregate disk_allocation_ratio value. If the per-aggregate value is not found, the value falls back to the global setting. If the host is in more than one aggregate and more than one value is found, the minimum value will be used. For information about how to use this filter, see . See also ." 
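A sketch of the custom-filter case described above, assuming a Python class myfilter.MyFilter is importable on the scheduler host and that scheduler_available_filters is the option that publishes filters to the scheduler:

    scheduler_available_filters = nova.scheduler.filters.all_filters
    scheduler_available_filters = myfilter.MyFilter
    scheduler_default_filters = RamFilter,ComputeFilter,MyFilter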
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:186(title) -msgid "AggregateImagePropertiesIsolation" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:187(para) -msgid "Matches properties defined in an image's metadata against those of aggregates to determine host matches:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:192(para) -msgid "If a host belongs to an aggregate and the aggregate defines one or more metadata that matches an image's properties, that host is a candidate to boot the image's instance." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:198(para) -msgid "If a host does not belong to any aggregate, it can boot instances from all images." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:202(para) -msgid "For example, the following aggregate myWinAgg has the Windows operating system as metadata (named 'windows'):" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:211(para) -msgid "In this example, because the following Win-2012 image has the windows property, it boots on the sf-devel host (all other filters being equal):" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:224(para) -msgid "You can configure the AggregateImagePropertiesIsolation filter by using the following options in the nova.conf file:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:236(title) -msgid "AggregateInstanceExtraSpecsFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:237(para) -msgid "Matches properties defined in extra specs for an instance type against admin-defined properties on a host aggregate. Works with specifications that are scoped with aggregate_instance_extra_specs. Multiple values can be given, as a comma-separated list. For backward compatibility, also works with non-scoped specifications; this action is highly discouraged because it conflicts with ComputeCapabilitiesFilter filter when you enable both filters. For information about how to use this filter, see the host aggregates section." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:252(title) -msgid "AggregateIoOpsFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:253(para) -msgid "Filters host by disk allocation with a per-aggregate max_io_ops_per_host value. If the per-aggregate value is not found, the value falls back to the global setting. If the host is in more than one aggregate and more than one value is found, the minimum value will be used. For information about how to use this filter, see . See also ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:265(title) -msgid "AggregateMultiTenancyIsolation" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:266(para) -msgid "Isolates tenants to specific host aggregates. If a host is in an aggregate that has the filter_tenant_id metadata key, the host creates instances from only that tenant or list of tenants. A host can be in different aggregates. If a host does not belong to an aggregate with the metadata key, the host can create instances from all tenants." 
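A sketch of the aggregate-based filters above, pairing a host aggregate metadata key with a flavor extra spec scoped for AggregateInstanceExtraSpecsFilter; the aggregate name, host name, flavor name, and the ssd key are all hypothetical:

    $ nova aggregate-create fast-storage
    $ nova aggregate-add-host fast-storage node-1
    $ nova aggregate-set-metadata fast-storage ssd=true
    $ nova flavor-key m1.ssd set aggregate_instance_extra_specs:ssd=true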
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:277(title) -msgid "AggregateNumInstancesFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:278(para) -msgid "Filters host by number of instances with a per-aggregate max_instances_per_host value. If the per-aggregate value is not found, the value falls back to the global setting. If the host is in more than one aggregate and thus more than one value is found, the minimum value will be used. For information about how to use this filter, see . See also ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:291(title) -msgid "AggregateRamFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:292(para) -msgid "Filters host by RAM allocation of instances with a per-aggregate ram_allocation_ratio value. If the per-aggregate value is not found, the value falls back to the global setting. If the host is in more than one aggregate and thus more than one value is found, the minimum value will be used. For information about how to use this filter, see . See also ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:305(title) -msgid "AggregateTypeAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:306(para) -msgid "This filter passes hosts if no instance_type key is set or the instance_type aggregate metadata value contains the name of the instance_type requested. The value of the instance_type metadata entry is a string that may contain either a single instance_type name or a comma-separated list of instance_type names, such as 'm1.nano' or \"m1.nano,m1.small.\" For information about how to use this filter, see . See also ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:322(title) -msgid "AllHostsFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:323(para) -msgid "This is a no-op filter. It does not eliminate any of the available hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:327(title) -msgid "AvailabilityZoneFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:328(para) -msgid "Filters hosts by availability zone. You must enable this filter for the scheduler to respect availability zones in requests." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:333(title) -msgid "ComputeCapabilitiesFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:334(para) -msgid "Matches properties defined in extra specs for an instance type against compute capabilities." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:336(para) -msgid "If an extra specs key contains a colon (:), anything before the colon is treated as a namespace and anything after the colon is treated as the key to be matched. If a namespace is present and is not capabilities, the filter ignores the namespace. For backward compatibility, also treats the extra specs key as the key to be matched if no namespace is present; this action is highly discouraged because it conflicts with AggregateInstanceExtraSpecsFilter filter when you enable both filters." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:350(title) -msgid "ComputeFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:351(para) -msgid "Passes all hosts that are operational and enabled." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:353(para) -msgid "In general, you should always enable this filter." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:356(title) -msgid "CoreFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:357(para) -msgid "Only schedules instances on hosts if sufficient CPU cores are available. If this filter is not set, the scheduler might over-provision a host based on cores. For example, the virtual cores running on an instance may exceed the physical cores." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:362(para) -msgid "You can configure this filter to enable a fixed amount of vCPU overcommitment by using the configuration option in nova.conf. The default setting is:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:369(para) -msgid "With this setting, if 8 vCPUs are on a node, the scheduler allows instances up to 128 vCPU to be run on that node." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:372(para) -msgid "To disallow vCPU overcommitment set:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:375(para) -msgid "The Compute API always returns the actual number of CPU cores available on a compute node regardless of the value of the configuration key. As a result changes to the are not reflected via the command line clients or the dashboard. Changes to this configuration key are only taken into account internally in the scheduler." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:388(title) -msgid "NUMATopologyFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:389(para) -msgid "Filters hosts based on the NUMA topology that was specified for the instance through the use of flavor extra_specsin combination with the image properties, as described in detail in the related nova-spec document: Filter will try to match the exact NUMA cells of the instance to those of the host. It will consider the standard over-subscription limits each cell, and provide limits to the compute host accordingly." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:401(para) -msgid "If instance has no topology defined, it will be considered for any host. If instance has a topology defined, it will be considered only for NUMA capable hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:409(title) -msgid "DifferentHostFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:410(para) -msgid "Schedules the instance on a different host from a set of instances. To take advantage of this filter, the requester must pass a scheduler hint, using different_host as the key and a list of instance UUIDs as the value. This filter is the opposite of the SameHostFilter. Using the command-line tool, use the --hint flag. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:422(para) -msgid "With the API, use the os:scheduler_hints key. 
For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:428(title) -msgid "DiskFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:429(para) -msgid "Only schedules instances on hosts if there is sufficient disk space available for root and ephemeral storage." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:432(para) -msgid "You can configure this filter to enable a fixed amount of disk overcommitment by using the disk_allocation_ratio configuration option in the nova.conf configuration file. The default setting disables the possibility of the overcommitment and allows launching a VM only if there is a sufficient amount of disk space available on a host:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:442(para) -msgid "DiskFilter always considers the value of the property and not the one of the property of a hypervisor's statistics:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:463(para) -msgid "As it can be viewed from the command output above, the amount of the available disk space can be less than the amount of the free disk space. It happens because the property accounts for the virtual size rather than the actual size of images. If you use an image format that is sparse or copy on write so that each virtual instance does not require a 1:1 allocation of a virtual disk to a physical storage, it may be useful to allow the overcommitment of disk space." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:473(para) -msgid "To enable scheduling instances while overcommitting disk resources on the node, adjust the value of the disk_allocation_ratio configuration option to greater than 1.0:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:479(para) -msgid "If the value is set to >1, we recommend keeping track of the free disk space, as the value approaching 0 may result in the incorrect functioning of instances using it at the moment." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:486(title) -msgid "GroupAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:488(para) -msgid "This filter is deprecated in favor of ServerGroupAffinityFilter." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:492(para) -msgid "The GroupAffinityFilter ensures that an instance is scheduled on to a host from a set of group hosts. To take advantage of this filter, the requester must pass a scheduler hint, using group as the key and an arbitrary name as the value. Using the command-line tool, use the --hint flag. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:500(replaceable) ./doc/config-reference/compute/section_compute-scheduler.xml:521(replaceable) ./doc/config-reference/compute/section_compute-scheduler.xml:761(replaceable) ./doc/config-reference/compute/section_compute-scheduler.xml:776(replaceable) -msgid "IMAGE_ID" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:501(para) -msgid "This filter should not be enabled at the same time as GroupAntiAffinityFilter or neither filter will work properly." 
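A sketch of the scheduler-hint usage described for DifferentHostFilter; IMAGE_ID and the instance UUIDs are placeholders:

    $ nova boot --image IMAGE_ID --flavor 1 \
      --hint different_host=INSTANCE_UUID_1 \
      --hint different_host=INSTANCE_UUID_2 server-1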
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:507(title) -msgid "GroupAntiAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:509(para) -msgid "This filter is deprecated in favor of ServerGroupAntiAffinityFilter." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:513(para) -msgid "The GroupAntiAffinityFilter ensures that each instance in a group is on a different host. To take advantage of this filter, the requester must pass a scheduler hint, using group as the key and an arbitrary name as the value. Using the command-line tool, use the --hint flag. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:522(para) -msgid "This filter should not be enabled at the same time as GroupAffinityFilter or neither filter will work properly." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:528(title) -msgid "ImagePropertiesFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:529(para) -msgid "Filters hosts based on properties defined on the instance's image. It passes hosts that can support the specified image properties contained in the instance. Properties include the architecture, hypervisor type, hypervisor version (for Xen hypervisor type only), and virtual machine mode." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:535(para) -msgid "For example, an instance might require a host that runs an ARM-based processor, and QEMU as the hypervisor. You can decorate an image with these properties by using:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:540(para) -msgid "The image properties that the filter checks for are:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:543(para) -msgid "architecture: describes the machine architecture required by the image. Examples are i686, x86_64, arm, and ppc64." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:550(para) -msgid "hypervisor_type: describes the hypervisor required by the image. Examples are xen, qemu, and xenapi." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:554(para) -msgid "qemu is used for both QEMU and KVM hypervisor types." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:558(para) -msgid "hypervisor_version_requires: describes the hypervisor version required by the image. The property is supported for Xen hypervisor type only. It can be used to enable support for multiple hypervisor versions, and to prevent instances with newer Xen tools from being provisioned on an older version of a hypervisor. If available, the property value is compared to the hypervisor version of the compute host." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:567(para) -msgid "To filter the hosts by the hypervisor version, add the hypervisor_version_requires property on the image as metadata and pass an operator and a required hypervisor version as its value:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:574(para) -msgid "vm_mode: describes the hypervisor application binary interface (ABI) required by the image. Examples are xen for Xen 3.0 paravirtual ABI, hvm for native ABI, uml for User Mode Linux paravirtual ABI, exe for container virt executable ABI." 
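For the ImagePropertiesFilter example above (an ARM image that requires QEMU), one way to set the properties with the glance client; the ">=4.3" hypervisor version is a hypothetical value for the Xen-only property:

    $ glance image-update IMAGE_ID \
      --property architecture=arm \
      --property hypervisor_type=qemu
    $ glance image-update IMAGE_ID --property hypervisor_version_requires=">=4.3"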
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:585(title) -msgid "IsolatedHostsFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:586(para) -msgid "Allows the admin to define a special (isolated) set of images and a special (isolated) set of hosts, such that the isolated images can only run on the isolated hosts, and the isolated hosts can only run isolated images. The flag restrict_isolated_hosts_to_isolated_images can be used to force isolated hosts to only run isolated images." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:594(para) -msgid "The admin must specify the isolated set of images and hosts in the nova.conf file using the isolated_hosts and isolated_images configuration options. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:603(title) -msgid "IoOpsFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:604(para) -msgid "The IoOpsFilter filters hosts by concurrent I/O operations on it. Hosts with too many concurrent I/O operations will be filtered out. The option specifies the maximum number of I/O intensive instances allowed to run on a host. A host will be ignored by the scheduler if more than instances in build, resize, snapshot, migrate, rescue or unshelve task states are running on it." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:617(title) -msgid "JsonFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:622(para) -msgid "=" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:625(para) -msgid "<" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:628(para) -msgid ">" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:631(para) -msgid "in" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:634(para) -msgid "<=" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:637(para) -msgid ">=" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:640(para) -msgid "not" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:643(para) -msgid "or" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:646(para) -msgid "and" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:650(code) -msgid "$free_ram_mb" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:653(code) -msgid "$free_disk_mb" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:656(code) -msgid "$total_usable_ram_mb" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:659(code) -msgid "$vcpus_total" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:662(code) -msgid "$vcpus_used" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:618(para) -msgid "The JsonFilter allows a user to construct a custom filter by passing a scheduler hint in JSON format. 
The following operators are supported:The filter supports the following variables:Using the command-line tool, use the --hint flag:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:669(para) ./doc/config-reference/compute/section_compute-scheduler.xml:744(para) ./doc/config-reference/compute/section_compute-scheduler.xml:808(para) -msgid "With the API, use the os:scheduler_hints key:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:674(title) -msgid "MetricsFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:675(para) -msgid "Filters hosts based on meters weight_setting. Only hosts with the available meters are passed so that the metrics weigher will not fail due to these hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:682(title) -msgid "NumInstancesFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:683(para) -msgid "Hosts that have more instances running than specified by the option are filtered out when this filter is in place." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:690(title) -msgid "PciPassthroughFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:691(para) -msgid "The filter schedules instances on a host if the host has devices that meet the device requests in the extra_specs attribute for the flavor." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:698(title) -msgid "RamFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:699(para) -msgid "Only schedules instances on hosts that have sufficient RAM available. If this filter is not set, the scheduler may over provision a host based on RAM (for example, the RAM allocated by virtual machine instances may exceed the physical RAM)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:704(para) -msgid "You can configure this filter to enable a fixed amount of RAM overcommitment by using the ram_allocation_ratio configuration option in nova.conf. The default setting is:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:711(para) -msgid "This setting enables 1.5GB instances to run on any compute node with 1GB of free RAM." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:715(title) -msgid "RetryFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:716(para) -msgid "Filters out hosts that have already been attempted for scheduling purposes. If the scheduler selects a host to respond to a service request, and the host fails to respond to the request, this filter prevents the scheduler from retrying that host for the service request." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:722(para) -msgid "This filter is only useful if the scheduler_max_attempts configuration option is set to a value greater than zero." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:726(para) -msgid "If there are multiple force hosts/nodes, this filter helps to retry on the force hosts/nodes if a VM fails to boot." 
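A nova.conf sketch pulling together the overcommit ratios discussed for CoreFilter, RamFilter, and DiskFilter with the IsolatedHostsFilter options; the ratio values are the defaults described in those sections, and the host names and image UUIDs are placeholders:

    [DEFAULT]
    cpu_allocation_ratio = 16.0
    ram_allocation_ratio = 1.5
    disk_allocation_ratio = 1.0
    isolated_hosts = server1, server2
    isolated_images = IMAGE_UUID_1, IMAGE_UUID_2
    restrict_isolated_hosts_to_isolated_images = true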
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:731(title) -msgid "SameHostFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:732(para) -msgid "Schedules the instance on the same host as another instance in a set of instances. To take advantage of this filter, the requester must pass a scheduler hint, using same_host as the key and a list of instance UUIDs as the value. This filter is the opposite of the DifferentHostFilter. Using the command-line tool, use the --hint flag:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:749(title) -msgid "ServerGroupAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:750(para) -msgid "The ServerGroupAffinityFilter ensures that an instance is scheduled on to a host from a set of group hosts. To take advantage of this filter, the requester must create a server group with an affinity policy, and pass a scheduler hint, using group as the key and the server group UUID as the value. Using the command-line tool, use the --hint flag. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:761(replaceable) ./doc/config-reference/compute/section_compute-scheduler.xml:776(replaceable) -msgid "SERVER_GROUP_UUID" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:764(title) -msgid "ServerGroupAntiAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:765(para) -msgid "The ServerGroupAntiAffinityFilter ensures that each instance in a group is on a different host. To take advantage of this filter, the requester must create a server group with an anti-affinity policy, and pass a scheduler hint, using group as the key and the server group UUID as the value. Using the command-line tool, use the --hint flag. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:779(title) -msgid "SimpleCIDRAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:780(para) -msgid "Schedules the instance based on host IP subnet range. To take advantage of this filter, the requester must specify a range of valid IP address in CIDR format, by passing two scheduler hints:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:786(literal) -msgid "build_near_host_ip" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:788(para) -msgid "The first IP address in the subnet (for example, 192.168.1.1)" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:794(literal) -msgid "cidr" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:796(para) -msgid "The CIDR that corresponds to the subnet (for example, /24)" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:802(para) -msgid "Using the command-line tool, use the --hint flag. For example, to specify the IP subnet 192.168.1.1/24" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:813(title) -msgid "TrustedFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:814(para) -msgid "Filters hosts based on their trust. Only passes hosts that meet the trust requirements specified in the instance properties." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:821(title) -msgid "TypeAffinityFilter" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:822(para) -msgid "Dynamically limits hosts to one instance type. An instance can only be launched on a host, if no instance with different instances types are running on it, or if the host has no running instances at all." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:831(title) -msgid "Weights" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:833(para) -msgid "When resourcing instances, the filter scheduler filters and weights each host in the list of acceptable hosts. Each time the scheduler selects a host, it virtually consumes resources on it, and subsequent selections are adjusted accordingly. This process is useful when the customer asks for the same large amount of instances, because weight is computed for each requested instance." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:840(para) -msgid "All weights are normalized before being summed up; the host with the largest weight is given the highest priority." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:844(title) -msgid "Weighting hosts" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:853(para) -msgid "If cells are used, cells are weighted by the scheduler in the same manner as hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:855(para) -msgid "Hosts and cells are weighted based on the following options in the /etc/nova/nova.conf file:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:859(caption) -msgid "Host weighting options" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:865(th) ./doc/config-reference/compute/section_compute-scheduler.xml:977(th) -msgid "Section" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:866(th) ./doc/config-reference/compute/section_compute-scheduler.xml:978(th) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:216(td) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:298(td) ./doc/config-reference/conf-changes/cinder.xml:759(td) ./doc/config-reference/conf-changes/manila.xml:323(td) ./doc/config-reference/conf-changes/keystone.xml:167(td) ./doc/config-reference/conf-changes/ironic.xml:271(td) ./doc/config-reference/conf-changes/sahara.xml:215(td) ./doc/config-reference/conf-changes/nova.xml:271(td) ./doc/config-reference/conf-changes/neutron.xml:279(td) ./doc/config-reference/conf-changes/ceilometer.xml:259(td) ./doc/config-reference/conf-changes/trove.xml:327(td) ./doc/config-reference/conf-changes/glance.xml:187(td) ./doc/config-reference/conf-changes/heat.xml:255(td) -msgid "Option" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:867(th) ./doc/config-reference/compute/section_compute-scheduler.xml:979(th) ./doc/config-reference/dashboard/section_dashboard-log-files.xml:22(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:314(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:432(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:862(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:980(th) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:206(th) 
./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:219(td) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:301(td) ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:80(th) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:90(th) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:191(th) -msgid "Description" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:872(td) ./doc/config-reference/compute/section_compute-scheduler.xml:882(td) ./doc/config-reference/compute/section_compute-scheduler.xml:894(td) ./doc/config-reference/compute/section_compute-scheduler.xml:903(td) ./doc/config-reference/compute/section_nova-conf.xml:32(literal) -msgid "[DEFAULT]" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:873(literal) ./doc/config-reference/compute/section_compute-scheduler.xml:1000(literal) -msgid "ram_weight_multiplier" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:874(td) -msgid "By default, the scheduler spreads instances across all hosts evenly. Set the option to a negative number if you prefer stacking instead of spreading. Use a floating-point value." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:883(literal) -msgid "scheduler_host_subset_size" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:884(td) -msgid "New instances are scheduled on a host that is chosen randomly from a subset of the N best hosts. This property defines the subset size from which a host is chosen. A value of 1 chooses the first host returned by the weighting functions. This value must be at least 1. A value less than 1 is ignored, and 1 is used instead. Use an integer value." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:895(literal) ./doc/config-reference/compute/section_compute-scheduler.xml:1010(literal) -msgid "scheduler_weight_classes" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:897(literal) -msgid "nova.scheduler.weights.all_weighers" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:896(td) -msgid "Defaults to , which selects the RamWeigher and MetricsWeigher. Hosts are then weighted and sorted with the largest weight winning." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:904(literal) -msgid "io_ops_weight_multiplier" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:905(td) -msgid "Multiplier used for weighing host I/O operations. A negative value means a preference to choose light workload compute hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:910(td) ./doc/config-reference/compute/section_compute-scheduler.xml:916(td) ./doc/config-reference/compute/section_compute-scheduler.xml:926(td) ./doc/config-reference/compute/section_compute-scheduler.xml:948(td) ./doc/config-reference/compute/section_nova-conf.xml:142(literal) -msgid "[metrics]" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:911(literal) -msgid "weight_multiplier" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:912(td) -msgid "Multiplier for weighting meters. Use a floating-point value." 
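A minimal nova.conf sketch of the host weighting options listed above (values are illustrative, chosen to match the default behavior the option text describes):

    [DEFAULT]
    ram_weight_multiplier = 1.0
    scheduler_host_subset_size = 1
    scheduler_weight_classes = nova.scheduler.weights.all_weighers
    # a negative multiplier prefers lightly loaded hosts
    io_ops_weight_multiplier = -1.0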
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:917(literal) -msgid "weight_setting" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:921(literal) -msgid "name1.value * 1.0 + name2.value * -1.0" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:918(td) -msgid "Determines how meters are weighted. Use a comma-separated list of metricName=ratio. For example: \"name1=1.0, name2=-1.0\" results in: " -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:927(literal) -msgid "required" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:930(para) -msgid "TrueRaises an exception. To avoid the raised exception, you should use the scheduler filter MetricFilter to filter out hosts with unavailable meters." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:939(para) -msgid "FalseTreated as a negative factor in the weighting process (uses the option)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:928(para) -msgid "Specifies how to treat unavailable meters:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:949(literal) -msgid "weight_of_unavailable" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:950(td) -msgid "If is set to False, and any one of the meters set by is unavailable, the value is returned to the scheduler." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:959(para) ./doc/config-reference/compute/section_compute-scheduler.xml:1019(para) -msgid "For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:971(caption) -msgid "Cell weighting options" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:984(td) ./doc/config-reference/compute/section_compute-scheduler.xml:992(td) ./doc/config-reference/compute/section_compute-scheduler.xml:999(td) ./doc/config-reference/compute/section_compute-scheduler.xml:1009(td) ./doc/config-reference/compute/section_nova-conf.xml:49(literal) -msgid "[cells]" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:985(literal) -msgid "mute_weight_multiplier" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:986(td) -msgid "Multiplier to weight mute children (hosts which have not sent capacity or capacity updates for some time). Use a negative, floating-point value." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:993(literal) -msgid "offset_weight_multiplier" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:994(td) -msgid "Multiplier to weight cells, so you can specify a preferred cell. Use a floating point value." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1001(td) -msgid "By default, the scheduler spreads instances across all cells evenly. Set the option to a negative number if you prefer stacking instead of spreading. Use a floating-point value." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1012(literal) -msgid "nova.cells.weights.all_weighers" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1011(td) -msgid "Defaults to , which maps to all cell weighers included with Compute. Cells are then weighted and sorted with the largest weight winning." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1027(title) -msgid "Chance scheduler" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1029(para) -msgid "As an administrator, you work with the filter scheduler. However, the Compute service also uses the Chance Scheduler, nova.scheduler.chance.ChanceScheduler, which randomly selects from lists of filtered hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1037(title) -msgid "Utilization aware scheduling" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1039(para) -msgid "It is possible to schedule VMs using advanced scheduling decisions. These decisions are made based on enhanced usage statistics encompassing data like memory cache utilization, memory bandwidth utilization, or network bandwidth utilization. This is disabled by default. The administrator can configure how the metrics are weighted in the configuration file by using the weight_setting configuration option in the nova.conf configuration file. For example to configure metric1 with ratio1 and metric2 with ratio2:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1057(title) ./doc/config-reference/compute/section_hypervisor_vmware.xml:1046(title) -msgid "Configuration reference" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-scheduler.xml:1058(para) -msgid "To customize the Compute scheduler, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-backing-storage.xml:6(title) -msgid "Configure Compute backing storage" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-backing-storage.xml:7(para) -msgid "Backing Storage is the storage used to provide the expanded operating system image, and any ephemeral storage. Inside the virtual machine, this is normally presented as two virtual hard disks (for example, /dev/vda and /dev/vdb respectively). However, inside OpenStack, this can be derived from one of three methods: LVM, QCOW or RAW, chosen using the images_type option in nova.conf on the compute node." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-backing-storage.xml:17(para) -msgid "QCOW is the default backing store. It uses a copy-on-write philosophy to delay allocation of storage until it is actually needed. This means that the space required for the backing of an image can be significantly less on the real disk than what seems available in the virtual machine operating system." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-backing-storage.xml:24(para) -msgid "RAW creates files without any sort of file formatting, effectively creating files with the plain binary one would normally see on a real disk. This can increase performance, but means that the entire size of the virtual disk is reserved on the physical disk." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-backing-storage.xml:30(para) -msgid "Local LVM volumes can also be used. Set images_volume_group = nova_local where nova_local is the name of the LVM group you have created." -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. 
-#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:42(None) -msgid "@@image: '../../common/figures/vmware-nova-driver-architecture.jpg'; md5=b00e346b00fe7a1ccb43e749d9f35720" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:11(title) -msgid "VMware vSphere" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:14(title) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:11(title) -msgid "Introduction" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:15(para) -msgid "OpenStack Compute supports the VMware vSphere product family and enables access to advanced features such as vMotion, High Availability, and Dynamic Resource Scheduling (DRS)." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:18(para) -msgid "This section describes how to configure VMware-based virtual machine images for launch. vSphere versions 4.1 and later are supported." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:21(para) -msgid "The VMware vCenter driver enables the nova-compute service to communicate with a VMware vCenter server that manages one or more ESX host clusters. The driver aggregates the ESX hosts in each cluster to present one large hypervisor entity for each cluster to the Compute scheduler. Because individual ESX hosts are not exposed to the scheduler, Compute schedules to the granularity of clusters and vCenter uses DRS to select the actual ESX host within the cluster. When a virtual machine makes its way into a vCenter cluster, it can use all vSphere features." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:31(para) -msgid "The following sections describe how to configure the VMware vCenter driver." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:35(title) -msgid "High-level architecture" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:36(para) -msgid "The following diagram shows a high-level view of the VMware driver architecture:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:39(title) -msgid "VMware driver architecture" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:46(para) -msgid "As the figure shows, the OpenStack Compute Scheduler sees three hypervisors that each correspond to a cluster in vCenter. Nova-compute contains the VMware driver. You can run with multiple nova-compute services. While Compute schedules at the granularity of a cluster, the VMware driver inside nova-compute interacts with the vCenter APIs to select an appropriate ESX host within the cluster. Internally, vCenter uses DRS for placement." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:54(para) -msgid "The VMware vCenter driver also interacts with the OpenStack Image service to copy VMDK images from the Image service back-end store. The dotted line in the figure represents VMDK images being copied from the OpenStack Image service to the vSphere data store. VMDK images are cached in the data store so the copy operation is only required the first time that the VMDK image is used." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:61(para) -msgid "After OpenStack boots a VM into a vSphere cluster, the VM becomes visible in vCenter and can access vSphere advanced features. 
At the same time, the VM is visible in the OpenStack dashboard and you can manage it as you would any other OpenStack VM. You can perform advanced vSphere operations in vCenter while you configure OpenStack resources such as VMs through the OpenStack dashboard." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:68(para) -msgid "The figure does not show how networking fits into the architecture. Both nova-network and the OpenStack Networking Service are supported. For details, see ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:74(title) -msgid "Configuration overview" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:75(para) -msgid "To get started with the VMware vCenter driver, complete the following high-level steps:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:79(para) -msgid "Configure vCenter. See ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:82(para) -msgid "Configure the VMware vCenter driver in the nova.conf file. See ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:86(para) -msgid "Load desired VMDK images into the OpenStack Image Service. See ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:90(para) -msgid "Configure networking with either nova-network or the OpenStack Networking Service. See ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:96(title) -msgid "Prerequisites and limitations" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:97(para) -msgid "Use the following list to prepare a vSphere environment that runs with the VMware vCenter driver:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:101(para) -msgid "Copying VMDK files (vSphere 5.1 only). In vSphere 5.1, copying large image files (for example, 12GB and greater) from Glance can take a long time. To improve performance, VMware recommends that you upgrade to VMware vCenter Server 5.1 Update 1 or later. For more information, see the Release Notes." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:109(para) -msgid "DRS. For any cluster that contains multiple ESX hosts, enable DRS and enable fully automated placement." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:114(para) -msgid "Shared storage. Only shared storage is supported and data stores must be shared among all hosts in a cluster. It is recommended to remove data stores not intended for OpenStack from clusters being configured for OpenStack." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:121(para) -msgid "Clusters and data stores. Do not use OpenStack clusters and data stores for other purposes. If you do, OpenStack displays incorrect usage information." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:127(para) -msgid "Networking. The networking configuration depends on the desired networking model. See ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:132(para) -msgid "Security groups. If you use the VMware driver with OpenStack Networking and the NSX plug-in, security groups are supported. If you use nova-network, security groups are not supported." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:137(para) -msgid "The NSX plug-in is the only plug-in that is validated for vSphere." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:141(para) -msgid "VNC. The port range 5900 - 6105 (inclusive) is automatically enabled for VNC connections on every ESX host in all clusters under OpenStack control. For more information about using a VNC client to connect to virtual machine, see http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1246." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:146(para) -msgid "In addition to the default VNC port numbers (5900 to 6000) specified in the above document, the following ports are also used: 6101, 6102, and 6105." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:149(para) -msgid "You must modify the ESXi firewall configuration to allow the VNC ports. Additionally, for the firewall modifications to persist after a reboot, you must create a custom vSphere Installation Bundle (VIB) which is then installed onto the running ESXi host or added to a custom image profile used to install ESXi hosts. For details about how to create a VIB for persisting the firewall configuration modifications, see http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2007381." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:158(para) -msgid "The VIB can be downloaded from https://github.com/openstack-vmwareapi-team/Tools." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:163(para) -msgid "To use multiple vCenter installations with OpenStack, each vCenter must be assigned to a separate availability zone. This is required as the OpenStack Block Storage VMDK driver does not currently work across multiple vCenter installations." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:172(title) -msgid "VMware vCenter service account" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:173(para) -msgid "OpenStack integration requires a vCenter service account with the following minimum permissions. Apply the permissions to the Datacenter root object, and select the Propagate to Child Objects option." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:177(caption) -msgid "vCenter permissions tree" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:184(td) -msgid "All Privileges" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:191(td) -msgid "Datastore" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:198(td) -msgid "Allocate space" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:204(td) -msgid "Browse datastore" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:210(td) -msgid "Low level file operation" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:216(td) -msgid "Remove file" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:221(td) -msgid "Extension" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:228(td) -msgid "Register extension" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:233(td) -msgid "Folder" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:240(td) -msgid "Create folder" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:245(td) -msgid "Host" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:252(td) ./doc/config-reference/compute/section_hypervisor_vmware.xml:318(td) ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:14(title) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:33(title) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:46(title) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:50(title) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:85(title) -msgid "Configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:259(td) -msgid "Maintenance" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:265(td) ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:20(title) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:13(title) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:184(title) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:159(title) -msgid "Network configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:271(td) -msgid "Storage partition configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:275(td) -msgid "Network" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:282(td) -msgid "Assign network" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:287(td) -msgid "Resource" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:294(td) -msgid "Assign virtual machine to resource pool" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:300(td) -msgid "Migrate powered off virtual machine" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:306(td) -msgid "Migrate powered on virtual machine" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:311(td) -msgid "Virtual Machine" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:325(td) -msgid "Add existing disk" -msgstr "" - -#: 
./doc/config-reference/compute/section_hypervisor_vmware.xml:331(td) -msgid "Add new disk" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:337(td) -msgid "Add or remove device" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:343(td) -msgid "Advanced" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:349(td) -msgid "CPU count" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:355(td) -msgid "Disk change tracking" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:361(td) -msgid "Host USB device" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:367(td) -msgid "Memory" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:373(td) -msgid "Raw device" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:379(td) -msgid "Remove disk" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:385(td) -msgid "Rename" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:391(td) -msgid "Swapfile placement" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:396(td) -msgid "Interaction" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:403(td) -msgid "Configure CD media" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:409(td) -msgid "Power Off" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:415(td) -msgid "Power On" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:421(td) -msgid "Reset" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:427(td) -msgid "Suspend" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:432(td) -msgid "Inventory" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:439(td) -msgid "Create from existing" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:445(td) -msgid "Create new" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:451(td) -msgid "Move" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:457(td) -msgid "Remove" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:463(td) -msgid "Unregister" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:468(td) -msgid "Provisioning" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:475(td) -msgid "Clone virtual machine" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:481(td) -msgid "Customize" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:486(td) -msgid "Sessions" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:493(td) -msgid "Validate session" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:499(td) -msgid "View and stop sessions" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:504(td) -msgid "Snapshot management" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:511(td) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:71(para) -msgid "Create snapshot" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:517(td) -msgid "Remove snapshot" -msgstr "" - -#: 
./doc/config-reference/compute/section_hypervisor_vmware.xml:521(td) -msgid "vApp" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:528(td) -msgid "Export" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:534(td) -msgid "Import" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:541(title) -msgid "VMware vCenter driver" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:542(para) -msgid "Use the VMware vCenter driver (VMwareVCDriver) to connect OpenStack Compute with vCenter. This recommended configuration enables access through vCenter to advanced vSphere features like vMotion, High Availability, and Dynamic Resource Scheduling (DRS)." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:548(title) -msgid "VMwareVCDriver configuration options" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:549(para) -msgid "When you use the VMwareVCDriver (vCenter versions 5.1 and later) with OpenStack Compute, add the following VMware-specific configuration options to the nova.conf file:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:565(para) -msgid "vSphere vCenter versions 5.0 and earlier: You must specify the location of the WSDL files by adding the wsdl_location=http://127.0.0.1:8080/vmware/SDK/wsdl/vim25/vimService.wsdl setting to the above configuration. For more information, see vSphere 5.0 and earlier additional set up." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:574(para) -msgid "Clusters: The vCenter driver can support multiple clusters. To use more than one cluster, simply add multiple lines in nova.conf with the appropriate cluster name. Clusters and data stores used by the vCenter driver should not contain any VMs other than those created by the driver." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:583(para) -msgid "Data stores: The setting specifies the data stores to use with Compute. For example, selects all the data stores that have a name starting with \"nas\". If this line is omitted, Compute uses the first data store returned by the vSphere API. It is recommended not to use this field and instead remove data stores that are not intended for OpenStack." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:593(para) -msgid "Reserved host memory: The option value is 512MB by default. However, VMware recommends that you set this option to 0MB because the vCenter driver reports the effective memory available to the virtual machines." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:601(para) -msgid "The vCenter driver generates instance name by instance ID. Instance name template is ignored." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:605(para) -msgid "The minimum supported vCenter version is 5.1.0. In OpenStack Liberty release this will be logged as a warning. In OpenStack \"M\" release this will be enforced." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:613(para) -msgid "A nova-compute service can control one or more clusters containing multiple ESX hosts, making nova-compute a critical service from a high availability perspective. Because the host that runs nova-compute can fail while the vCenter and ESX still run, you must protect the nova-compute service against host failures." 
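The VMware-specific nova.conf options referred to above are not reproduced in this template. A hedged sketch, assuming the usual [vmware] option names (host_ip, host_username, host_password, cluster_name, datastore_regex) with placeholder values:

    [DEFAULT]
    compute_driver = vmwareapi.VMwareVCDriver

    [vmware]
    host_ip = <vCenter hostname or IP address>
    host_username = <vCenter username>
    host_password = <vCenter password>
    cluster_name = <vCenter cluster name>
    # optional; for example "nas.*" selects data stores whose names start with nas
    datastore_regex = <optional data store regex>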
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:622(para) -msgid "Many nova.conf options are relevant to libvirt but do not apply to this driver." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:625(para) -msgid "You must complete additional configuration for environments that use vSphere 5.0 and earlier. See ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:630(title) -msgid "Images with VMware vSphere" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:631(para) -msgid "The vCenter driver supports images in the VMDK format. Disks in this format can be obtained from VMware Fusion or from an ESX environment. It is also possible to convert other formats, such as qcow2, to the VMDK format using the utility. After a VMDK disk is available, load it into the OpenStack Image service. Then, you can use it with the VMware vCenter driver. The following sections provide additional details on the supported disks and the commands used for conversion and upload." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:641(title) -msgid "Supported image types" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:642(para) -msgid "Upload images to the OpenStack Image service in VMDK format. The following VMDK disk types are supported:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:646(para) -msgid "VMFS Flat Disks (includes thin, thick, zeroedthick, and eagerzeroedthick). Note that once a VMFS thin disk is exported from VMFS to a non-VMFS location, like the OpenStack Image service, it becomes a preallocated flat disk. This impacts the transfer time from the OpenStack Image service to the data store when the full preallocated flat disk, rather than the thin disk, must be transferred." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:656(para) -msgid "Monolithic Sparse disks. Sparse disks get imported from the OpenStack Image service into ESX as thin provisioned disks. Monolithic Sparse disks can be obtained from VMware Fusion or can be created by converting from other virtual disk formats using the qemu-img utility." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:665(para) -msgid "The following table shows the property that applies to each of the supported VMDK disk types:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:669(caption) -msgid "OpenStack Image service disk type settings" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:672(th) -msgid "vmware_disktype property" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:673(th) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:51(title) -msgid "VMDK disk type" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:678(td) -msgid "sparse" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:680(para) -msgid "Monolithic Sparse" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:684(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:89(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:91(td) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:641(literal) -msgid "thin" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:686(para) -msgid "VMFS flat, thin provisioned" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:690(td) -msgid "preallocated (default)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:692(para) -msgid "VMFS flat, thick/zeroedthick/eagerzeroedthick" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:698(para) -msgid "The property is set when an image is loaded into the OpenStack Image service. For example, the following command creates a Monolithic Sparse image by setting to sparse:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:707(para) -msgid "Specifying thin does not provide any advantage over preallocated with the current version of the driver. Future versions might restore the thin properties of the disk after it is downloaded to a vSphere data store." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:714(title) -msgid "Convert and load images" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:715(para) -msgid "Using the qemu-img utility, disk images in several formats (such as, qcow2) can be converted to the VMDK format." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:718(para) -msgid "For example, the following command can be used to convert a qcow2 Ubuntu Trusty cloud image:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:723(para) -msgid "VMDK disks converted through qemu-img are always monolithic sparse VMDK disks with an IDE adapter type. Using the previous example of the Ubuntu Trusty image after the qemu-img conversion, the command to upload the VMDK disk should be something like:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:734(para) -msgid "Note that the is set to sparse and the vmware_adaptertype is set to ide in the previous command." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:737(para) -msgid "If the image did not come from the qemu-img utility, the vmware_disktype and vmware_adaptertype might be different. 
To determine the image adapter type from an image file, use the following command and look for the line:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:744(para) -msgid "Assuming a preallocated disk type and an iSCSI lsiLogic adapter type, the following command uploads the VMDK disk:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:752(para) -msgid "Currently, OS boot VMDK disks with an IDE adapter type cannot be attached to a virtual SCSI controller and likewise disks with one of the SCSI adapter types (such as, busLogic, lsiLogic, lsiLogicsas, paraVirtual) cannot be attached to the IDE controller. Therefore, as the previous examples show, it is important to set the property correctly. The default adapter type is lsiLogic, which is SCSI, so you can omit the property if you are certain that the image adapter type is lsiLogic." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:764(title) -msgid "Tag VMware images" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:765(para) -msgid "In a mixed hypervisor environment, OpenStack Compute uses the tag to match images to the correct hypervisor type. For VMware images, set the hypervisor type to vmware. Other valid hypervisor types include: hyperv, ironic, lxc, qemu, uml, and xen. Note that qemu is used for both QEMU and KVM hypervisor types." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:783(title) -msgid "Optimize images" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:784(para) -msgid "Monolithic Sparse disks are considerably faster to download but have the overhead of an additional conversion step. When imported into ESX, sparse disks get converted to VMFS flat thin provisioned disks. The download and conversion steps only affect the first launched instance that uses the sparse disk image. The converted disk image is cached, so subsequent instances that use this disk image can simply use the cached version." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:792(para) -msgid "To avoid the conversion step (at the cost of longer download times) consider converting sparse disks to thin provisioned or preallocated disks before loading them into the OpenStack Image service." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:796(para) -msgid "Use one of the following tools to pre-convert sparse disks." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:800(emphasis) -msgid "vSphere CLI tools" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:803(para) -msgid "Sometimes called the remote CLI or rCLI." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:804(para) -msgid "Assuming that the sparse disk is made available on a data store accessible by an ESX host, the following command converts it to preallocated format:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:808(para) -msgid "Note that the vifs tool from the same CLI package can be used to upload the disk to be converted. The vifs tool can also be used to download the converted disk if necessary." 
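A sketch of the conversion and upload commands described above (file and image names are illustrative; the sparse and ide properties follow from the surrounding text):

    qemu-img convert -f qcow2 -O vmdk trusty-server-cloudimg-amd64-disk1.img trusty-server-cloudimg-amd64-disk1.vmdk
    glance image-create --name trusty-cloud --is-public True --container-format bare --disk-format vmdk \
      --property vmware_disktype="sparse" --property vmware_adaptertype="ide" \
      < trusty-server-cloudimg-amd64-disk1.vmdk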
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:814(emphasis) -msgid "vmkfstools directly on the ESX host" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:816(para) -msgid "If the SSH service is enabled on an ESX host, the sparse disk can be uploaded to the ESX data store through scp and the vmkfstools local to the ESX host can use used to perform the conversion. After you log in to the host through ssh, run this command:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:823(emphasis) -msgid "vmware-vdiskmanager" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:824(para) -msgid "vmware-vdiskmanager is a utility that comes bundled with VMware Fusion and VMware Workstation. The following example converts a sparse disk to preallocated format:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:831(para) -msgid "In the previous cases, the converted vmdk is actually a pair of files:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:833(para) -msgid "The descriptor file converted.vmdk." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:837(para) -msgid "The actual virtual disk data file converted-flat.vmdk." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:840(para) -msgid "The file to be uploaded to the OpenStack Image Service is converted-flat.vmdk." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:845(title) -msgid "Image handling" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:846(para) -msgid "The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. As a result, the vCenter OpenStack Compute driver must download the VMDK via HTTP from the OpenStack Image service to a data store that is visible to the hypervisor. To optimize this process, the first time a VMDK file is used, it gets cached in the data store. A cached image is stored in a folder named after the image ID. Subsequent virtual machines that need the VMDK use the cached version and don't have to copy the file again from the OpenStack Image service." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:856(para) -msgid "Even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared data store. To avoid this copy, boot the image in linked_clone mode. To learn how to enable this mode, see ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:860(para) -msgid "You can also use the vmware_linked_clone property in the OpenStack Image service to override the linked_clone mode on a per-image basis." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:864(para) -msgid "If spawning a virtual machine image from ISO with a VMDK disk, the image is created and attached to the virtual machine as a blank disk. In that case vmware_linked_clone property for the image is just ignored." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:870(para) -msgid "If multiple compute nodes are running on the same host, or have a shared file system, you can enable them to use the same cache folder on the back-end data store. To configure this action, set the option in the nova.conf file. Its value stands for the name prefix of the folder where cached images are stored." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:876(para) -msgid "This can take effect only if compute nodes are running on the same host, or have a shared file system." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:878(para) -msgid "You can automatically purge unused images after a specified period of time. To configure this action, set these options in the DEFAULT section in the nova.conf file:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:885(para) -msgid "Set this option to to specify that unused images should be removed after the duration specified in the option. The default is ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:892(para) -msgid "Specifies the duration in seconds after which an unused image is purged from the cache. The default is (24 hours)." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:900(title) -msgid "Networking with VMware vSphere" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:901(para) -msgid "The VMware driver supports networking with the nova-network service or the OpenStack Networking Service. Depending on your installation, complete these configuration steps before you provision VMs:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:907(para) -msgid " The nova-network service with the FlatManager or FlatDHCPManager. Create a port group with the same name as the flat_network_bridge value in the nova.conf file. The default value is br100. If you specify another value, the new value must be a valid Linux bridge identifier that adheres to Linux bridge naming conventions." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:916(para) -msgid "All VM NICs are attached to this port group." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:917(para) -msgid "Ensure that the flat interface of the node that runs the nova-network service has a path to this network." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:921(para) -msgid "When configuring the port binding for this port group in vCenter, specify ephemeral for the port binding type. For more information, see Choosing a port binding type in ESX/ESXi in the VMware Knowledge Base." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:929(para) -msgid "The nova-network service with the VlanManager. Set the vlan_interface configuration option to match the ESX host interface that handles VLAN-tagged VM traffic." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:934(para) -msgid "OpenStack Compute automatically creates the corresponding port groups." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:938(para) -msgid "If you are using the OpenStack Networking Service: Before provisioning VMs, create a port group with the same name as the vmware.integration_bridge value in nova.conf (default is br-int). All VM NICs are attached to this port group for management by the OpenStack Networking plug-in." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:949(title) -msgid "Volumes with VMware vSphere" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:950(para) -msgid "The VMware driver supports attaching volumes from the OpenStack Block Storage service. 
The VMware VMDK driver for OpenStack Block Storage is recommended and should be used for managing volumes based on vSphere data stores. For more information about the VMware VMDK driver, see VMware VMDK Driver. Also an iSCSI volume driver provides limited support and can be used only for attachments." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:959(title) -msgid "vSphere 5.0 and earlier additional set up" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:960(para) -msgid "Users of vSphere 5.0 or earlier must host their WSDL files locally. These steps are applicable for vCenter 5.0 or ESXi 5.0 and you can either mirror the WSDL from the vCenter or ESXi server that you intend to use or you can download the SDK directly from VMware. These workaround steps fix a known issue with the WSDL that was resolved in later versions." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:966(para) -msgid "When setting the VMwareVCDriver configuration options, you must include the wsdl_location option. For more information, see VMwareVCDriver configuration options above." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:972(title) -msgid "To mirror WSDL from vCenter (or ESXi)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:974(para) -msgid "Set the VMWAREAPI_IP shell variable to the IP address for your vCenter or ESXi host from where you plan to mirror files. For example:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:980(para) -msgid "Create a local file system directory to hold the WSDL files:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:985(para) -msgid "Change into the new directory. " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:989(para) -msgid "Use your OS-specific tools to install a command-line tool that can download files like ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:994(para) -msgid "Download the files to the local file cache:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:1004(para) -msgid "Because the reflect-types.xsd and reflect-messagetypes.xsd files do not fetch properly, you must stub out these files. Use the following XML listing to replace the missing file content. The XML parser underneath Python can be very particular and if you put a space in the wrong place, it can break the parser. Copy the following contents and formatting carefully." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:1021(para) -msgid "Now that the files are locally present, tell the driver to look for the SOAP service WSDLs in the local file system and not on the remote vSphere server. Add the following setting to the nova.conf file for your nova-compute node:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:1031(para) -msgid "Alternatively, download the version appropriate SDK from http://www.vmware.com/support/developer/vc-sdk/ and copy it to the /opt/stack/vmware file. Make sure that the WSDL is available, in for example /opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl. You must point nova.conf to fetch this WSDL file from the local file system by using a URL." 
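A sketch of the WSDL mirroring steps described above (the local directory and the exact list of WSDL/XSD files are assumptions for illustration):

    export VMWAREAPI_IP=<your vCenter or ESXi IP>
    mkdir -p /opt/stack/vmware/wsdl/5.0 && cd /opt/stack/vmware/wsdl/5.0
    wget --no-check-certificate https://$VMWAREAPI_IP/sdk/vimService.wsdl
    wget --no-check-certificate https://$VMWAREAPI_IP/sdk/vim.wsdl
    # ...repeat for the remaining XSD files referenced by the WSDL

and then point the driver at the local copy in nova.conf:

    [vmware]
    wsdl_location = file:///opt/stack/vmware/wsdl/5.0/vimService.wsdl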
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:1038(para) -msgid "When using the VMwareVCDriver (vCenter) with OpenStack Compute with vSphere version 5.0 or earlier, nova.conf must include the following extra config option:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_vmware.xml:1047(para) -msgid "To customize the VMware driver, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:12(title) -msgid "Hyper-V virtualization platform" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:21(emphasis) -msgid "Windows Server 2008 R2" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:22(para) -msgid "Both Server and Server Core with the Hyper-V role enabled (Shared Nothing Live migration is not supported using 2008 R2)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:26(emphasis) -msgid "Windows Server 2012 and Windows Server 2012 R2" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:27(para) -msgid "Server and Core (with the Hyper-V role enabled), and Hyper-V Server" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:13(para) -msgid "It is possible to use Hyper-V as a compute node within an OpenStack Deployment. The nova-compute service runs as \"openstack-compute,\" a 32-bit service directly upon the Windows platform with the Hyper-V role enabled. The necessary Python components as well as the nova-compute service are installed directly onto the Windows platform. Windows Clustering Services are not needed for functionality within the OpenStack infrastructure. The use of the Windows Server 2012 platform is recommend for the best experience and is the platform for active development. The following Windows platforms have been tested as compute nodes:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:32(title) -msgid "Hyper-V configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:33(para) -msgid "The only OpenStack services required on a Hyper-V node are nova-compute and neutron-hyperv-agent. Regarding the resources needed for this host you have to consider that Hyper-V will require 16GB - 20GB of disk space for the OS itself, including updates. Two NICs are required, one connected to the management network and one to the guest data network." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:41(para) -msgid "The following sections discuss how to prepare the Windows Hyper-V node for operation as an OpenStack compute node. Unless stated otherwise, any configuration information should work for the Windows 2008 R2, 2012 and 2012 R2 platforms." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:47(title) -msgid "Local storage considerations" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:48(para) -msgid "The Hyper-V compute node needs to have ample storage for storing the virtual machine images running on the compute nodes. You may use a single volume for all, or partition it into an OS volume and VM volume. It is up to the individual deploying to decide." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:54(title) -msgid "Configure NTP" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:55(para) -msgid "Network time services must be configured to ensure proper operation of the OpenStack nodes. To set network time on your Windows host you must run the following commands:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:59(para) -msgid "Keep in mind that the node will have to be time synchronized with the other nodes of your OpenStack environment, so it is important to use the same NTP server. Note that in case of an Active Directory environment, you may do this only for the AD Domain Controller." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:65(title) -msgid "Configure Hyper-V virtual switching" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:66(para) -msgid "Information regarding the Hyper-V virtual Switch can be located here: http://technet.microsoft.com/en-us/library/hh831823.aspx" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:70(para) -msgid "To quickly enable an interface to be used as a Virtual Interface the following PowerShell may be used:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:73(replaceable) -msgid "YOUR_BRIDGE_NAME" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:74(para) -msgid "It is very important to make sure that when you are using an Hyper-V node with only 1 NIC the -AllowManagementOS option is set on True, otherwise you will lose connectivity to the Hyper-V node." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:79(title) -msgid "Enable iSCSI initiator service" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:80(para) -msgid "To prepare the Hyper-V node to be able to attach to volumes provided by cinder you must first make sure the Windows iSCSI initiator service is running and started automatically." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:87(title) -msgid "Configure shared nothing live migration" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:88(para) -msgid "Detailed information on the configuration of live migration can be found here: http://technet.microsoft.com/en-us/library/jj134199.aspx" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:91(para) -msgid "The following outlines the steps of shared nothing live migration." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:94(para) -msgid "The target hosts ensures that live migration is enabled and properly configured in Hyper-V." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:98(para) -msgid "The target hosts checks if the image to be migrated requires a base VHD and pulls it from the Image service if not already available on the target host." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:104(para) -msgid "The source hosts ensures that live migration is enabled and properly configured in Hyper-V." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:108(para) -msgid "The source hosts initiates a Hyper-V live migration." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:111(para) -msgid "The source hosts communicates to the manager the outcome of the operation." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:119(literal) -msgid "instances_shared_storage = False" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:120(para) -msgid "This needed to support \"shared nothing\" Hyper-V live migrations. It is used in nova/compute/manager.py" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:124(literal) -msgid "limit_cpu_features = True" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:125(para) -msgid "This flag is needed to support live migration to hosts with different CPU features. This flag is checked during instance creation in order to limit the CPU features used by the VM." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:131(literal) -msgid "instances_path = DRIVELETTER:\\PATH\\TO\\YOUR\\INSTANCES" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:115(para) -msgid "The following two configuration options/flags are needed in order to support Hyper-V live migration and must be added to your nova.conf on the Hyper-V compute node:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:135(para) -msgid "Additional Requirements:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:138(para) -msgid "Hyper-V 2012 R2 or Windows Server 2012 R2 with Hyper-V role enabled" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:141(para) -msgid "A Windows domain controller with the Hyper-V compute nodes as domain members" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:145(para) -msgid "The instances_path command-line option/flag needs to be the same on all hosts." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:149(para) -msgid "The openstack-compute service deployed with the setup must run with domain credentials. You can set the service credentials with:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:157(emphasis) -msgid "How to setup live migration on Hyper-V" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:158(para) -msgid "To enable 'shared nothing live' migration, run the 3 PowerShell instructions below on each Hyper-V host:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:161(replaceable) ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:359(replaceable) ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:374(replaceable) ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:384(replaceable) ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:400(replaceable) ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:108(replaceable) ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:109(replaceable) ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:111(replaceable) -msgid "IP_ADDRESS" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:164(para) -msgid "Please replace the IP_ADDRESS with the address of the interface which will provide live migration." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:166(emphasis) -msgid "Additional Reading" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:167(para) -msgid "Here's an article that clarifies the various live migration options in Hyper-V:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:171(link) -msgid "http://ariessysadmin.blogspot.ro/2012/04/hyper-v-live-migration-of-windows.html" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:176(title) -msgid "Install nova-compute using OpenStack Hyper-V installer" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:178(para) -msgid "In case you want to avoid all the manual setup, you can use Cloudbase Solutions' installer. You can find it here:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:182(link) -msgid "https://www.cloudbase.it/downloads/HyperVNovaCompute_Beta.msi" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:184(para) -msgid "It installs an independent Python environment, in order to avoid conflicts with existing applications, generates dynamically a nova.conf file based on the parameters provided by you." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:186(para) -msgid "The installer can also be used for an automated and unattended mode for deployments on a massive number of servers. More details about how to use the installer and its features can be found here:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:189(link) -msgid "https://www.cloudbase.it" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:192(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:53(title) ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:88(title) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:69(title) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:23(title) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:10(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:82(title) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:25(title) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:120(title) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:11(title) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:11(title) -msgid "Requirements" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:194(title) -msgid "Python" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:195(para) -msgid "Python 2.7 32bit must be installed as most of the libraries are not working properly on the 64bit version." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:198(title) -msgid "Setting up Python prerequisites" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:200(para) -msgid "Download and then install it using the MSI installer from here:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:203(link) -msgid "http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:212(para) -msgid "Make sure that the Python and Python\\Scripts paths are set up in the PATH environment variable." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:222(title) -msgid "Python dependencies" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:223(para) -msgid "The following packages need to be downloaded and manually installed:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:227(package) -msgid "setuptools" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:230(link) -msgid "http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11.win32-py2.7.exel" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:235(package) -msgid "pip" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:238(link) -msgid "http://pip.readthedocs.org/en/latest/installing.html" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:243(package) -msgid "PyMySQL" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:245(link) -msgid "http://codegood.com/download/10/" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:250(package) -msgid "PyWin32" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:253(link) -msgid "http://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/pywin32-217.win32-py2.7.exe" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:258(package) -msgid "Greenlet" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:261(link) -msgid "http://www.lfd.uci.edu/~gohlke/pythonlibs/#greenlet" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:266(package) -msgid "PyCryto" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:269(link) -msgid "http://www.voidspace.org.uk/downloads/pycrypto26/pycrypto-2.6.win32-py2.7.exe" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:274(para) -msgid "The following packages must be installed with pip:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:277(package) -msgid "ecdsa" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:280(package) -msgid "amqp" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:283(package) -msgid "wmi" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:291(title) -msgid "Other dependencies" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:292(para) -msgid "qemu-img is required for some of the image related operations. You can get it from here: http://qemu.weilnetz.de/. You must make sure that the qemu-img path is set in the PATH environment variable." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:297(para) -msgid "Some Python packages need to be compiled, so you may use MinGW or Visual Studio. You can get MinGW from here: http://sourceforge.net/projects/mingw/. You must configure which compiler to be used for this purpose by using the distutils.cfg file in $Python27\\Lib\\distutils, which can contain:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:306(para) -msgid "As a last step for setting up MinGW, make sure that the MinGW binaries' directories are set up in PATH." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:311(title) -msgid "Install Nova-compute" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:313(title) -msgid "Download the nova code" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:316(para) -msgid "Use Git to download the necessary source code. The installer to run Git on Windows can be downloaded here:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:319(link) ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:88(link) -msgid "https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:323(para) -msgid "Download the installer. Once the download is complete, run the installer and follow the prompts in the installation wizard. The default should be acceptable for the needs of the document." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:332(para) -msgid "Run the following to clone the Nova code." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:338(title) -msgid "Install nova-compute service" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:339(para) -msgid "To install Nova-compute, run:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:347(title) -msgid "Configure nova-compute" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:348(para) -msgid "The nova.conf file must be placed in C:\\etc\\nova for running OpenStack on Hyper-V. Below is a sample nova.conf for Windows:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:389(replaceable) -msgid "IP_ADDRESS:35357" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:401(para) -msgid " contains a reference of all options for hyper-v." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:405(title) -msgid "Prepare images for use with Hyper-V" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:406(para) -msgid "Hyper-V currently supports only the VHD and VHDX file format for virtual machine instances. 
Detailed instructions for installing virtual machines on Hyper-V can be found here:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:410(link) -msgid "http://technet.microsoft.com/en-us/library/cc772480.aspx" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:411(para) -msgid "Once you have successfully created a virtual machine, you can then upload the image to glance using the native glance-client:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:413(replaceable) -msgid "VM_IMAGE_NAME" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:415(para) -msgid "VHD and VHDX file sizes can be bigger than their maximum internal size; as such, you need to boot instances using a flavor with a slightly bigger disk size than the internal size of the disk file. To create VHDs, use the following PowerShell cmdlet:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:417(replaceable) -msgid "DISK_NAME.vhd" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:417(replaceable) -msgid "VHD_SIZE" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:421(title) -msgid "Run Compute with Hyper-V" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:422(para) -msgid "To start the nova-compute service, run this command from a console in the Windows server:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:428(title) -msgid "Troubleshoot Hyper-V configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:432(para) -msgid "I ran the command from my controller; however, I'm not seeing smiley faces for Hyper-V compute nodes. What do I do?" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:436(emphasis) -msgid "Verify that you are synchronized with a network time source. For instructions about how to configure NTP on your Hyper-V compute node, see ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:443(para) -msgid "How do I restart the compute service?" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_hyper-v.xml:450(para) -msgid "How do I restart the iSCSI initiator service?" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:6(title) -msgid "Hypervisors" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:7(para) -msgid "OpenStack Compute supports many hypervisors, which might make it difficult for you to choose one. Most installations use only one hypervisor. However, you can use and to schedule different hypervisors within the same installation. The following links help you choose a hypervisor. See http://docs.openstack.org/developer/nova/support-matrix.html for a detailed list of features and support across the hypervisors." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:18(para) -msgid "The following hypervisors are supported:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:21(para) -msgid "KVM - Kernel-based Virtual Machine. The virtual disk formats that it supports are inherited from QEMU since it uses a modified QEMU program to launch the virtual machine. The supported formats include raw, qcow2, and VMware formats."
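As a sketch of the two commands referred to above (the glance flags and the hypervisor_type property are assumptions and may need adjusting for your deployment; the New-VHD pattern uses the placeholders from the text):

    # Upload a Hyper-V image to glance (v1 client syntax)
    glance image-create --name "VM_IMAGE_NAME" --disk-format vhd --container-format bare --property hypervisor_type=hyperv --file C:\path\to\image.vhd

    # Create a VHD with PowerShell
    New-VHD DISK_NAME.vhd -SizeBytes VHD_SIZE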
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:30(para) -msgid "LXC - Linux Containers (through libvirt), used to run Linux-based virtual machines." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:35(para) -msgid "QEMU - Quick EMUlator, generally only used for development purposes." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:40(para) -msgid "UML - User Mode Linux, generally only used for development purposes." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:46(para) -msgid "VMware vSphere 4.1 update 1 and newer, runs VMware-based Linux and Windows images through a connection with a vCenter server or directly with an ESXi host." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:52(para) -msgid "Xen (using libvirt) - Xen Project Hypervisor using libvirt as management interface into nova-compute to run Linux, Windows, FreeBSD and NetBSD virtual machines." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:57(para) -msgid "XenServer - XenServer, Xen Cloud Platform (XCP) and other XAPI based Xen variants runs Linux or Windows virtual machines. You must install the nova-compute service in a para-virtualized VM." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:65(para) -msgid " Hyper-V - Server virtualization with Microsoft's Hyper-V, use to run Windows, Linux, and FreeBSD virtual machines. Runs nova-compute natively on the Windows virtualization platform." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:75(title) -msgid "Hypervisor configuration basics" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:76(para) -msgid "The node where the nova-compute service is installed and operates on the same node that runs all of the virtual machines. This is referred to as the compute node in this guide." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:80(para) -msgid "By default, the selected hypervisor is KVM. To change to another hypervisor, change the virt_type option in the [libvirt] section of nova.conf and restart the nova-compute service." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:84(para) -msgid "Here are the general nova.conf options that are used to configure the compute node's hypervisor: ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-hypervisors.xml:87(para) -msgid "Specific options for particular hypervisors can be found in the following sections." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:6(title) ./doc/config-reference/compute/section_compute-configure-xapi.xml:75(title) -msgid "XenAPI configuration reference" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:7(para) -msgid "The following section discusses some commonly changed options when using the XenAPI driver. The table below provides a complete reference of all configuration options available for configuring XAPI with OpenStack." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:13(para) -msgid "The recommended way to use XAPI with OpenStack is through the XenAPI driver. 
To enable the XenAPI driver, add the following configuration options to /etc/nova/nova.conf and restart OpenStack Compute:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:21(replaceable) -msgid "your_xenapi_management_ip_address" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:23(replaceable) -msgid "your_password" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:24(para) -msgid "These connection details are used by OpenStack Compute service to contact your hypervisor and are the same details you use to connect XenCenter, the XenServer management console, to your XenServer node." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:30(para) -msgid "The connection_url is generally the management network IP address of the XenServer." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:36(title) ./doc/config-reference/networking/section_networking-options-reference.xml:20(title) -msgid "Agent" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:37(para) -msgid "The agent is a piece of software that runs on the instances, and communicates with OpenStack. In case of the XenAPI driver, the agent communicates with OpenStack through XenStore (see the Xen Project Wiki for more information on XenStore)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:44(para) -msgid "If you don't have the guest agent on your VMs, it takes a long time for OpenStack Compute to detect that the VM has successfully started. Generally a large timeout is required for Windows instances, but you may want to adjust: agent_version_timeout within the [xenserver] section." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:54(title) -msgid "VNC proxy address" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:55(para) -msgid "Assuming you are talking to XAPI through a management network, and XenServer is on the address: 10.10.1.34 specify the same address for the vnc proxy address: vncserver_proxyclient_address=10.10.1.34" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:63(title) -msgid "Storage" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:64(para) -msgid "You can specify which Storage Repository to use with nova by editing the following flag. To use the local-storage setup by the default installer: Another alternative is to use the \"default\" storage (for example if you have attached NFS or any other shared storage): " -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-xapi.xml:76(para) -msgid "To customize the XenAPI driver, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-ami-setup.xml:8(title) -msgid "Prepare for AMI type images" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-ami-setup.xml:9(para) -msgid "To support AMI type images in your OpenStack installation, you must create the /boot/guest directory on dom0. One of the OpenStack XAPI plugins will extract the kernel and ramdisk from AKI and ARI images and put them to that directory." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-ami-setup.xml:15(para) -msgid "OpenStack maintains the contents of this directory and its size should not increase during normal operation. 
However, in case of power failures or accidental shutdowns, some files might be left over. To prevent these files from filling up dom0's filesystem, set up this directory as a symlink that points to a subdirectory of the local SR." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-ami-setup.xml:22(para) -msgid "Run these commands in dom0 to achieve this setup:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:8(title) -msgid "QEMU" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:9(para) -msgid "From the perspective of the Compute service, the QEMU hypervisor is very similar to the KVM hypervisor. Both are controlled through libvirt, both support the same feature set, and all virtual machine images that are compatible with KVM are also compatible with QEMU. The main difference is that QEMU does not support native virtualization. Consequently, QEMU has worse performance than KVM and is a poor choice for a production deployment." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:16(para) -msgid "Running on older hardware that lacks virtualization support." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:20(para) -msgid "Running the Compute service inside of a virtual machine for development or testing purposes, where the hypervisor does not support native virtualization for guests." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:14(para) -msgid "The typical uses cases for QEMU are" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:26(para) -msgid "To enable QEMU, add these settings to nova.conf:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:31(para) -msgid "For some operations you may also have to install the utility:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:33(para) -msgid "On Ubuntu: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:36(para) -msgid "On Red Hat Enterprise Linux, Fedora, or CentOS: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:39(para) -msgid "On openSUSE: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:42(para) -msgid "The QEMU hypervisor supports the following virtual machine image formats:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:45(para) ./doc/config-reference/compute/section_hypervisor_kvm.xml:27(para) -msgid "Raw" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:48(para) ./doc/config-reference/compute/section_hypervisor_kvm.xml:30(para) -msgid "QEMU Copy-on-write (qcow2)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_qemu.xml:51(para) ./doc/config-reference/compute/section_hypervisor_kvm.xml:36(para) -msgid "VMware virtual machine disk format (vmdk)" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-db.xml:6(title) -msgid "Database configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-db.xml:7(para) -msgid "You can configure OpenStack Compute to use any SQLAlchemy-compatible database. The database name is nova. The nova-conductor service is the only service that writes to the database. The other Compute services access the database through the nova-conductor service." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-db.xml:14(para) -msgid "To ensure that the database schema is current, run the following command:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-db.xml:16(para) -msgid "If nova-conductor is not used, entries to the database are mostly written by the nova-scheduler service, although all services must be able to update entries in the database." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-configure-db.xml:21(para) -msgid "In either case, use the configuration option settings documented in to configure the connection string for the nova database." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_lxc.xml:7(title) -msgid "LXC (Linux containers)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_lxc.xml:8(para) -msgid "LXC (also known as Linux containers) is a virtualization technology that works at the operating system level. This is different from hardware virtualization, the approach used by other hypervisors such as KVM, Xen, and VMware. LXC (as currently implemented using libvirt in the Compute service) is not a secure virtualization technology for multi-tenant environments (specifically, containers may affect resource quotas for other containers hosted on the same machine). Additional containment technologies, such as AppArmor, may be used to provide better isolation between containers, although this is not the case by default. For all these reasons, the choice of this virtualization technology is not recommended in production." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_lxc.xml:16(para) -msgid "If your compute hosts do not have hardware support for virtualization, LXC will likely provide better performance than QEMU. In addition, if your guests must access specialized hardware, such as GPUs, this might be easier to achieve with LXC than other hypervisors." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_lxc.xml:19(para) -msgid "Some OpenStack Compute features might be missing when running with LXC as the hypervisor. See the hypervisor support matrix for details." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_lxc.xml:22(para) -msgid "To enable LXC, ensure the following options are set in /etc/nova/nova.conf on all hosts running the nova-compute service." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_lxc.xml:29(para) -msgid "On Ubuntu, enable LXC support in OpenStack by installing the nova-compute-lxc package." -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/compute/section_compute-config-samples.xml:41(None) -msgid "@@image: '../../common/figures/SCH_5004_V00_NUAC-Network_mode_KVM_Flat_OpenStack.png'; md5=1e883ef27e5912b5c516d153b8844a28" -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. 
-#: ./doc/config-reference/compute/section_compute-config-samples.xml:80(None) -msgid "@@image: '../../common/figures/SCH_5005_V00_NUAC-Network_mode_XEN_Flat_OpenStack.png'; md5=3b151435a0fda3702d4fac5a964fac83" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:6(title) -msgid "Example nova.conf configuration files" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:8(para) -msgid "The following sections describe the configuration options in the nova.conf file. You must copy the nova.conf file to each compute node. The sample nova.conf files show examples of specific configurations." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:14(title) -msgid "Small, private cloud" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:15(para) -msgid "This example nova.conf file configures a small private cloud with cloud controller services, database server, and messaging server on the same server. In this case, CONTROLLER_IP represents the IP address of a central server, BRIDGE_INTERFACE represents the bridge such as br100, the NETWORK_INTERFACE represents an interface to your VLAN setup, and passwords are represented as DB_PASSWORD_COMPUTE for your Compute (nova) database password, and RABBIT PASSWORD represents the password to your message queue installation." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:28(title) ./doc/config-reference/compute/section_compute-config-samples.xml:35(title) ./doc/config-reference/compute/section_compute-config-samples.xml:74(title) -msgid "KVM, Flat, MySQL, and Glance, OpenStack or EC2 API" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:30(para) -msgid "This example nova.conf file, from an internal Rackspace test system, is used for demonstrations." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:47(title) -msgid "XenServer, Flat networking, MySQL, and Glance, OpenStack API" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-config-samples.xml:49(para) -msgid "This example nova.conf file is from an internal Rackspace test system." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-options-reference.xml:7(title) -msgid "Compute sample configuration files" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-options-reference.xml:9(title) -msgid "nova.conf - configuration options" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-options-reference.xml:10(para) -msgid "For a complete list of all available configuration options for each OpenStack Compute service, run bin/nova-<servicename> --help." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:8(title) -msgid "Xen via Libvirt" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:9(para) -msgid "OpenStack Compute supports the Xen Project Hypervisor (or Xen). Xen can be integrated with OpenStack Compute via the libvirttoolstack or via the XAPItoolstack. This section describes how to set up OpenStack Compute with Xen and libvirt. For information on how to set up Xen with XAPI refer to ." 
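The sample nova.conf files mentioned above were not carried into these strings. A heavily abbreviated, illustrative sketch of the "small, private cloud" case, using the placeholders from the paragraph above and assuming legacy nova-network with FlatDHCP (this is not the original Rackspace sample):

    [DEFAULT]
    my_ip = CONTROLLER_IP
    network_manager = nova.network.manager.FlatDHCPManager
    flat_network_bridge = BRIDGE_INTERFACE
    flat_interface = NETWORK_INTERFACE
    rabbit_host = CONTROLLER_IP
    rabbit_password = RABBIT_PASSWORD

    [database]
    connection = mysql://nova:DB_PASSWORD_COMPUTE@CONTROLLER_IP/nova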
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:17(title) -msgid "Installing Xen with Libvirt" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:18(para) -msgid "At this stage we recommend to use the baseline that we use for the Xen Project OpenStack CI Loop, which contains the most recent stability fixes to both Xen and Libvirt." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:22(para) -msgid "Xen 4.5.1 (or newer) and Libvirt 1.2.15 (or newer) contain the most recent OpenStack improvements for Xen. The necessary Xen changes have also been backported to the Xen 4.4.3 stable branch (not yet released at this stage). Please check with the Linux and FreeBSD distros you are intending to use as Dom 0, whether the relevant version of Xen and Libvirt are available as installable packages." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:32(para) -msgid "The latest releases of Xen and libvirt packages that fulfil the above minimum requirements for the various openSUSE distributions can always be found and installed from the Open Build Service Virtualization project. To install these latest packages, add the Virtualization repository to your software management stack and get the newest packages from there. More information about the latest Xen and Libvirt packages are available here and here." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:42(para) -msgid "Alternatively, it is possible to use the Ubuntu LTS 14.04 Xen Package 4.4.1-0ubuntu0.14.04.4 (Xen 4.4.1) and apply the patches outlined here. You can also use the Ubuntu LTS 14.04 libvirt package 1.2.2 libvirt_1.2.2-0ubuntu13.1.7 as baseline and update it to libvirt version 1.2.15, or 1.2.14 with the patches outlined here applied. Note that this will require re-build these packages partly from source." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:52(para) -msgid "For further information and latest developments, you may want to consult the Xen Project's mailing lists for OpenStack related issues and questions." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:58(title) -msgid "Configuring Xen with Libvirt" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:59(para) -msgid "To enable Xen via libvirt, ensure the following options are set in /etc/nova/nova.conf on all hosts running the nova-compute service." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:68(title) -msgid "Additional configuration options" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:69(para) -msgid "Use the following as a guideline for configuring Xen for use in OpenStack:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:72(para) -msgid "Dom0 Memory: Set it between 1GB and 4GB by adding the following parameter to the Xen Boot Options in the grub.conf file. " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:77(para) -msgid "Note that the above memory limits are suggestions and should be based on the available compute host resources. For large hosts, that will run many hundred of instances,the chosen values may need to be higher." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:80(para) -msgid "The location of the grub.conf file depends on the host Linux distribution that you are using. Please refer to the distro documentation for more details (see Dom 0 for more resources)." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:86(para) -msgid "Dom0 vcpus: Set the virtual CPUs to 4 and employ CPU pinning by adding the following parameters to the Xen Boot Options in the grub.conf file. " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:92(para) -msgid "Note that the above virtual CPU limits are suggestions and should be based on the available compute host resources. For large hosts, that will run many hundred of instances, the suggested values may need to be higher." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:97(para) -msgid "PV vs HVM guests: A Xen virtual machine can be paravirtualized (PV) or hardware virtualized (HVM). The virtualization mode determines the interaction between Xen, Dom 0, and the guest VM's kernel. PV guests are aware of the fact that they are virtualized and will co-operate with Xen and Dom 0. The choice of virtualization mode determines performance characteristics. For an overview of Xen virtualization modes, see Xen Guest Types." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:105(para) -msgid "In OpenStack, customer VMs may run in either PV or HVM mode. The mode is a property of the operating system image used by the VM, and is changed by adjusting the image metadata stored in the glance image service. The image metadata can be changed using the nova or glance commands." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:110(para) -msgid "To choose one of the HVM modes (HVM, HVM with PV Drivers or PVHVM) use nova or glance to set the vm_mode property to hvm" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:115(replaceable) ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:116(replaceable) ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:119(replaceable) ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:120(replaceable) ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:141(replaceable) ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:142(replaceable) -msgid "img-uuid" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:113(para) -msgid "To choose one of the HVM modes (HVM, HVM with PV Drivers or PVHVM) use one of the following two commands To chose PV mode, which is supported by NetBSD, FreeBSD and Linux, useone of the following two commands " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:122(para) -msgid "The default for virtualization mode in nova is PV mode." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:125(para) -msgid "Image Formats: Xen supports raw, qcow2 and vhd image formats. For more information on image formats, refer to the OpenStack Virtual Image Guide and the Storage Options Guide on the Xen Project Wiki." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:133(para) -msgid "Image Metadata: In addition to the vm_mode property discussed above, the hypervisor_type property is another important component of the image metadata, especially if your cloud contains mixed hypervisor compute nodes. Setting the hypervisor_type property allows the nova scheduler to select a compute node running the specified hypervisor when launching instances of the image. Image metadata such as vm_mode, hypervisor_type, architecture, and others can be set when importing the image to glance. The metatdata can also be changed using the nova or glance commands: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:144(para) -msgid "For more more information on image metadata, refer to the OpenStack Virtual Image Guide." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:150(para) -msgid "To customize the libvirt driver, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:155(title) -msgid "Troubleshoot Xen with Libvirt" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:161(para) -msgid "/var/log/nova/compute.log (for more information refer to )." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:163(para) -msgid "/var/log/libvirt/libxl/libxl-driver.log," -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:164(para) -msgid "/var/log/xen/qemu-dm-${instancename}.log," -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:165(para) -msgid "/var/log/xen/xen-hotplug.log," -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:166(para) -msgid "/var/log/xen/console/guest-${instancename} (to enable see Enabling Guest Console Logs) and" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:170(para) -msgid "Host Console Logs (read Enabling and Retrieving Host Console Logs)." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:158(para) -msgid "Important Log Files: When an instance fails to start, or when you come across other issues, you should first consult the following log files: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:176(para) -msgid "If you need further help you can ask questions on the mailing lists xen-users@, wg-openstack@ or raise a bug against Xen." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:187(title) -msgid "Known issues" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:190(para) -msgid "Xen via libvirt is currently only supported with nova networking. A number of bugs are currently worked on to make sure that Xen via libvirt will also work with Neutron." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:196(title) -msgid "Additional information and resources" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:197(para) -msgid "The following section contains links to other useful resources" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:200(para) -msgid "wiki.xenproject.org/wiki/OpenStack - OpenStack Documentation on the Xen Project wiki" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:205(para) -msgid "wiki.xenproject.org/wiki/OpenStack_CI_Loop_for_Xen-Libvirt - Information about the Xen Project OpenStack CI Loop" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:211(para) -msgid "wiki.xenproject.org/wiki/OpenStack_via_DevStack - How to set up OpenStack via DevStack" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_libvirt.xml:216(para) -msgid "Mailing lists for OpenStack related issues and questions - This list is dedicated to coordinating bug fixes and issues across Xen, libvirt and OpenStack and the CI loop." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:7(title) -msgid "Compute log files" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:8(para) -msgid "The corresponding log file of each Compute service is stored in the /var/log/nova/ directory of the host on which each service runs." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:12(caption) -msgid "Log files used by Compute services" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:18(td) ./doc/config-reference/dashboard/section_dashboard-log-files.xml:21(td) ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:18(td) ./doc/config-reference/networking/section_networking-log-files.xml:17(td) ./doc/config-reference/block-storage/section_cinder-log-files.xml:18(td) -msgid "Log file" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:21(td) -msgid "Service name (CentOS/Fedora/openSUSE/Red Hat Enterprise Linux/SUSE Linux Enterprise)" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:25(td) -msgid "Service name (Ubuntu/Debian)" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:33(filename) ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:33(filename) ./doc/config-reference/block-storage/section_cinder-log-files.xml:33(filename) -msgid "api.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:35(td) -msgid "openstack-nova-api" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:44(filename) -msgid "cert.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:46(para) -msgid "The X509 certificate service (openstack-nova-cert/nova-cert) is only required by the EC2 API to the Compute service." 
-msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:52(systemitem) -msgid "openstack-nova-cert" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:55(systemitem) -msgid "nova-cert" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:60(filename) -msgid "compute.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:63(systemitem) -msgid "openstack-nova-compute" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:66(systemitem) -msgid "nova-compute" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:71(filename) -msgid "conductor.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:74(systemitem) -msgid "openstack-nova-conductor" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:77(systemitem) -msgid "nova-conductor" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:82(filename) -msgid "consoleauth.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:85(systemitem) -msgid "openstack-nova-consoleauth" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:88(systemitem) -msgid "nova-consoleauth" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:93(filename) -msgid "network.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:94(para) -msgid "The nova network service (openstack-nova-network/nova-network) only runs in deployments that are not configured to use the Networking service (neutron)." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:101(systemitem) -msgid "openstack-nova-network" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:104(systemitem) -msgid "nova-network" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:109(filename) -msgid "nova-manage.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:112(systemitem) ./doc/config-reference/compute/section_nova-log-files.xml:115(systemitem) -msgid "nova-manage" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:120(filename) ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:55(filename) ./doc/config-reference/block-storage/section_cinder-log-files.xml:55(filename) -msgid "scheduler.log" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:123(systemitem) -msgid "openstack-nova-scheduler" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-log-files.xml:126(systemitem) -msgid "nova-scheduler" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:6(title) -msgid "Cells" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:8(para) -msgid "Cells functionality enables you to scale an OpenStack Compute cloud in a more distributed fashion without having to use complicated technologies like database and message queue clustering. It supports very large deployments." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:13(para) -msgid "When this functionality is enabled, the hosts in an OpenStack Compute cloud are partitioned into groups called cells. Cells are configured as a tree. The top-level cell should have a host that runs a nova-api service, but no nova-compute services. Each child cell should run all of the typical nova-* services in a regular Compute cloud except for nova-api. 
You can think of cells as a normal Compute deployment in that each cell has its own database server and message queue broker." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:25(para) -msgid "The nova-cells service handles communication between cells and selects cells for new instances. This service is required for every cell. Communication between cells is pluggable, and currently the only option is communication through RPC." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:30(para) -msgid "Cells scheduling is separate from host scheduling. nova-cells first picks a cell. Once a cell is selected and the new build request reaches its nova-cells service, it is sent over to the host scheduler in that cell and the build proceeds as it would have without cells." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:38(para) -msgid "Cell functionality is currently considered experimental." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:42(title) -msgid "Cell configuration options" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:51(para) -msgid "Set to True to turn on cell functionality. Default is false." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:59(para) -msgid "Name of the current cell. Must be unique for each cell." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:66(para) -msgid "List of arbitrary key=value pairs defining capabilities of the current cell. Values include hypervisor=xenserver;kvm,os=linux;windows." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:76(para) -msgid "How long in seconds to wait for replies from calls between cells." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:83(para) -msgid "Filter classes that the cells scheduler should use. By default, uses \"nova.cells.filters.all_filters\" to map to all cells filters included with Compute." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:93(para) -msgid "Weight classes that the scheduler for cells uses. By default, uses nova.cells.weights.all_weighers to map to all cells weight algorithms included with Compute." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:102(para) -msgid "Multiplier used to weight RAM. Negative numbers indicate that Compute should stack VMs on one host instead of spreading out new VMs to more hosts in the cell. The default value is 10.0." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:43(para) -msgid "Cells are disabled by default. All cell-related configuration options appear in the [cells] section in nova.conf. The following cell-related options are currently supported:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:112(title) -msgid "Configure the API (top-level) cell" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:113(para) -msgid "The cell type must be changed in the API cell so that requests can be proxied through nova-cells down to the correct cell properly. 
Edit the nova.conf file in the API cell, and specify api in the key:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:126(title) -msgid "Configure the child cells" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:127(para) -msgid "Edit the nova.conf file in the child cells, and specify compute in the key:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:137(title) -msgid "Configure the database in each cell" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:138(para) -msgid "Before bringing the services online, the database in each cell needs to be configured with information about related cells. In particular, the API cell needs to know about its immediate children, and the child cells must know about their immediate agents. The information needed is the RabbitMQ server credentials for the particular cell." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:145(para) -msgid "Use the command to add this information to the database in each cell:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:177(para) -msgid "As an example, assume an API cell named api and a child cell named cell1." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:180(para) -msgid "Within the api cell, specify the following RabbitMQ server information:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:187(para) -msgid "Within the cell1 child cell, specify the following RabbitMQ server information:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:194(para) -msgid "You can run this in the API cell as root:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:198(para) -msgid "Repeat the previous steps for all child cells." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:199(para) -msgid "In the child cell, run the following, as root:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:203(para) -msgid "To customize the Compute cells, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:208(title) -msgid "Cell scheduling configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:209(para) -msgid "To determine the best cell to use to launch a new instance, Compute uses a set of filters and weights defined in the /etc/nova/nova.conf file. The following options are available to prioritize cells for scheduling:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:218(para) -msgid "List of filter classes. By default is specified, which maps to all cells filters included with Compute (see )." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:229(para) -msgid "List of weight classes. By default is specified, which maps to all cell weight algorithms included with Compute. The following modules are available:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:237(para) -msgid "mute_child. Downgrades the likelihood of child cells being chosen for scheduling requests, which haven't sent capacity or capability updates in a while. Options include (multiplier for mute children; value should be negative)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:248(para) -msgid "ram_by_instance_type. 
Select cells with the most RAM capacity for the instance type being requested. Because higher weights win, Compute returns the number of available units for the instance type requested. The option defaults to 10.0 that adds to the weight by a factor of 10. Use a negative number to stack VMs on one host instead of spreading out new VMs to more hosts in the cell." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:263(para) -msgid "weight_offset. Allows modifying the database to weight a particular cell. You can use this when you want to disable a cell (for example, '0'), or to set a default cell by making its weight_offset very high (for example, '999999999999999'). The highest weight will be the first cell to be scheduled for launching an instance." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:278(para) -msgid "Additionally, the following options are available for the cell scheduler:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:284(para) -msgid "Specifies how many times the scheduler tries to launch a new instance when no cells are available (default=10)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:292(para) -msgid "Specifies the delay (in seconds) between retries (default=2)." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:297(para) -msgid "As an admin user, you can also add a filter that directs builds to a particular cell. The policy.json file must have a line with \"cells_scheduler_filter:TargetCellFilter\" : \"is_admin:True\" to let an admin user specify a scheduler hint to direct a build to a particular cell." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:306(title) -msgid "Optional cell configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:307(para) -msgid "Cells store all inter-cell communication data, including user names and passwords, in the database. Because the cells data is not updated very frequently, use the option to specify a JSON file to store cells data. With this configuration, the database is no longer consulted when reloading the cells data. The file must have columns present in the Cell model (excluding common database fields and the column). You must specify the queue connection information through a field, instead of , , and so on. The has the following form:" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:321(replaceable) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:80(replaceable) -msgid "USERNAME" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:321(replaceable) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:83(replaceable) -msgid "PASSWORD" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:321(replaceable) -msgid "HOSTNAME" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:321(replaceable) -msgid "VIRTUAL_HOST" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-cells.xml:322(para) -msgid "The scheme can be either qpid or rabbit, as shown previously. The following sample shows this optional configuration:" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:7(title) -msgid "Overview of nova.conf" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:8(para) -msgid "The nova.conf configuration file is an INI file format as explained in ." 
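To make the [cells] options described above concrete, the following minimal nova.conf sketch shows the INI layout for an API cell and a child cell. The option names (enable, name, cell_type) are taken from the cell configuration options listed above; the cell names api and cell1 match the example used for the cell database commands, and the values are illustrative only.

    # API (top-level) cell -- /etc/nova/nova.conf
    [cells]
    enable = True
    name = api
    cell_type = api

    # Child cell -- /etc/nova/nova.conf
    [cells]
    enable = True
    name = cell1
    cell_type = compute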
-msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:12(para) -msgid "You can use a particular configuration option file by using the option (nova.conf) parameter when you run one of the nova-* services. This parameter inserts configuration option definitions from the specified configuration file name, which might be useful for debugging or performance tuning." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:18(para) -msgid "For a list of configuration options, see the tables in this guide." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:20(para) -msgid "To learn more about the nova.conf configuration file, review the general purpose configuration options documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:24(para) -msgid "Do not specify quotes around Nova options." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:27(title) -msgid "Sections" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:34(para) -msgid "Contains most configuration options. If the documentation for a configuration option does not specify its section, assume that it appears in this section." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:42(literal) -msgid "[baremetal]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:44(para) -msgid "Configures the baremetal hypervisor driver." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:51(para) -msgid "Configures cells functionality. For details, see ." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:59(literal) -msgid "[conductor]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:61(para) -msgid "Configures the nova-conductor service." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:68(literal) -msgid "[database]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:70(para) -msgid "Configures the database that Compute uses." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:76(literal) -msgid "[glance]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:78(para) -msgid "Configures how to access the Image service." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:84(literal) -msgid "[hyperv]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:86(para) -msgid "Configures the Hyper-V hypervisor driver." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:92(literal) -msgid "[image_file_url]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:94(para) -msgid "Configures additional filesystems to access the Image Service." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:101(literal) -msgid "[keymgr]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:103(para) -msgid "Configures the key manager." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:109(literal) -msgid "[keystone_authtoken]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:111(para) -msgid "Configures authorization via Identity service." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:117(literal) -msgid "[libvirt]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:119(para) -msgid "Configures the hypervisor drivers using the Libvirt library: KVM, LXC, Qemu, UML, Xen." 
-msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:126(literal) -msgid "[matchmaker_redis]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:128(para) -msgid "Configures a Redis server." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:134(literal) -msgid "[matchmaker_ring]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:136(para) -msgid "Configures a matchmaker ring." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:144(para) -msgid "Configures weights for the metrics weigher." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:150(literal) -msgid "[neutron]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:152(para) -msgid "Configures Networking specific options." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:158(literal) -msgid "[osapi_v3]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:160(para) -msgid "Configures the OpenStack Compute API v3." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:166(literal) -msgid "[rdp]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:168(para) -msgid "Configures RDP proxying." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:174(literal) -msgid "[serial_console]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:176(para) -msgid "Configures serial console." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:182(literal) -msgid "[spice]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:184(para) -msgid "Configures virtual consoles using SPICE." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:190(literal) -msgid "[ssl]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:192(para) -msgid "Configures certificate authority using SSL." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:198(literal) -msgid "[trusted_computing]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:200(para) -msgid "Configures the trusted computing pools functionality and how to connect to a remote attestation service." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:207(literal) -msgid "[upgrade_levels]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:209(para) -msgid "Configures version locking on the RPC (message queue) communications between the various Compute services to allow live upgrading an OpenStack installation." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:218(literal) -msgid "[vmware]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:220(para) -msgid "Configures the VMware hypervisor driver." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:226(literal) -msgid "[xenserver]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:228(para) -msgid "Configures the XenServer hypervisor driver." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:234(literal) -msgid "[zookeeper]" -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:236(para) -msgid "Configures the ZooKeeper ServiceGroup driver." -msgstr "" - -#: ./doc/config-reference/compute/section_nova-conf.xml:28(para) -msgid "Configuration options are grouped by section. 
The Compute configuration file supports the following sections: " -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:8(title) ./doc/config-reference/networking/section_rpc-for-networking.xml:8(title) -msgid "Configure the Oslo RPC messaging system" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:9(para) -msgid "OpenStack projects use AMQP, an open standard for messaging middleware. This messaging middleware enables the OpenStack services that run on multiple servers to talk to each other. OpenStack Oslo RPC supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:16(title) ./doc/config-reference/database-service/section-databaseservice-rpc.xml:17(title) ./doc/config-reference/networking/section_rpc-for-networking.xml:19(title) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:17(title) -msgid "Configure RabbitMQ" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:17(para) -msgid "OpenStack Oslo RPC uses RabbitMQ by default. Use these options to configure the RabbitMQ messaging system. The rpc_backend option is not required as long as RabbitMQ is the default messaging system. However, if it is included in the configuration, you must set it to rabbit." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:26(para) -msgid "You can use these additional options to configure the RabbitMQ messaging system. You can configure messaging communication for different installation scenarios, tune retries for RabbitMQ, and define the size of the RPC thread pool. To monitor notifications through RabbitMQ, you must set the option to nova.openstack.common.notifier.rpc_notifier in the nova.conf file. The default for sending usage data is sixty seconds plus a random number of seconds from zero to sixty." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:39(title) ./doc/config-reference/database-service/section-databaseservice-rpc.xml:24(title) ./doc/config-reference/networking/section_rpc-for-networking.xml:46(title) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:42(title) -msgid "Configure Qpid" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:40(para) -msgid "Use these options to configure the Qpid messaging system for OpenStack Oslo RPC. Qpid is not the default messaging system, so you must enable it by setting the option in the nova.conf file." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:48(para) -msgid "The Qpid driver has been deprecated. The driver is planned to be removed during the 'M' development cycle." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:52(para) -msgid "This critical option points the compute nodes to the Qpid broker (server). Set to the host name where the broker runs in the nova.conf file." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:57(para) ./doc/config-reference/networking/section_rpc-for-networking.xml:62(para) -msgid "The --qpid_hostname parameter accepts a host name or IP address value."
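As an illustrative sketch only (the Qpid driver is deprecated and the values below are placeholders), pointing Compute at a Qpid broker in nova.conf could look like the following; the port, credential, and SSL options discussed next are shown commented out:

    [DEFAULT]
    rpc_backend = qpid
    qpid_hostname = broker.example.com   # host name or IP address of the Qpid broker
    #qpid_port = 5671                    # only if the broker listens on a non-default port
    #qpid_username = guest
    #qpid_password = SECRET
    #qpid_protocol = ssl                 # switch the transport from TCP to SSL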
-msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:61(para) ./doc/config-reference/networking/section_rpc-for-networking.xml:69(para) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:61(para) -msgid "If the Qpid broker listens on a port other than the AMQP default of 5672, you must set the option to that value:" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:66(para) ./doc/config-reference/networking/section_rpc-for-networking.xml:78(para) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:68(para) -msgid "If you configure the Qpid broker to require authentication, you must add a user name and password to the configuration:" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:71(para) ./doc/config-reference/networking/section_rpc-for-networking.xml:87(para) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:74(para) -msgid "By default, TCP is used as the transport. To enable SSL, set the option:" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:74(para) -msgid "This table lists additional options that you use to configure the Qpid messaging driver for OpenStack Oslo RPC. These options are used infrequently." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:80(title) ./doc/config-reference/networking/section_rpc-for-networking.xml:102(title) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:87(title) -msgid "Configure ZeroMQ" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:81(para) -msgid "Use these options to configure the ZeroMQ messaging system for OpenStack Oslo RPC. ZeroMQ is not the default messaging system, so you must enable it by setting the option in the nova.conf file." -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:90(title) ./doc/config-reference/database-service/section-databaseservice-rpc.xml:38(title) ./doc/config-reference/networking/section_rpc-for-networking.xml:112(title) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:97(title) -msgid "Configure messaging" -msgstr "" - -#: ./doc/config-reference/compute/section_rpc.xml:91(para) -msgid "Use these options to configure the RabbitMQ and Qpid messaging drivers." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:7(title) -msgid "iSCSI interface and offload support in Compute" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:9(para) -msgid "iSCSI interface and offload support is only present since Kilo." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:12(para) -msgid "Compute supports open-iscsi iSCSI interfaces for offload cards. Offload hardware must be present and configured on every compute node where offload is desired. Once an open-iscsi interface is configured, the iface name (iface.iscsi_ifacename) should be passed to libvirt via the iscsi_iface parameter for use. All iscsi sessions will be bound to this iSCSI interface." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:23(member) -msgid "be2iscsi" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:24(member) -msgid "bnx2i" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:25(member) -msgid "cxgb3i" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:26(member) -msgid "cxgb4i" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:27(member) -msgid "qla4xxx" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:28(member) -msgid "ocs" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:21(para) -msgid "Currently supported transports (iface.transport_name) are . No configuration changes are needed outside of Compute node." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:31(para) -msgid "iSER is currently supported via the separate iSER LibvirtISERVolumeDriver and will be rejected if used via the iscsi_iface parameter." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:36(title) -msgid "iSCSI iface configuration" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:39(para) -msgid "Note the distinction between the transport name (iface.transport_name) and iface name (iface.iscsi_ifacename). The actual iface name must be specified via the iscsi_iface parameter to libvirt for offload to work." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:48(para) -msgid "The default name for an iscsi iface (open-iscsi parameter iface.iscsi_ifacename) is in the format transport_name.hwaddress when generated by iscsiadm." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:55(para) -msgid "iscsiadm can be used to view and generate current iface configuration. Every network interface that supports an open-iscsi transport can have one or more iscsi ifaces associated with it. If no ifaces have been configured for a network interface supported by an open-iscsi transport, this command will create a default iface configuration for that network interface. For example : " -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:70(para) -msgid "The output is in the format : iface_name transport_name,hwaddress,ipaddress,net_ifacename,initiatorname." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:76(replaceable) ./doc/config-reference/compute/section_compute-iscsioffload.xml:88(replaceable) -msgid "IFACE_NAME" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:75(para) -msgid "Individual iface configuration can be viewed via " -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:88(replaceable) -msgid "SETTING" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:88(replaceable) -msgid "VALUE" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:87(para) -msgid "Configuration can be updated as desired via " -msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:93(para) -msgid "All iface configurations need a minimum of iface.iface_name, iface.transport_name and iface.hwaddress to be correctly configured to work. Some transports may require iface.ipaddress and iface.net_ifacename as well to bind correctly." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-iscsioffload.xml:102(para) -msgid "Detailed configuration instructions can be found here http://www.open-iscsi.org/docs/README " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:9(para) -msgid "KVM is configured as the default hypervisor for Compute." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:11(para) -msgid "This document contains several sections about hypervisor selection. If you are reading this document linearly, you do not want to load the KVM module before you install nova-compute. The nova-compute service depends on qemu-kvm, which installs /lib/udev/rules.d/45-qemu-kvm.rules, which sets the correct permissions on the /dev/kvm device node." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:18(para) -msgid "To enable KVM explicitly, add the following configuration options to the /etc/nova/nova.conf file:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:24(para) -msgid "The KVM hypervisor supports the following virtual machine image formats:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:33(para) -msgid "QED Qemu Enhanced Disk" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:39(para) -msgid "This section describes how to enable KVM on your system. For more information, see the following distribution-specific documentation:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:43(para) -msgid "Fedora: Virtualization Getting Started Guide from the Fedora 22 documentation." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:49(para) -msgid "Ubuntu: KVM/Installation from the Community Ubuntu documentation." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:53(para) -msgid "Debian: Virtualization with KVM from the Debian handbook." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:58(para) -msgid "Red Hat Enterprise Linux: Installing virtualization packages on an existing Red Hat Enterprise Linux system from the Red Hat Enterprise Linux Virtualization Host Configuration and Guest Installation Guide." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:66(para) -msgid "openSUSE: Installing KVM from the openSUSE Virtualization with KVM manual." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:72(para) -msgid "SLES: Installing KVM from the SUSE Linux Enterprise Server Virtualization Guide." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:84(title) -msgid "Specify the CPU model of KVM guests" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:85(para) -msgid "The Compute service enables you to control the guest CPU model that is exposed to KVM virtual machines. 
Use cases include:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:89(para) -msgid "To maximize performance of virtual machines by exposing new host CPU features to the guest" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:93(para) -msgid "To ensure a consistent default CPU across all machines, removing reliance on variable QEMU defaults" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:97(para) -msgid "In libvirt, the CPU is specified by providing a base CPU model name (which is a shorthand for a set of feature flags), a set of additional feature flags, and the topology (sockets/cores/threads). The libvirt KVM driver provides a number of standard CPU model names. These models are defined in the /usr/share/libvirt/cpu_map.xml file. Check this file to determine which models are supported by your local installation." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:103(para) -msgid "Two Compute configuration options in the [libvirt] group of nova.conf define which type of CPU model is exposed to the hypervisor when using KVM: cpu_mode and cpu_model." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:107(para) -msgid "The cpu_mode option can take one of the following values: none, host-passthrough, host-model, and custom." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:111(title) -msgid "Host model (default for KVM & QEMU)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:112(para) -msgid "If your nova.conf file contains cpu_mode=host-model, libvirt identifies the CPU model in the /usr/share/libvirt/cpu_map.xml file that most closely matches the host, and requests additional CPU flags to complete the match. This configuration provides the maximum functionality and performance and maintains good reliability and compatibility if the guest is migrated to another host with slightly different host CPUs." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:121(title) -msgid "Host pass through" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:122(para) -msgid "If your nova.conf file contains cpu_mode=host-passthrough, libvirt tells KVM to pass through the host CPU with no modifications. The difference from host-model is that, instead of just matching feature flags, every last detail of the host CPU is matched. This gives the best performance, and can be important to some applications which check low-level CPU details, but it comes at a cost with respect to migration. The guest can only be migrated to a matching host CPU." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:131(title) -msgid "Custom" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:132(para) -msgid "If your nova.conf file contains cpu_mode=custom, you can explicitly specify one of the supported named models using the cpu_model configuration option. For example, to configure the KVM guests to expose Nehalem CPUs, your nova.conf file should contain:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:142(title) -msgid "None (default for all libvirt-driven hypervisors other than KVM & QEMU)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:144(para) -msgid "If your nova.conf file contains cpu_mode=none, libvirt does not specify a CPU model. Instead, the hypervisor chooses the default model."
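Pulling the KVM settings above together, a minimal nova.conf sketch that enables KVM explicitly and exposes the Nehalem model from the custom example could look like this; the compute_driver line is an assumption about the deployment, and cpu_mode and cpu_model belong to the [libvirt] group as described above:

    [DEFAULT]
    compute_driver = libvirt.LibvirtDriver

    [libvirt]
    virt_type = kvm
    cpu_mode = custom
    cpu_model = Nehalem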
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:150(title) -msgid "Guest agent support" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:151(para) -msgid "Use guest agents to enable optional access between compute nodes and guests through a socket, using the QMP protocol." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:153(para) -msgid "To enable this feature, you must set hw_qemu_guest_agent=yes as a metadata parameter on the image you wish to use to create the guest-agent-capable instances from. You can explicitly disable the feature by setting hw_qemu_guest_agent=no in the image metadata." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:159(title) -msgid "KVM performance tweaks" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:160(para) -msgid "The VHostNet kernel module improves network performance. To load the kernel module, run the following command as root:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:166(title) -msgid "Troubleshoot KVM" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:167(para) -msgid "Trying to launch a new virtual machine instance fails with the ERRORstate, and the following error appears in the /var/log/nova/nova-compute.log file:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:171(para) -msgid "This message indicates that the KVM kernel modules were not loaded." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:172(para) -msgid "If you cannot start VMs after installation without rebooting, the permissions might not be set correctly. This can happen if you load the KVM module before you install nova-compute. To check whether the group is set to kvm, run:" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_kvm.xml:177(para) -msgid "If it is not set to kvm, run:" -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:156(None) -msgid "@@image: '../../common/figures/xenserver_architecture.png'; md5=99792432daf7f0302672fb8f03cb63bb" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:8(title) -msgid "XenServer (and other XAPI based Xen variants)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:9(para) -msgid "This section describes XAPI managed hypervisors, and how to use them with OpenStack." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:14(title) -msgid "Terminology" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:16(title) -msgid "Xen" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:17(para) -msgid "A hypervisor that provides the fundamental isolation between virtual machines. Xen is open source (GPLv2) and is managed by XenProject.org, a cross-industry organization and a Linux Foundation Collaborative project." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:24(para) -msgid "Xen is a component of many different products and projects. The hypervisor itself is very similar across all these projects, but the way that it is managed can be different, which can cause confusion if you're not clear which toolstack you are using. 
Make sure you know what toolstack you want before you get started. If you want to use Xen with libvirt in OpenStack Compute refer to ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:33(title) -msgid "XAPI" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:34(para) -msgid "XAPI is one of the toolstacks that could control a Xen based hypervisor. XAPI's role is similar to libvirt's in the KVM world. The API provided by XAPI is called XenAPI. To learn more about the provided interface, look at XenAPI Object Model Overview for definitions of XAPI specific terms such as SR, VDI, VIF and PIF." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:46(para) -msgid "OpenStack has a compute driver which talks to XAPI, therefore all XAPI managed servers could be used with OpenStack." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:52(title) -msgid "XenAPI" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:53(para) -msgid "XenAPI is the API provided by XAPI. This name is also used by the python library that is a client for XAPI." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:60(para) -msgid "An Open Source virtualization platform that delivers all features needed for any server and datacenter implementation including the Xen hypervisor and XAPI for the management. For more information and product downloads, visit xenserver.org ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:72(title) -msgid "XCP" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:73(para) -msgid "XCP is not supported anymore. XCP project recommends all XCP users to upgrade to the latest version of XenServer by visiting xenserver.org ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:83(title) -msgid "XenServer-core" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:84(para) -msgid "This is a method for building the core packages in a XenServer installation on an existing RPM-based system. Initial support for this configuration (notably running Compute services in domain 0) was added in Havana. XenServer-core for Debian/Ubuntu is built from the main branch and, therefore, is continuously up to date." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:93(title) -msgid "Kronos" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:94(para) -msgid "This is a project initiated to provide the ability to install XAPI toolstack onto an existing Debian-based deployment. For more information, visit the Xen wiki wiki.xenproject.org/wiki/Project_Kronos ." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:105(title) -msgid "Privileged and unprivileged domains" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:106(para) -msgid "A Xen host runs a number of virtual machines, VMs, or domains (the terms are synonymous on Xen). One of these is in charge of running the rest of the system, and is known as domain 0, or dom0. It is the first domain to boot after Xen, and owns the storage and networking hardware, the device drivers, and the primary control software. Any other VM is unprivileged, and is known as a domU or guest. 
All customer VMs are unprivileged, but you should note that on Xen, the OpenStack Compute service (nova-compute) also runs in a domU. This gives a level of security isolation between the privileged system software and the OpenStack software (much of which is customer-facing). This architecture is described in more detail later." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:123(title) -msgid "Paravirtualized versus hardware virtualized domains" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:124(para) -msgid "A Xen virtual machine can be paravirtualized (PV) or hardware virtualized (HVM). This refers to the interaction between Xen, domain 0, and the guest VM's kernel. PV guests are aware of the fact that they are virtualized and will co-operate with Xen and domain 0; this gives them better performance characteristics. HVM guests are not aware of their environment, and the hardware has to pretend that they are running on an unvirtualized machine. HVM guests do not need to modify the guest operating system, which is essential when running Windows." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:136(para) -msgid "In OpenStack, customer VMs may run in either PV or HVM mode. However, the OpenStack domU (that's the one running nova-compute) must be running in PV mode." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:145(title) -msgid "XenAPI deployment architecture" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:148(para) -msgid "A basic OpenStack deployment on a XAPI-managed server, assuming that the network provider is nova-network, looks like this: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:165(para) -msgid "The hypervisor: Xen" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:170(para) -msgid "Domain 0: runs XAPI and some small pieces from OpenStack, the XAPI plug-ins." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:176(para) -msgid "OpenStack VM: The Compute service runs in a paravirtualized virtual machine, on the host under management. Each host runs a local instance of Compute. It is also running an instance of nova-network." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:187(para) -msgid "OpenStack Compute uses the XenAPI Python library to talk to XAPI, and it uses the Management Network to reach from the OpenStack VM to Domain 0." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:161(para) -msgid "Key things to note: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:199(para) -msgid "The above diagram assumes FlatDHCP networking." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:208(para) -msgid "Management network: RabbitMQ, MySQL, inter-host communication, and compute-XAPI communication. Please note that the VM images are downloaded by the XenAPI plug-ins, so make sure that the OpenStack Image service is accessible through this network. It usually means binding those services to the management interface." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:220(para) -msgid "Tenant network: controlled by nova-network, this is used for tenant traffic." 
-msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:227(para) -msgid "Public network: floating IPs, public API endpoints." -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:204(para) -msgid "There are three main OpenStack networks: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:236(para) -msgid "The networks shown here must be connected to the corresponding physical networks within the data center. In the simplest case, three individual physical network cards could be used. It is also possible to use VLANs to separate these networks. Please note, that the selected configuration must be in line with the networking model selected for the cloud. (In case of VLAN networking, the physical channels have to be able to forward the tagged traffic.)" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:195(para) -msgid "Some notes on the networking: " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:252(title) -msgid "Further reading" -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:258(para) -msgid "Citrix XenServer official documentation: http://docs.vmd.citrix.com/XenServer " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:267(para) -msgid "What is Xen? by XenProject.org: XenProject.org > Users > Cloud " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:276(para) -msgid "Xen Hypervisor project: http://www.xenproject.org/developers/teams/hypervisor.html " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:285(para) -msgid "Xapi project: http://www.xenproject.org/developers/teams/xapi.html " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:294(para) -msgid "Further XenServer and OpenStack information: http://wiki.openstack.org/XenServer " -msgstr "" - -#: ./doc/config-reference/compute/section_hypervisor_xen_api.xml:253(para) -msgid "Here are some of the resources available to learn more about Xen: " -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:6(title) ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:7(title) -msgid "Additional sample configuration files" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:7(para) -msgid "Files in this section can be found in /etc/nova." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:9(title) ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:23(title) ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:17(title) ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:21(title) -msgid "api-paste.ini" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:10(para) -msgid "The Compute service stores its API configuration settings in the api-paste.ini file." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:17(title) ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:56(title) ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:29(title) ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:24(title) ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:38(title) ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:28(title) ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:49(title) -msgid "policy.json" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:18(para) -msgid "The policy.json file defines additional access controls that apply to the Compute service." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:24(title) ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:34(title) ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:32(title) ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:33(title) -msgid "rootwrap.conf" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:25(para) -msgid "The rootwrap.conf file defines configuration values used by the rootwrap script when the Compute service needs to escalate its privileges to those of the root user." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-sample-configuration-files.xml:28(para) -msgid "It is also possible to disable the root wrapper, and default to sudo only. Configure the disable_rootwrap option in the section of the nova.conf configuration file." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:8(title) -msgid "Install XAPI plug-ins" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:9(para) -msgid "When you use a XAPI managed hypervisor, you can install a Python script (or any executable) on the host side, and execute that through XenAPI. These scripts are called plug-ins. The OpenStack related XAPI plug-ins live in OpenStack Compute's code repository. These plug-ins have to be copied to dom0's filesystem, to the appropriate directory, where XAPI can find them. It is important to ensure that the version of the plug-ins are in line with the OpenStack Compute installation you are using." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:19(para) -msgid "The plugins should typically be copied from the Nova installation running in the Compute's DomU, but if you want to download the latest version the following procedure can be used." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:25(title) -msgid "Manually installing the plug-ins" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:28(para) -msgid "Create temporary files/directories:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:33(para) -msgid "Get the source from GitHub. The example assumes the master branch is used, and the XenServer host is accessible as xenserver. Match those parameters to your setup." 
-msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:43(para) -msgid "Copy the plug-ins to the hypervisor:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install-plugins.xml:49(para) -msgid "Remove temporary files/directories:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:7(title) -msgid "Install XenServer" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:8(para) -msgid "Before you can run OpenStack with XenServer, you must install the hypervisor on an appropriate server ." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:18(para) -msgid "Xen is a type 1 hypervisor: When your server starts, Xen is the first software that runs. Consequently, you must install XenServer before you install the operating system where you want to run OpenStack code. You then install nova-compute into a dedicated virtual machine on the host." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:33(link) -msgid "http://xenserver.org/open-source-virtualization-download.html" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:27(para) -msgid "Use the following link to download XenServer's installation media: " -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:40(para) -msgid "When you install many servers, you might find it easier to perform PXE boot installations . You can also package any post-installation changes that you want to make to your XenServer by following the instructions of creating your own XenServer supplemental pack ." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:55(para) -msgid "Make sure you use the EXT type of storage repository (SR). Features that require access to VHD files (such as copy on write, snapshot and migration) do not work when you use the LVM SR. Storage repository (SR) is a XAPI-specific term relating to the physical storage where virtual disks are stored." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:62(para) -msgid "On the XenServer installation screen, choose the XenDesktop Optimized option. If you use an answer file, make sure you use srtype=\"ext\" in the installation tag of the answer file." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:73(title) -msgid "Post-installation steps" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:74(para) -msgid "The following steps need to be completed after the hypervisor's installation:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:80(para) -msgid "For resize and migrate functionality, enable password-less SSH authentication and set up the /images directory on dom0." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:88(para) -msgid "Install the XAPI plug-ins." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:93(para) -msgid "To support AMI type images, you must set up /boot/guest symlink/directory in dom0." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:100(para) -msgid "Create a paravirtualized virtual machine that can run nova-compute." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-install.xml:106(para) -msgid "Install and configure nova-compute in the above virtual machine." 
-msgstr "" - -#: ./doc/config-reference/compute/section_compute-conductor.xml:7(title) -msgid "Conductor" -msgstr "" - -#: ./doc/config-reference/compute/section_compute-conductor.xml:8(para) -msgid "The nova-conductor service enables OpenStack to function without compute nodes accessing the database. Conceptually, it implements a new layer on top of nova-compute. It should not be deployed on compute nodes, or else the security benefits of removing database access from nova-compute are negated. Just like other nova services such as nova-api or nova-scheduler, it can be scaled horizontally. You can run multiple instances of nova-conductor on different machines as needed for scaling purposes." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-conductor.xml:21(para) -msgid "The methods exposed by nova-conductor are relatively simple methods used by nova-compute to offload its database operations. Places where nova-compute previously performed database access are now talking to nova-conductor. However, we have plans in the medium to long term to move more and more of what is currently in nova-compute up to the nova-conductor layer. The Compute service will start to look like a less intelligent slave service to nova-conductor. The conductor service will implement long running complex operations, ensuring forward progress and graceful error handling. This will be especially beneficial for operations that cross multiple compute nodes, such as migrations or resizes." -msgstr "" - -#: ./doc/config-reference/compute/section_compute-conductor.xml:40(para) -msgid "To customize the Conductor, use the configuration option settings documented in ." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-resize-setup.xml:8(title) -msgid "Modify dom0 for resize/migration support" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-resize-setup.xml:9(para) -msgid "To resize servers with XenServer you must:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-resize-setup.xml:12(para) -msgid "Establish a root trust between all hypervisor nodes of your deployment:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-resize-setup.xml:16(para) -msgid "To do so, generate an ssh key-pair with the command. Ensure that each of your dom0's authorized_keys file (located in /root/.ssh/authorized_keys) contains the public key fingerprint (located in /root/.ssh/id_rsa.pub)." -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-resize-setup.xml:26(para) -msgid "Provide a /images mount point to the dom0 for your hypervisor:" -msgstr "" - -#: ./doc/config-reference/compute/section_xapi-resize-setup.xml:30(para) -msgid "dom0 space is at a premium so creating a directory in dom0 is potentially dangerous and likely to fail especially when you resize large servers. The least you can do is to symlink /images to your local storage SR. The following instructions work for an English-based installation of XenServer and in the case of ext3-based SR (with which the resize functionality is known to work correctly)." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:6(title) -msgid "Image service sample configuration files" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:7(para) -msgid "You can find the files that are described in this section in the /etc/glance/ directory." 
-msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:11(title) -msgid "glance-api.conf" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:12(para) -msgid "The configuration file for the Image service API is found in the glance-api.conf file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:14(para) ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:22(para) ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:13(para) -msgid "This file must be modified after installation." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:18(title) -msgid "glance-registry.conf" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:19(para) -msgid "Configuration for the Image service's registry, which stores the metadata about images, is found in the glance-registry.conf file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:26(title) -msgid "glance-api-paste.ini" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:27(para) -msgid "Configuration for the Image service's API middleware pipeline is found in the glance-api-paste.ini file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:29(para) -msgid "You should not need to modify this file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:33(title) -msgid "glance-manage.conf" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:34(para) -msgid "The Image service's custom logging options are found in the glance-manage.conf file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:36(para) -msgid "Options set in glance-manage.conf will override options of the same section and name set in glance-registry.conf and glance-api.conf. Similarly, options in glance-api.conf will override options set in glance-registry.conf." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:44(title) -msgid "glance-registry-paste.ini" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:45(para) -msgid "The Image service's middleware pipeline for its registry is found in the glance-registry-paste.ini file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:50(title) -msgid "glance-scrubber.conf" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:51(para) -msgid " is a utility for the Image service that cleans up images that have been deleted; its configuration is stored in the glance-scrubber.conf file." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-sample-configuration-files.xml:57(para) -msgid "The /etc/glance/policy.json file defines additional access controls that apply to the Image service." 
-msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:7(title) -msgid "Support for ISO images" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:8(para) -msgid "You can load ISO images into the Image service. You can subsequently boot an ISO image using Compute." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:11(title) -msgid "To load an ISO image to an Image service data store" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:14(para) -msgid "In the Image service, run the following command:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:19(para) -msgid "In this command, ubuntu.iso is the name for the ISO image after it is loaded to the Image service, and ubuntu-14.04.2-server-amd64.iso is the name of the source ISO image." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:26(para) -msgid "Optionally, to confirm the upload in Image Service (glance), run this command:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-ISO-support.xml:26(para) -msgid "Run this command:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:7(title) -msgid "Configure vCenter data stores for the Image service back end" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:10(para) -msgid "To use vCenter data stores for the Image service back end, you must update the glance-api.conf file, as follows:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:15(para) -msgid "Add data store parameters to the VMware Datastore Store Options section." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:19(para) -msgid "Specify vSphere as the back end." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:23(para) -msgid "You must configure any configured Image service data stores for the Compute service." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:30(para) -msgid "If you intend to use multiple data stores for the back end, use the SPBM feature." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:26(para) -msgid "You can specify vCenter data stores directly by using the data store name or Storage Policy Based Management (SPBM), which requires vCenter Server 5.5 or later. For details, see . 
" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:33(para) -msgid "In the glance_store section, set the option to , as shown in this code sample:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:43(para) -msgid "The following table describes the parameters in the VMware Datastore Store Options section:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:47(para) -msgid "The following block of text shows a sample configuration:" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:56(replaceable) -msgid "ADMINISTRATOR" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:59(replaceable) ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:71(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:82(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:146(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:354(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:411(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:490(replaceable) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:182(replaceable) -msgid "password" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:64(replaceable) -msgid "DATACENTER" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:67(replaceable) -msgid "datastore1" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:82(replaceable) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:512(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1060(td) -msgid "5" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:86(replaceable) -msgid "/openstack_glance" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:89(replaceable) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:346(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:437(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:478(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:409(literal) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:429(literal) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:798(literal) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:835(literal) ./doc/config-reference/conf-changes/cinder.xml:786(td) ./doc/config-reference/conf-changes/cinder.xml:791(td) ./doc/config-reference/conf-changes/cinder.xml:796(td) ./doc/config-reference/conf-changes/manila.xml:336(td) ./doc/config-reference/conf-changes/manila.xml:341(td) ./doc/config-reference/conf-changes/manila.xml:365(td) ./doc/config-reference/conf-changes/manila.xml:370(td) ./doc/config-reference/conf-changes/keystone.xml:194(td) ./doc/config-reference/conf-changes/keystone.xml:199(td) ./doc/config-reference/conf-changes/ironic.xml:298(td) ./doc/config-reference/conf-changes/ironic.xml:303(td) ./doc/config-reference/conf-changes/sahara.xml:242(td) ./doc/config-reference/conf-changes/sahara.xml:247(td) 
./doc/config-reference/conf-changes/nova.xml:318(td) ./doc/config-reference/conf-changes/nova.xml:323(td) ./doc/config-reference/conf-changes/nova.xml:333(td) ./doc/config-reference/conf-changes/neutron.xml:296(td) ./doc/config-reference/conf-changes/neutron.xml:316(td) ./doc/config-reference/conf-changes/neutron.xml:336(td) ./doc/config-reference/conf-changes/neutron.xml:341(td) ./doc/config-reference/conf-changes/neutron.xml:346(td) ./doc/config-reference/conf-changes/neutron.xml:366(td) ./doc/config-reference/conf-changes/ceilometer.xml:281(td) ./doc/config-reference/conf-changes/ceilometer.xml:286(td) ./doc/config-reference/conf-changes/ceilometer.xml:296(td) ./doc/config-reference/conf-changes/trove.xml:374(td) ./doc/config-reference/conf-changes/trove.xml:379(td) ./doc/config-reference/conf-changes/trove.xml:434(td) ./doc/config-reference/conf-changes/glance.xml:229(td) ./doc/config-reference/conf-changes/glance.xml:234(td) ./doc/config-reference/conf-changes/heat.xml:268(td) ./doc/config-reference/conf-changes/heat.xml:287(td) ./doc/config-reference/conf-changes/heat.xml:292(td) -msgid "False" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:91(title) -msgid "Configure vCenter data stores for the back end" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:92(para) -msgid "You can specify a vCenter data store for the back end by setting the parameter value to the vCenter name of the data store. This configuration limits the back end to a single data store." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:98(title) -msgid "To configure a single data store" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:100(para) -msgid "If present, comment or delete the and parameters." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:106(para) -msgid "Uncomment and define the parameter with the name of the vCenter data store." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backend-vmware.xml:112(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:221(para) -msgid "Complete the other vCenter configuration parameters as appropriate." 
-msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:7(title) -msgid "Configure back ends" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:11(para) -msgid "OpenStack Block Storage (cinder)" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:14(para) -msgid "A directory on a local file system" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:17(para) -msgid "GridFS" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:20(para) -msgid "Ceph RBD" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:23(para) -msgid "Amazon S3" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:26(para) -msgid "Sheepdog" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:29(para) -msgid "OpenStack Object Storage (swift)" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:32(para) -msgid "VMware ESX" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-backends.xml:8(para) -msgid "The Image service supports several back ends for storing virtual machine images: The following tables detail the options available for each." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-api.xml:6(title) -msgid "Configure the API" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-api.xml:7(para) -msgid "The Image service has two APIs: the user-facing API, and the registry API, which is for internal requests that require access to the database." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-api.xml:10(para) -msgid "Both of the APIs currently have two major versions, v1 and v2. It is possible to run either or both versions, by setting appropriate values of enable_v1_api, enable_v2_api, enable_v1_registry and enable_v2_registry. If the v2 API is used, running glance-registry is optional, as v2 of glance-api can connect directly to the database." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-api.xml:18(para) -msgid "Tables of all the options used to configure the APIs, including enabling SSL and modifying WSGI settings are found below." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-rpc.xml:6(title) ./doc/config-reference/database-service/section-databaseservice-rpc.xml:7(title) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:7(title) -msgid "Configure the RPC messaging system" -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-rpc.xml:7(para) -msgid "OpenStack projects use an open standard for messaging middleware known as AMQP. This messaging middleware enables the OpenStack services that run on multiple servers to talk to each other. The OpenStack common library project, oslo, supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ." -msgstr "" - -#: ./doc/config-reference/image-service/section_image-service-rpc.xml:14(para) -msgid "The following tables contain settings to configure the messaging middleware for the Image service:" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-rpc.xml:8(para) -msgid "OpenStack projects use an open standard for messaging middleware known as AMQP. 
This messaging middleware enables the OpenStack services that run on multiple servers to talk to each other. OpenStack Trove RPC supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ." -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-rpc.xml:18(para) -msgid "Use these options to configure the RabbitMQ messaging system:" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-rpc.xml:25(para) -msgid "Use these options to configure the Qpid messaging system:" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-rpc.xml:31(title) -msgid "Configure ZeroMq" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-rpc.xml:32(para) -msgid "Use these options to configure the ZeroMq messaging system:" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-rpc.xml:40(para) ./doc/config-reference/networking/section_rpc-for-networking.xml:114(para) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:99(para) -msgid "Use these common options to configure the RabbitMQ, Qpid, and ZeroMq messaging drivers:" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-db.xml:7(title) -msgid "Configure the database" -msgstr "" - -#: ./doc/config-reference/database-service/section-databaseservice-db.xml:9(para) -msgid "Use the options to configure the used databases:" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:8(para) -msgid "Find the following files in /etc/openstack-dashboard." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:10(title) -msgid "keystone_policy.json" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:11(para) -msgid "The keystone_policy.json file defines additional access controls for the dashboard that apply to the Identity service." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:15(para) -msgid "The keystone_policy.json file must match the Identity service /etc/keystone/policy.json policy file." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:23(title) -msgid "nova_policy.json" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:24(para) -msgid "The nova_policy.json file defines additional access controls for the dashboard that apply to the Compute service." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-sample-configuration-files.xml:28(para) -msgid "The nova_policy.json file must match the Compute /etc/nova/policy.json policy file." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:7(title) -msgid "Dashboard log files" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:8(para) -msgid "The dashboard is served to users through the Apache web server (httpd)." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:10(para) -msgid "As a result, dashboard-related logs appear in files in the /var/log/httpd or /var/log/apache2 directory on the system where the dashboard is hosted. 
The following table describes these files:" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:16(caption) -msgid "Dashboard/httpd log files" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:27(filename) -msgid "access_log" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:28(td) -msgid "Logs all attempts to access the web server." -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:31(filename) -msgid "error_log" -msgstr "" - -#: ./doc/config-reference/dashboard/section_dashboard-log-files.xml:32(td) -msgid "Logs all unsuccessful attempts to access the web server, along with the reason that each attempt failed." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:6(title) -msgid "Shared File Systems sample configuration files" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:7(para) -msgid "All the files in this section can be found in /etc/manila." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:9(title) -msgid "manila.conf" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:10(para) -msgid "The manila.conf file is installed in /etc/manila by default. When you manually install the Shared File Systems service, the options in the manila.conf file are set to default values." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:14(para) -msgid "The manila.conf file contains most of the options to configure the Shared File Systems service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:24(para) -msgid "Use the api-paste.ini file to configure the Shared File Systems API service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:30(para) -msgid "The policy.json file defines additional access controls that apply to the Shared File Systems service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml:35(para) -msgid "The rootwrap.conf file defines configuration values used by the script when the Shared File Systems service must escalate its privileges to those of the root user." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-misc.xml:7(title) -msgid "Configuration Options" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-misc.xml:9(para) -msgid "These following options can be set in the manila.conf file." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:7(title) -msgid "Log files used by Shared File Systems" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:8(para) -msgid "The corresponding log file of each Shared File Systems service is stored in the /var/log/manila/ directory of the host on which each service runs." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:12(caption) -msgid "Log files used by Shared File Systems services" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:21(td) ./doc/config-reference/block-storage/section_cinder-log-files.xml:21(td) -msgid "Service/interface (for CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, and SUSE Linux Enterprise)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:25(td) ./doc/config-reference/block-storage/section_cinder-log-files.xml:25(td) -msgid "Service/interface (for Ubuntu and Debian)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:36(systemitem) -msgid "openstack-manila-api" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:39(systemitem) -msgid "manila-api" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:44(filename) -msgid "manila-manage.log" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:47(systemitem) ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:50(systemitem) -msgid "manila-manage" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:58(systemitem) -msgid "openstack-manila-scheduler" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:61(systemitem) -msgid "manila-scheduler" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:66(filename) -msgid "share.log" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:69(systemitem) -msgid "openstack-manila-share" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_manila-log-files.xml:72(systemitem) -msgid "manila-share" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:7(title) -msgid "Introduction to the Shared File Systems service" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:8(para) -msgid "The Shared File Systems service provides shared file systems that Compute instances can consume." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:10(para) -msgid "The Shared File Systems service provides:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:13(para) -msgid "manila-api. A WSGI app that authenticates and routes requests throughout the Shared File Systems service. It supports the OpenStack APIs." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:18(para) -msgid "manila-scheduler. Schedules and routes requests to the appropriate share service. The scheduler uses configurable filters and weighers to route requests. The Filter Scheduler is the default and enables filters on things like Capacity, Availability Zone, Share Types, and Capabilities as well as custom filters." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:25(para) -msgid "manila-share. Manages back-end devices that provide shared file systems. A manila-share service can run in one of two modes, with or without handling of share servers. Share servers export file shares via share networks. 
When share servers are not used, the networking requirements are handled outside of Manila." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:33(para) -msgid "The Shared File Systems service contains the following components:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:36(para) -msgid "Back-end storage devices. The Shared File Services service requires some form of back-end shared file system provider that the service is built on. The reference implementation uses the Block Storage service (Cinder) and a service VM to provide shares. Additional drivers are used to access shared file systems from a variety of vendor solutions." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:43(para) -msgid "Users and tenants (projects). The Shared File Systems service can be used by many different cloud computing consumers or customers (tenants on a shared system), using role-based access assignments. Roles control the actions that a user is allowed to perform. In the default configuration, most actions do not require a particular role unless they are restricted to administrators, but this can be configured by the system administrator in the appropriate policy.json file that maintains the rules. A user's access to manage particular shares is limited by tenant. Guest access to mount and use shares is secured by IP and/or user access rules. Quotas used to control resource consumption across available hardware resources are per tenant." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:54(para) ./doc/config-reference/block-storage/section_block-storage-overview.xml:64(para) -msgid "For tenants, quota controls are available to limit:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:57(para) -msgid "The number of shares that can be created." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:60(para) -msgid "The number of gigabytes that can be provisioned for shares." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:63(para) -msgid "The number of share snapshots that can be created." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:66(para) -msgid "The number of gigabytes that can be provisioned for share snapshots." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:69(para) -msgid "The number of share networks that can be created." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:72(para) -msgid "You can revise the default quota values with the Shared File Systems CLI, so the limits placed by quotas are editable by admin users." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:76(para) -msgid "Shares, snapshots, and share networks. The basic resources offered by the Shared File Systems service are shares, snapshots and share networks:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:81(para) -msgid "Shares. A share is a unit of storage with a protocol, a size, and an access list. Shares are the basic primitive provided by Manila. All shares exist on a backend. 
Some shares are associated with share networks and share servers. The main protocols supported are NFS and CIFS, but other protocols are supported as well." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:89(para) -msgid "Snapshots. A snapshot is a point in time copy of a share. Snapshots can only be used to create new shares (containing the snapshotted data). Shares cannot be deleted until all associated snapshots are deleted." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml:95(para) -msgid "Share networks. A share network is a tenant-defined object that informs Manila about the security and network configuration for a group of shares. Share networks are only relevant for backends that manage share servers. A share network contains a security service and network/subnet." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_share-drivers.xml:7(title) -msgid "Share drivers" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_share-drivers.xml:8(para) -msgid "To use different share drivers for the manila-share service, use the parameters described in these sections." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/section_share-drivers.xml:11(para) -msgid "The share drivers are included in the Shared File Systems repository (https://git.openstack.org/cgit/openstack/manila/tree/manila/share/drivers). To set a share driver, use the share_driver flag. For example, to use the generic reference driver:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:4(title) ./doc/config-reference/block-storage/drivers/glusterfs-driver.xml:6(title) -msgid "GlusterFS driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:5(para) -msgid "GlusterFS driver uses GlusterFS, an open source distributed file system, as the storage back end for serving file shares to the Shared File Systems clients." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:11(title) ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:23(title) ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:28(title) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:12(title) ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:142(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:28(title) -msgid "Supported shared filesystems" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:14(para) ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:26(para) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:15(para) ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:145(para) -msgid "NFS (access by IP)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:21(title) ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:38(title) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:22(title) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:66(title) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:24(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:47(title) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:35(title) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:10(title) ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:93(title) ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:22(title) ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:23(title) ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:17(title) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:60(title) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:10(title) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:17(title) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:241(title) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:73(title) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:38(title) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:18(title) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:21(title) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:289(title) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:59(title) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:35(title) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:17(title) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:18(title) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:99(title) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:48(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:53(title) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:98(title) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:14(title) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:16(title) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:56(title) 
./doc/config-reference/block-storage/drivers/lenovo-driver.xml:48(title) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:15(title) -msgid "Supported operations" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:24(para) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:25(para) -msgid "Create NFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:29(para) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:30(para) -msgid "Delete NFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:34(para) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:50(para) -msgid "Allow NFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:39(para) -msgid "only 'rw' access" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:46(para) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:62(para) -msgid "Deny NFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:56(para) -msgid "Install glusterfs-server package, version >= 3.5.x, on the storage back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:62(para) -msgid "Install NFS-Ganesha, version >=2.1, if using NFS-Ganesha as the NFS server for the GlusterFS back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:68(para) -msgid "Install glusterfs and glusterfs-fuse package, version >=3.5.x, on the Shared File Systems service host." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:74(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:97(para) -msgid "Establish network connection between the Shared File Systems service host and the storage back end." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:82(title) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:109(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:105(title) -msgid "Shared File Systems service driver configuration setting" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:83(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:106(para) -msgid "The following parameters in the Shared File Systems service's configuration file need to be set:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:89(para) -msgid "share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:95(para) -msgid "If the back-end GlusterFS server runs on the Shared File Systems service host machine," -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:101(para) -msgid "glusterfs_target = <glustervolserver>:/<glustervolid>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:107(para) -msgid "And if the back-end GlusterFS server runs remotely," -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:112(para) -msgid "glusterfs_target = <username>@<glustervolserver>:/<glustervolid>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:120(para) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:157(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:179(para) -msgid "The following configuration parameters are optional:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:127(term) -msgid "glusterfs_nfs_server_type =" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:131(para) -msgid "<NFS server type used by the GlusterFS back end, Gluster or Ganesha. 
Gluster is the default type>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:143(term) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:186(term) -msgid "glusterfs_mount_point_base =" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:147(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:190(para) -msgid "<base path of GlusterFS volume mounted on the Shared File Systems service host>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:156(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:199(para) -msgid "glusterfs_path_to_private_key = <path to Shared File Systems service host's private key file>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:162(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:205(para) -msgid "glusterfs_server_password = <password of remote GlusterFS server machine>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:170(title) ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:58(title) ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:174(title) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:205(title) ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:157(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:213(title) -msgid "Known restrictions" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:173(para) -msgid "The driver does not support network segmented multi-tenancy model, but instead works over a flat network, where the tenants share a network." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:180(para) -msgid "If NFS Ganesha is the NFS server used by the GlusterFS back end, then the shares can be accessed by NFSv3 and v4 protocols. However, if Gluster NFS is used by the GlusterFS back end, then the shares can only be accessed by NFSv3 protocol." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:189(para) -msgid "All Shared File Systems service shares, which map to subdirectories within a GlusterFS volume, are currently created within a single GlusterFS volume of a GlusterFS storage pool." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:197(para) -msgid "The driver does not provide read-only access level for shares." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:211(title) ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:90(title) ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:198(title) ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:233(title) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:462(title) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:289(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:264(title) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:131(title) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:493(title) -msgid "Driver configuration options" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml:212(para) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:265(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:4(title) -msgid "NetApp Clustered Data ONTAP driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:5(para) -msgid "The Shared File Systems service can be configured to use NetApp clustered Data ONTAP version 8." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:10(title) ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:21(title) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:272(title) -msgid "Network approach" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:11(para) -msgid "L3 connectivity between the storage cluster and Shared File Systems service host should exist, and VLAN segmentation should be configured." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:15(para) -msgid "The clustered Data ONTAP driver creates storage virtual machines (SVM, previously known as vServers) as representations of the Shared File Systems service share server interface, configures logical interfaces (LIFs) and stores shares there." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:31(para) -msgid "CIFS (authentication by user)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:38(title) -msgid "Required licenses" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:41(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:47(para) -msgid "NFS" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:46(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:42(para) -msgid "CIFS" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:51(para) -msgid "FlexClone" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:61(para) -msgid "For CIFS shares an external active directory service is required. Its data should be provided via security-service that is attached to used share-network." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:68(para) -msgid "Share access rule by user for CIFS shares can be created only for existing user in active directory." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:74(para) -msgid "To be able to configure clients to security services, the time on these external security services and storage should be synchronized. The maximum allowed clock skew is 5 minutes." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml:91(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:4(title) -msgid "HDFS native driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:5(para) -msgid "HDFS native driver is a plug-in based on the Shared File Systems service, which uses Hadoop distributed file system (HDFS), a distributed file system designed to hold very large amounts of data, and provide high-throughput access to the data." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:11(para) -msgid "A Shared File Systems service share in this driver is a subdirectory in hdfs root directory. Instances talk directly to the HDFS storage back end with 'hdfs' protocol. And access to each share is allowed by user based access type, which is aligned with HDFS ACLs to support access control of multiple users and groups." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:21(para) -msgid "The storage back end and Shared File Systems service hosts should be in a flat network, otherwise, the L3 connectivity between them should exist." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:31(para) -msgid "HDFS (authentication by user)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:41(para) -msgid "Create HDFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:46(para) -msgid "Delete HDFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:51(para) -msgid "Allow HDFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:56(para) -msgid "Only support user access type." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:61(para) -msgid "* Support level of access (ro/rw)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:66(para) -msgid "Deny HDFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:71(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:104(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:48(para) -msgid "Create snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:76(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:109(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:53(para) -msgid "Delete snapshot." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:81(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:114(para) -msgid "Create share from snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:91(para) -msgid "Install HDFS package, version >= 2.4.x, on the storage back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:97(para) -msgid "To enable access control, the HDFS file system must have ACLs enabled." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:103(para) -msgid "Establish network connection between the Shared File Systems service host and storage back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:111(title) -msgid "Shared File Systems service driver configuration" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:114(para) -msgid "share_driver = manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:122(term) -msgid "hdfs_namenode_ip = the IP address of the HDFS namenode, and only single" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:127(para) -msgid "namenode is supported now" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:135(para) -msgid "hdfs_namenode_port = the port of the HDFS namenode service" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:140(para) -msgid "hdfs_ssh_port = HDFS namenode SSH port" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:145(para) -msgid "hdfs_ssh_name = HDFS namenode SSH login name" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:152(term) -msgid "hdfs_ssh_pw = HDFS namenode SSH login password, this parameter is not" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:157(para) -msgid "necessary, if the following hdfs_ssh_private_key is configured" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:166(para) -msgid "hdfs_ssh_private_key = Path to the HDFS namenode private key to ssh login" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:177(para) -msgid "This driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported by the tenant specific user authentication." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:184(para) -msgid "Only support for single HDFS namenode in Kilo release." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml:199(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:4(title) -msgid "IBM GPFS driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:5(para) -msgid "GPFS driver uses IBM General Parallel File System (GPFS), a high-performance, clustered file system, developed by IBM, as the storage back end for serving file shares to the Shared File Systems service clients." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:35(para) -msgid "Create share snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:40(para) -msgid "Delete share snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:45(para) -msgid "Create share from a share snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:55(para) -msgid "Currently only 'rw' access level is supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:72(para) -msgid "Install GPFS with server license, version >= 2.0, on the storage back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:78(para) -msgid "Install Kernel NFS or Ganesha NFS server on the storage back-end servers." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:84(para) -msgid "If using Ganesha NFS, currently NFS Ganesha v1.5 and v2.0 are supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:90(para) -msgid "Create a GPFS cluster and create a filesystem on the cluster, that will be used to create the Shared File Systems service shares." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:96(para) -msgid "Enable quotas for the GPFS file system (mmchfs -Q yes)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:101(para) -msgid "Establish network connection between the Shared File Systems Service host and the storage back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:110(para) -msgid "The following parameters in the Shared File Systems service configuration file need to be set:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:116(para) -msgid "share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:121(para) -msgid "gpfs_share_export_ip = <IP to be added to GPFS export string>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:127(para) -msgid "If the back-end GPFS server is not running on the Shared File Systems service host machine, the following options are required to SSH to the remote GPFS back-end server:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:135(para) -msgid "gpfs_ssh_login = <GPFS server SSH login name>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:138(para) -msgid "and one of the following settings is required to execute commands over SSH:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:144(para) -msgid "gpfs_ssh_private_key = <path to GPFS server SSH private key for login>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:150(para) -msgid "gpfs_ssh_password = <GPFS server SSH login password>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:162(para) -msgid "gpfs_mount_point_base = <base folder where exported shares are located>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:168(para) -msgid "gpfs_nfs_server_type = <KNFS|GNFS>" -msgstr "" - -#: 
./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:173(para) -msgid "gpfs_nfs_server_list = <list of the fully qualified NFS server names>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:179(para) -msgid "gpfs_ssh_port = <ssh port number>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:186(term) -msgid "knfs_export_options = <options to use when creating a share using kernel>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:191(para) -msgid "<NFS server>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:199(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:175(para) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:102(para) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:266(para) -msgid "Restart of manila-share service is needed for the configuration changes to take effect." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:208(para) -msgid "The driver does not support a segmented-network multi-tenancy model but instead works over a flat network where the tenants share a network." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:215(para) -msgid "While using remote GPFS node, with Ganesha NFS, 'gpfs_ssh_private_key' for remote login to the GPFS node must be specified and there must be a passwordless authentication already setup between the manila-share service and the remote GPFS node." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml:234(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:4(title) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:6(title) -msgid "EMC VNX driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:5(para) -msgid "The EMC Shared File Systems service driver framework (EMCShareDriver) utilizes the EMC storage products to provide the shared file systems to OpenStack. The EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different EMC storage products." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:11(para) -msgid "The VNX plug-in is the plug-in which manages the VNX to provide shared filesystems. The EMC driver framework with the VNX plug-in is referred to as the VNX driver in this document." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:16(para) -msgid "This driver performs the operations on VNX by XMLAPI and the file command line. Each back end manages one Data Mover of VNX. Multiple Shared File Systems service back ends need to be configured to manage multiple Data Movers." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:26(para) -msgid "VNX OE for File version 7.1 or higher" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:31(para) -msgid "VNX Unified, File only, or Gateway system with a single storage back end" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:37(para) -msgid "The following licenses should be activated on VNX for File:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:52(para) -msgid "SnapSure (for snapshot)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:57(para) -msgid "ReplicationV2 (for create share from snapshot)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:67(para) -msgid "The following operations will be supported on the VNX array:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:72(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:28(para) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:41(para) -msgid "Create CIFS/NFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:77(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:31(para) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:46(para) -msgid "Delete CIFS/NFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:82(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:34(para) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:51(para) -msgid "Allow CIFS/NFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:87(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:374(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:270(para) -msgid "Only IP access type is supported for NFS." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:92(para) ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:379(para) -msgid "Only user access type is supported for CIFS." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:99(para) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:45(para) -msgid "Deny CIFS/NFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:119(para) -msgid "While the generic driver creates shared filesystems based on cinder volumes attached to nova VMs, the VNX driver performs similar operations using the Data Movers on the array." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:126(title) -msgid "Pre-configurations on VNX" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:129(para) -msgid "Enable Unicode on Data Mover." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:134(para) -msgid "The VNX driver requires that the Unicode is enabled on Data Mover." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:137(para) -msgid "CAUTION: After enabling Unicode, you cannot disable it. 
If there are some filesystems created before Unicode is enabled on the VNX, consult the storage administrator before enabling Unicode." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:142(para) -msgid "To check the Unicode status on Data Mover, use the following VNX File command on the VNX control station:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:147(para) -msgid "server_cifs <mover_name> | head where: mover_name = <name of the Data Mover>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:152(para) -msgid "Check the value of I18N mode field. UNICODE mode is shown as I18N mode = UNICODE" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:157(para) -msgid "To enable the Unicode for Data Mover:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:161(para) -msgid "uc_config -on -mover <mover_name> where: mover_name = <name of the Data Mover>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:166(para) -msgid "Refer to the document Using International Character Sets on VNX for File on [EMC support site](http://support.emc.com) for more information." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:174(para) -msgid "Enable CIFS service on Data Mover." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:179(para) -msgid "Ensure the CIFS service is enabled on the Data Mover which is going to be managed by VNX driver." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:183(para) -msgid "To start the CIFS service, use the following command:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:187(para) -msgid "server_setup <mover_name> -Protocol cifs -option start [=<n>] where: <mover_name> = <name of the Data Mover> [=<n>] = <number of threads for CIFS users>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:194(para) -msgid "Note: If there is 1 GB of memory on the Data Mover, the default is 96 threads; however, if there is over 1 GB of memory, the default number of threads is 256." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:199(para) -msgid "To check the CIFS service status, use this command:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:203(para) -msgid "server_cifs <mover_name> | head where: <mover_name> = <name of the Data Mover>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:208(para) -msgid "The command output will show the number of CIFS threads started." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:213(para) -msgid "NTP settings on Data Mover." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:218(para) -msgid "VNX driver only supports CIFS share creation with share network which has an Active Directory security-service associated." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:222(para) -msgid "Creating CIFS share requires that the time on the Data Mover is in sync with the Active Directory domain so that the CIFS server can join the domain. Otherwise, the domain join will fail when creating share with this security service. 
There is a limitation that the time of the domains used by security-services even for different tenants and different share networks should be in sync. Time difference should be less than 10 minutes." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:231(para) -msgid "It is recommended to set the NTP server to the same public NTP server on both the Data Mover and domains used in security services to ensure the time is in sync everywhere." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:236(para) -msgid "Check the date and time on Data Mover:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:240(para) -msgid "server_date <mover_name> where: mover_name = <name of the Data Mover>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:245(para) -msgid "Set the NTP server for Data Mover:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:249(para) -msgid "server_date <mover_name> timesvc start ntp <host> [<host> ...] where: mover_name = <name of the Data Mover> host = <IP address of the time server host>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:255(para) -msgid "Note: The host must be running the NTP protocol. Only 4 host entries are allowed." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:261(para) -msgid "Configure User Mapping on the Data Mover." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:266(para) -msgid "Before creating CIFS share using VNX driver, you must select a method of mapping Windows SIDs to UIDs and GIDs. EMC recommends using usermapper in single protocol (CIFS) environment which is enabled on VNX by default." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:272(para) -msgid "To check usermapper status, use this command syntax:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:276(para) -msgid "server_usermapper <movername> where: <movername> = <name of the Data Mover>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:281(para) -msgid "If usermapper is not started, the following command can be used to start the usermapper:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:286(para) -msgid "server_usermapper <movername> -enable where: <movername> = <name of the Data Mover>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:291(para) -msgid "For a multiple protocol environment, refer to Configuring VNX User Mapping on [EMC support site](http://support.emc.com) for additional information." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:299(para) -msgid "Network Connection." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:304(para) -msgid "In the current release, the share created by the VNX driver uses the first network device (physical port on NIC) of Data Mover to access the network." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:309(para) -msgid "Go to Unisphere to check the device list: Settings -> Network -> Settings for File (Unified system only) -> Device." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:315(title) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:147(title) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:88(title) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:187(title) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:209(title) -msgid "Backend configuration" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:316(para) -msgid "The following parameters need to be configured in /etc/manila/manila.conf for the VNX driver:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:321(para) -msgid "emc_share_backend = vnx emc_nas_server = <IP address> emc_nas_password = <password> emc_nas_login = <user> emc_nas_server_container = <Data Mover name> emc_nas_pool_name = <pool name> share_driver = manila.share.drivers.emc.driver.EMCShareDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:331(para) -msgid "emc_share_backend is the plug-in name. Set it to vnx for the VNX driver." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:337(para) -msgid "emc_nas_server is the control station IP address of the VNX system to be managed." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:343(para) -msgid "emc_nas_password and emc_nas_login fields are used to provide credentials to the VNX system. Only local users of VNX File is supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:350(para) -msgid "emc_nas_server_container field is the name of the Data Mover to serve the share service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:356(para) -msgid "emc_nas_pool_name is the pool name user wants to create volume from. The pools can be created using Unisphere for VNX." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:362(para) -msgid "Restart of the manila-share service is needed for the configuration changes to take effect." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:368(title) ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:264(title) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:108(title) -msgid "Restrictions" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:369(para) -msgid "The VNX driver has the following restrictions:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:384(para) -msgid "Only FLAT network and VLAN network are supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:389(para) -msgid "VLAN network is supported with limitations. The neutron subnets in different VLANs that are used to create share networks cannot have overlapped address spaces. Otherwise, VNX may have a problem to communicate with the hosts in the VLANs. To create shares for different VLANs with same subnet address, use different Data Movers." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:399(para) -msgid "The 'Active Directory' security service is the only supported security service type and it is required to create CIFS shares." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:406(para) -msgid "Only one security service can be configured for each share network." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:412(para) -msgid "Active Directory domain name of the 'active_directory' security service should be unique even for different tenants." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:418(para) -msgid "The time on Data Mover and the Active Directory domains used in security services should be in sync (time difference should be less than 10 minutes). It is recommended to use same NTP server on both the Data Mover and Active Directory domains." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:426(para) -msgid "On VNX the snapshot is stored in the SavVols. VNX system allows the space used by SavVol to be created and extended until the sum of the space consumed by all SavVols on the system exceeds the default 20% of the total space available on the system. If the 20% threshold value is reached, an alert will be generated on VNX. Continuing to create snapshot will cause the old snapshot to be inactivated (and the snapshot data to be abandoned). The limit percentage value can be changed manually by storage administrator based on the storage needs. Administrator is recommended to configure the notification on the SavVol usage. Refer to Using VNX SnapSure document on [EMC support site](http://support.emc.com) for more information." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:444(para) -msgid "VNX has limitations on the overall numbers of Virtual Data Movers, filesystems, shares, checkpoints, etc. Virtual Data Mover(VDM) is created by the VNX driver on the VNX to serve as the Shared File Systems service share server. Similarly, filesystem is created, mounted, and exported from the VDM over CIFS or NFS protocol to serve as the Shared File Systems service share. The VNX checkpoint serves as the Shared File Systems service share snapshot. Refer to the NAS Support Matrix document on [EMC support site](http://support.emc.com) for the limitations and configure the quotas accordingly." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml:463(para) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:132(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:4(title) -msgid "Huawei driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:5(para) -msgid "Huawei NAS driver is a plug-in based on the Shared File Systems service. The Huawei NAS driver can be used to provide functions such as the share and snapshot for virtual machines, or instances, in OpenStack. Huawei NAS driver enables the OceanStor V3 series V300R002 storage system to provide only network filesystems for OpenStack." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:13(para) -msgid "The OceanStor V3 series V300R002 storage system." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:18(para) -msgid "The following licenses should be activated on V3 for File: CIFS, NFS, HyperSnap License (for snapshot)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:25(para) -msgid "The following operations are supported on V3 storage:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:37(para) -msgid "Only IP access type is supported for NFS(ro/rw)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:40(para) -msgid "Only USER access type is supported for CIFS(ro/rw)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:58(para) -msgid "Manage CIFS/NFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:61(para) -msgid "Support pools in one backend." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:64(para) -msgid "Extend share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:67(para) -msgid "Shrink share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:70(para) -msgid "Support multi RestURLs." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:75(title) -msgid "Pre-configurations on Huawei" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:76(para) -msgid "1. Create a driver configuration file. The driver configuration file name must be the same as the manila_huawei_conf_file item in the manila_conf configuration file." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:81(para) -msgid "2. Configure Product. Product indicates the storage system type. For the OceanStor V3 series V300R002 storage systems, the driver configuration file is as follows:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:106(para) -msgid "Product is a type of storage product. Set it to V3." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:109(para) -msgid "LogicalPortIP is an IP address of the logical port." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:112(para) -msgid "RestURL is an access address of the REST interface. Multi RestURLs can be configured in <RestURL> (separated by \";\"). When one of the RestURL fails to connect, the driver will retry another automatically." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:117(para) -msgid "UserName is a user name of an administrator." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:122(para) -msgid "UserPassword is a password of an administrator." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:127(para) -msgid "Thin_StoragePool is a name of a thin storage pool to be used." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:130(para) -msgid "Thick_StoragePool is a name of a thick storage pool to be used." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:133(para) -msgid "WaitInterval is the interval time of querying the file system status." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:139(para) -msgid "Timeout is the timeout period for waiting command execution of a device to complete." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:148(para) -msgid "Modify the manila.conf Shared File Systems service configuration file and add share_driver and manila_huawei_conf_file items. Example for configuring a storage system:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:152(para) -msgid "share_driver = manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:158(para) -msgid "manila_huawei_conf_file = /etc/manila/manila_huawei_conf.xml" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:163(para) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:205(para) -msgid "driver_handles_share_servers = False" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:169(para) -msgid "As far as the Shared File Systems service requires share type for creation of shares, make sure that used share type has extra spec driver_handles_share_servers set to False otherwise Huawei back end will be filtered by manila-scheduler. If you do not provide share type with share creation request then default share type and its extra specs will be used." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:178(title) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:307(title) -msgid "Share types" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:179(para) -msgid "When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the default_share_type set in the Manila configuration file is used." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:182(para) -msgid "The Shared File Systems service requires that the share type includes the driver_handles_share_servers extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the Huawei driver, this must be set to False." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:186(para) -msgid "Another common extra-spec used to determine where a share is created is share_backend_name. When this extra-spec is defined in the share type, the share will be created on a backend with a matching share_backend_name." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:193(para) -msgid "capabilities:dedupe" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:196(para) -msgid "capabilities:compression" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:199(para) -msgid "capabilities:thin_provisioning" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:204(para) -msgid "huawei_smartcache:cachename" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:202(para) -msgid "capabilities:huawei_smartcache" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:211(para) -msgid "huawei_smartpartition:partitionname" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:209(para) -msgid "capabilities:huawei_smartpartition" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:189(para) -msgid "The Shared File Systems service \"share types\" may contain qualified extra-specs, extra-specs that have significance for the backend driver and the CapabilityFilter. This commit makes the Huawei driver report the following boolean capabilities:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:219(para) -msgid "capabilities:dedupe='<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:222(para) -msgid "capabilities:compression='<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:225(para) -msgid "capabilities:thin_provisioning='<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:228(para) -msgid "capabilities:huawei_smartcache='<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:231(para) -msgid "huawei_smartcache:cachename=test_cache_name" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:236(para) -msgid "capabilities:huawei_smartpartition='<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:239(para) -msgid "huawei_smartpartition:partitionname=test_partition_name" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:216(para) -msgid "The scheduler will choose a host that supports the needed capability when the CapabilityFilter is used and a share type uses one or more of the following extra-specs:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:244(para) -msgid "thin_provisioning will be reported as True for backends that use thin provisioned pool. Backends that use thin provisioning also support Manila's over-subscription feature. 'thin_provisioning' will be reported as False for backends that use thick provisioned pool." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:248(para) -msgid "dedupe will be reported as True for backends that use deduplication technology." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:249(para) -msgid "compression will be reported as True for backends that use compression technology." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:250(para) -msgid "huawei_smartcache will be reported as True for backends that use smartcache technology. Adds SSDs into a high-speed cache pool and divides the pool into multiple cache partitions to cache hotspot data in random and small read I/Os." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:253(para) -msgid "huawei_smartpartition will be reported as True for backends that use smartpartition technology. Add share to the smartpartition named 'test_partition_name'. Allocates cache resources based on service characteristics, ensuring the quality of critical services." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:257(para) -msgid "snapshot_support will be reported as True for backends that support all snapshot functionalities, including create_snapshot, delete_snapshot, and create_share_from_snapshot. Huawei Driver does not support create_share_from_snapshot API now, so make sure that used share type has extra spec snapshot_support set to False." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:265(para) -msgid "The Huawei driver has the following restrictions:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:275(para) -msgid "Only USER access type is supported for CIFS." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml:290(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:4(title) -msgid "Generic approach for share provisioning" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:5(para) -msgid "The Shared File Systems service can be configured to use nova VMs and cinder volumes. There are two modules that handle them in the Shared File Systems service:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:10(para) -msgid "service_instance module creates VMs in nova with predefined image called service image. This module can be used by any driver for provisioning of service VMs to be able to separate share resources among tenants." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:16(para) -msgid "'generic' module operates with cinder volumes and VMs created by service_instance module, then creates shared filesystems based on volumes attached to VMs." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:24(title) -msgid "Network configurations" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:25(para) -msgid "Each driver can handle networking in its own way, see: https://wiki.openstack.org/wiki/manila/Networking." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:31(term) -msgid "One of two possible configurations can be chosen for share provisioning" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:36(para) -msgid "using service_instance module:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:42(term) -msgid "- Service VM has one net interface from net that is connected to public router." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:47(para) -msgid "For successful creation of share, user network should be connected to public router too." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:54(term) -msgid "- Service VM has two net interfaces, first one connected to service network," -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:59(para) -msgid "second one connected directly to user's network." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:67(title) -msgid "Requirements for service image" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:70(para) -msgid "Linux based distro" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:75(para) -msgid "NFS server" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:80(para) -msgid "Samba server >=3.2.0, that can be configured by data stored in registry" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:86(para) -msgid "SSH server" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:91(para) -msgid "Two net interfaces configured to DHCP (see network approaches)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:96(para) -msgid "'exportfs' and 'net conf' libraries used for share actions" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:103(term) -msgid "Following files will be used, so if their paths differ one needs to create at" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:108(para) -msgid "least symlinks for them:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:116(para) -msgid "/etc/exports (permanent file with NFS exports)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:121(para) -msgid "/var/lib/nfs/etab (temporary file with NFS exports used by 'exportfs')" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:127(para) -msgid "/etc/fstab (permanent file with mounted filesystems)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:132(para) -msgid "/etc/mtab (temporary file with mounted filesystems used by 'mount')" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:150(para) -msgid "CIFS (access by IP)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:160(para) -msgid "One of nova's configurations only allows 26 shares per server. This limit comes from the maximum number of virtual PCI interfaces that are used for block device attaching. There are 28 virtual PCI interfaces, in this configuration, two of them are used for server needs and other 26 are used for attaching block devices that are used for shares." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:170(para) -msgid "Juno version works only with neutron. Each share should be created with neutron-net and neutron-subnet IDs provided via share-network entity." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:177(para) -msgid "Juno version handles security group, flavor, image, keypair for nova VM and also creates service networks, but does not use availability zones for nova VMs and volume types for cinder block devices." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:185(para) -msgid "Juno version does not use security services data provided with share-network. These data will be just ignored." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:191(para) -msgid "Liberty version adds a share extend capability. Share access will be briefly interrupted during an extend operation." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:197(para) -msgid "Liberty version adds a share shrink capability, but this capability is not effective because generic driver shrinks only filesystem size and doesn't shrink the size of cinder volume." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:207(title) ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:251(title) ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:139(title) ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:111(title) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:208(title) -msgid "Driver options" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/generic-driver.xml:208(para) -msgid "The following table contains the configuration options specific to this driver." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:4(title) -msgid "GlusterFS Native driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:5(para) -msgid "GlusterFS Native driver uses GlusterFS, an open source distributed file system, as the storage back end for serving file shares to Shared File Systems service clients." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:10(para) -msgid "A Shared File Systems service share is a GlusterFS volume. This driver uses flat-network (share-server-less) model. Instances directly talk with the GlusterFS back end storage pool. The instances use 'glusterfs' protocol to mount the GlusterFS shares. Access to each share is allowed via TLS Certificates. Only the instance which has the TLS trust established with the GlusterFS back end can mount and hence use the share. Currently only 'rw' access is supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:22(para) -msgid "L3 connectivity between the storage back end and the host running the Shared File Systems share service should exist." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:31(para) -msgid "GlusterFS (access by TLS Certificates (cert access type))" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:39(title) -msgid "Multi-tenancy model" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:40(para) -msgid "The driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported using tenant specific TLS certificates." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:50(para) -msgid "Create GlusterFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:55(para) -msgid "Delete GlusterFS share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:60(para) -msgid "Allow GlusterFS share access (rw)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:65(para) -msgid "Deny GlusterFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:70(para) -msgid "Create GlusterFS snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:75(para) -msgid "Delete GlusterFS snapshot." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:85(para) -msgid "Install glusterfs-server package, version >= 3.6.x, on the storage back end." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:91(para) -msgid "Install glusterfs and glusterfs-fuse package, version >=3.6.x, on the Shared File Systems service host." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:115(term) -msgid "share_driver =" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:119(para) -msgid "manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:129(term) -msgid "glusterfs_servers = List of GlusterFS servers which provide volumes" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:134(para) -msgid "that can be used to create shares. The servers are expected to be of distinct Gluster clusters (ie. should not be gluster peers). Each server should be of the form [<remoteuser>@]<glustervolserver>." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:140(para) -msgid "The optional <remoteuser>@ part of the server URI indicates SSH access for cluster management (see related optional parameters below). If it is not given, direct command line management is performed (the Shared File Systems service host is assumed to be part of the GlusterFS cluster the server belongs to)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:156(term) -msgid "glusterfs_volume_pattern = Regular expression template" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:160(para) -msgid "used to filter GlusterFS volumes for share creation. The regex template can contain the #{size} parameter which matches a number (sequence of digits) and the value shall be interpreted as size of the volume in GB. Examples: manila-share-volume-\\d+$, manila-share-volume-#{size}G-\\d+$; with matching volume names, respectively: manila-share-volume-12, manila-share-volume-3G-13\". In latter example, the number that matches #{size}, that is, 3, is an indication that the size of volume is 3G." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:216(para) -msgid "GlusterFS volumes are not created on demand. 
A pre-existing set of GlusterFS volumes should be supplied by the GlusterFS cluster(s), conforming to the naming convention encoded by glusterfs_volume_pattern. However, the GlusterFS endpoint is allowed to extend this set any time (so the Shared File Systems service and GlusterFS endpoints are expected to communicate volume supply/demand out-of-band). glusterfs_volume_pattern can include a size hint (with #{size} syntax), which, if present, requires the GlusterFS end to indicate the size of the shares in GB in the name. (On share creation, the Shared File Systems service picks volumes at least as big as the requested one.)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:235(para) -msgid "Certificate setup (also known as trust setup) between instance and storage back end is out of band of the Shared File Systems service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:241(para) -msgid "Support for 'create_share_from_snapshot' is planned for Liberty release." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml:247(para) -msgid "For the Shared File Systems service to use GlusterFS volumes, the name of the trashcan directory in GlusterFS volumes must not be changed from the default." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:4(title) -msgid "EMC Isilon driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:5(para) -msgid "The EMC Shared File Systems driver framework (EMCShareDriver) utilizes EMC storage products to provide shared file systems to OpenStack. The EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different EMC storage products." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:12(para) -msgid "The Isilon driver is a plug-in for the EMC framework which allows the Shared File Systems service to interface with an Isilon back end to provide a shared filesystem. The EMC driver framework with the Isilon plug-in is referred to as the \"Isilon Driver\" in this document." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:19(para) -msgid "This Isilon Driver interfaces with an Isilon cluster via the REST Isilon Platform API (PAPI) and the RESTful Access to Namespace API (RAN)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:28(para) -msgid "Isilon cluster running OneFS 7.2 or higher" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:36(para) -msgid "The following operations will be supported on an Isilon cluster:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:56(para) ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:114(para) -msgid "Only IP access type is supported for NFS and CIFS." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:61(para) -msgid "* Only RW access is supported." 
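Referring back to the GlusterFS Native driver options described above, a minimal backend stanza might look like this (a sketch; the section name is hypothetical, the server URIs and volume pattern are illustrative, and driver_handles_share_servers = False is an assumption based on the driver's share-server-less model):

    [glusterfs_native_backend]
    share_driver = manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver
    # Servers from distinct Gluster clusters; the remoteuser@ prefix enables SSH management
    glusterfs_servers = remoteuser@glustervolserver1,glustervolserver2
    # Volume naming pattern; #{size} encodes the volume size in GB
    glusterfs_volume_pattern = manila-share-volume-#{size}G-\d+$
    driver_handles_share_servers = False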
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:66(para) -msgid "Deny CIFS/NFS share access" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:76(para) -msgid "Delete snapshot" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:81(para) -msgid "Create share from snapshot" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:89(para) -msgid "The following parameters need to be configured in the Shared File Systems service configuration file for the Isilon driver:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:94(para) -msgid "share_driver = manila.share.drivers.emc.driver.EMCShareDriver emc_share_backend = isilon emc_nas_server = <IP address of Isilon cluster> emc_nas_login = <username> emc_nas_password = <password> isilon_share_root_dir = <directory on Isilon where shares will be created>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:109(para) -msgid "The Isilon driver has the following restrictions:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:119(para) -msgid "Only FLAT network is supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml:124(para) -msgid "Quotas are not yet supported." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:4(title) -msgid "HPE 3PAR driver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:5(para) -msgid "The HPE 3PAR driver provides NFS and CIFS shared file systems to OpenStack using HPE 3PAR's File Persona capabilities." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:11(para) -msgid "The following operations are supported with HPE 3PAR File Persona:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:16(para) -msgid "Create/delete NFS and CIFS shares." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:21(para) -msgid "Shares are not accessible until access rules allow access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:28(para) -msgid "Allow/deny NFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:33(para) -msgid "IP access rules are required for NFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:38(para) -msgid "User access rules are not allowed for NFS shares." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:43(para) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:78(para) -msgid "Access level (RW/RO) is ignored." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:48(para) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:83(para) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:106(para) -msgid "Shares created from snapshots are always read-only." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:53(para) ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:88(para) -msgid "Shares not created from snapshots are read-write (and subject to ACLs)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:61(para) -msgid "Allow/deny CIFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:66(para) -msgid "Both IP and user access rules are required for CIFS share access." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:72(para) -msgid "User access requires a 3PAR local user (LDAP and AD is not yet supported)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:96(para) -msgid "Create/delete snapshots." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:101(para) -msgid "Create shares from snapshots." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:113(para) -msgid "Share networks are not supported. Shares are created directly on the 3PAR without the use of a share server or service VM. Network connectivity is setup outside of the Shared File Systems service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:121(para) -msgid "On the system running the manila-share service:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:126(para) -msgid "python-3parclient version 4.0.0 or newer from PyPI." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:131(para) -msgid "On the HPE 3PAR array:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:136(para) -msgid "HPE 3PAR Operating System software version 3.2.1 MU3 or higher." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:141(para) -msgid "A license that enables the File Persona feature." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:146(para) -msgid "The array class and hardware configuration must support File. Persona" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:154(title) -msgid "Pre-configuration on the HPE 3PAR" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:157(para) -msgid "HPE 3PAR File Persona must be initialized and started (startfs)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:162(para) -msgid "A File Provisioning Group (FPG) must be created for use with the Shared File Systems service." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:168(para) -msgid "A Virtual File Server (VFS) must be created for the FPG." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:173(para) -msgid "The VFS must be configured with an appropriate share export IP address." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:179(para) -msgid "A local user in the Administrators group is needed for CIFS shares." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:188(para) -msgid "The following parameters need to be configured in the Shared File Systems service configuration file for the HPE 3PAR driver:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:194(para) -msgid "share_backend_name = <back end name to enable>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:199(para) -msgid "share_driver = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:210(para) -msgid "hpe3par_fpg = <FPG to use for share creation>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:215(para) -msgid "hpe3par_share_ip_address = <IP address to use for share export location>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:221(para) -msgid "hpe3par_san_ip = <IP address for SSH access to the SAN controller>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:227(para) -msgid "hpe3par_api_url = <3PAR WS API Server URL>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:232(para) -msgid "hpe3par_username = <3PAR username with the 'edit' role>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:237(para) -msgid "hpe3par_password = <3PAR password for the user specified in hpe3par_username>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:243(para) -msgid "hpe3par_san_login = <Username for SSH access to the SAN controller>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:249(para) -msgid "hpe3par_san_password = <Password for SSH access to the SAN controller>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:255(para) -msgid "hpe3par_debug = <False or True for extra debug logging>" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:260(para) -msgid "The hpe3par_share_ip_address must be a valid IP address for the configured FPG's VFS. This IP address is used in export locations for shares that are created. Networking must be configured to allow connectivity from clients to shares." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:273(para) -msgid "Connectivity between the storage array (SSH/CLI and WSAPI) and the Shared File Systems service host is required for share management." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:277(para) -msgid "Connectivity between the clients and the VFS is required for mounting and using the shares. 
This includes:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:283(para) -msgid "Routing from the client to the external network" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:288(para) -msgid "Assigning the client an external IP address (e.g., a floating IP)" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:294(para) -msgid "Configuring the Shared File Systems service host networking properly for IP forwarding" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:300(para) -msgid "Configuring the VFS networking properly for client subnets" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:308(para) -msgid "When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the default_share_type set in the Shared File Systems service configuration file is used." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:315(para) -msgid "The Shared File Systems service requires that the share type includes the driver_handles_share_servers extra-spec. This ensures that the share will be created on a back end that supports the requested driver_handles_share_servers (share networks) capability. For the HPE 3PAR driver, this must be set to False." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:323(para) -msgid "Another common Shared File Systems service extra-spec used to determine where a share is created is share_backend_name. When this extra-spec is defined in the share type, the share will be created on a back end with a matching share_backend_name." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:330(para) -msgid "The HPE 3PAR driver automatically reports capabilities based on the FPG used for each back end. Share types with extra specs can be created by an administrator to control which share types are allowed to use FPGs with or without specific capabilities. The following extra-specs are used with the capabilities filter and the HPE 3PAR driver:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:340(para) -msgid "hpe3par_flash_cache = '<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:345(para) -msgid "thin_provisioning = '<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:350(para) -msgid "dedupe = '<is> True' or '<is> False'" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:355(para) -msgid "hpe3par_flash_cache will be reported as True for back ends that have 3PAR's Adaptive Flash Cache enabled." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:359(para) -msgid "thin_provisioning will be reported as True for back ends that use thin provisioned volumes. FPGs that use fully provisioned volumes will report False. Backends that use thin provisioning also support the Shared File Systems service's over-subscription feature." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:365(para) -msgid "dedupe will be reported as True for back ends that use deduplication technology." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:369(para) -msgid "Scoped extra-specs are used to influence vendor-specific implementation details. Scoped extra-specs use a prefix followed by a colon. For HPE 3PAR these extra-specs have a prefix of hpe3par." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:374(para) -msgid "The following HPE 3PAR extra-specs are used when creating CIFS (SMB) shares:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:380(para) -msgid "hpe3par:smb_access_based_enum = true or false" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:385(para) -msgid "hpe3par:smb_continuous_avail = true or false" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:390(para) -msgid "hpe3par:smb_cache = off, manual, optimized or auto" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:395(para) -msgid "smb_access_based_enum (Access Based Enumeration) specifies if users can see only the files and directories to which they have been allowed access on the shares. The default is false." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:400(para) -msgid "smb_continuous_avail (Continuous Availability) specifies if SMB3 continuous availability features should be enabled for this share. If not specified, the default is true." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:405(para) -msgid "smb_cache specifies client-side caching for offline files. Valid values are:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:411(para) -msgid "`off`: The client must not cache any files from this share. The share is configured to disallow caching." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:417(para) -msgid "`manual`: The client must allow only manual caching for the files open from this share." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:423(para) -msgid "`optimized`: The client may cache every file that it opens from this share. Also, the client may satisfy the file requests from its local cache. The share is configured to allow automatic caching of programs and documents." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:431(para) -msgid "`auto`: The client may cache every file that it opens from this share. The share is configured to allow automatic caching of documents." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:438(para) -msgid "If this is not specified, the default is manual." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:443(para) -msgid "The following HPE 3PAR extra-specs are used when creating NFS shares:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:449(para) -msgid "hpe3par:nfs_options = Comma separated list of NFS export options." 
-msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:455(para) -msgid "The NFS export options have the following limitations:" -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:461(para) -msgid "ro and rw are not allowed (will be determined by the driver)." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:466(para) -msgid "no_subtree_check and fsid are not allowed per HPE 3PAR CLI support." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:472(para) -msgid "(in)secure and (no_)root_squash are not allowed because the HPE 3PAR driver controls those settings." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:479(para) -msgid "All other NFS options are forwarded to the HPE 3PAR as part of share creation. The HPE 3PAR will do additional validation at share creation time. Refer to HPE 3PAR CLI help for more details." -msgstr "" - -#: ./doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml:494(para) -msgid "Configuration options specific to this driver are documented here: ." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:7(title) -msgid "Networking configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:8(para) -msgid "The options and descriptions listed in this introduction are auto generated from the code in the Networking service project, which provides software-defined networking between VMs run in Compute. The list contains common options, while the subsections list the options for the various networking plug-ins." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:21(para) -msgid "Use the following options to alter agent-related settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:26(title) -msgid "API" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:27(para) -msgid "Use the following options to alter API-related settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:32(title) -msgid "Token authentication" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:33(para) -msgid "Use the following options to alter token authentication settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:39(para) -msgid "Use the following options to alter Compute-related settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:45(para) -msgid "Use the following options to alter CORS-related settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:50(title) -msgid "Database" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:51(para) ./doc/config-reference/networking/section_networking-options-reference.xml:57(para) -msgid "Use the following options to alter Database-related settings." 
-msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:56(title) -msgid "DHCP agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:62(title) -msgid "Distributed virtual router" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:63(para) -msgid "Use the following options to alter DVR-related settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:68(title) -msgid "Firewall-as-a-Service driver" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:69(para) -msgid "Use the following options in the fwaas_driver.ini file for the FWaaS driver." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:77(title) -msgid "Load-Balancer-as-a-Service configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:78(para) -msgid "Use the following options in the neutron_lbaas.conf file for the LBaaS agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:81(para) -msgid "Use the following options in the lbaas_agent.ini file for the LBaaS agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:84(para) -msgid "Use the following options in the services_lbaas.conf file for the LBaaS agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:87(para) -msgid "Use the following options in the /etc/octavia/octavia.conf file for octavia config." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:93(title) -msgid "VPN-as-a-Service configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:94(para) -msgid "Use the following options in the vpnaas_agent.ini file for the VPNaaS agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:103(title) -msgid "IPv6 router advertisement" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:104(para) -msgid "Use the following options to alter IPv6 RA settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:109(title) -msgid "L3 agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:110(para) -msgid "Use the following options in the l3_agent.ini file for the L3 agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:116(title) -msgid "Logging" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:117(para) -msgid "Use the following options to alter logging settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:122(title) -msgid "Metadata Agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:123(para) -msgid "Use the following options in the metadata_agent.ini file for the Metadata agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:127(para) -msgid "Previously, neutron metadata agent connected to a neutron server via REST API using a neutron client. This is ineffective because keystone is then fully involved into the authentication process and gets overloaded." 
-msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:128(para) -msgid "The neutron metadata agent has been reworked to use RPC by default to connect to a server since Kilo release. This is a typical way of interacting between neutron server and its agents. If neutron server does not support metadata RPC then neutron client will be used." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:131(para) -msgid "Do not run the neutron-ns-metadata-proxy proxy namespace as root on a node with the L3 agent running. In OpenStack Kilo and newer, you can change the permissions of neutron-ns-metadata-proxy after the proxy installation using the and options." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:142(title) -msgid "Metering Agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:143(para) -msgid "Use the following options in the metering_agent.ini file for the Metering agent." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:149(title) -msgid "Nova" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:150(para) -msgid "Use the following options in the neutron.conf file to change nova-related settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:156(title) -msgid "Policy" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:157(para) -msgid "Use the following options in the neutron.conf file to change policy settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:163(title) -msgid "Quotas" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:164(para) -msgid "Use the following options in the neutron.conf file for the quota system." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:170(title) -msgid "Scheduler" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:171(para) -msgid "Use the following options in the neutron.conf file to change scheduler settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:177(title) -msgid "Security Groups" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:178(para) -msgid "Use the following options in the configuration file for your driver to change security group settings." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:182(para) -msgid "Now Networking uses iptables to achieve security group functions. In L2 agent with option enabled, it makes use of IPset to improve security group's performance, as it represents a hash set which is insensitive to the number of elements." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:186(para) -msgid "When a port is created, L2 agent will add an additional IPset chain to it's iptables chain, if the security group that this port belongs to has rules between other security group, the member of that security group will be added to the ipset chain." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:190(para) -msgid "If a member of a security group is changed, it used to reload iptables rules which is expensive. 
However, when IPset option is enabled on L2 agent, it does not need to reload iptables if only members of security group were changed, it should just update an IPset." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:196(para) -msgid "A single default security group has been introduced in order to avoid race conditions when creating a tenant's default security group. The race conditions are caused by the uniqueness check of a new security group name. A table default_security_group implements such a group. It has tenant_id field as a primary key and security_group_id, which is an identifier of a default security group. The migration that introduces this table has a sanity check that verifies if a default security group is not duplicated in any tenant." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:209(title) -msgid "SSL and Certification Authority" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-options-reference.xml:210(para) -msgid "Use the following options in the neutron.conf file to enable SSL." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:7(title) -msgid "Networking plug-ins" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:8(para) -msgid "OpenStack Networking introduces the concept of a plug-in, which is a back-end implementation of the OpenStack Networking API. A plug-in can use a variety of technologies to implement the logical API requests. Some OpenStack Networking plug-ins might use basic Linux VLANs and IP tables, while others might use more advanced technologies, such as L2-in-L3 tunneling or OpenFlow. These sections detail the configuration options for the various plug-ins." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:19(para) -msgid "The following plugins have been removed in Kilo:" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:22(para) -msgid "Ryu plugin. The Ryu team recommends that you migrate to the ML2 plugin with the ofagent mechanism driver. However, note that the functionality is not the same. There is no upgrade procedure currently available." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:28(para) -msgid "Mellanox plugin." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:32(title) -msgid "BaGpipe configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:38(title) -msgid "BigSwitch configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:44(title) -msgid "Brocade configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:50(title) -msgid "Brocade MLX L3 plug-in" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:51(para) ./doc/config-reference/networking/section_networking-plugins-ml2.xml:84(para) -msgid "Configure switch names to be used as group names as described below" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:58(title) -msgid "Brocade Vyatta layer 3 plug-in" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:59(para) -msgid "The Brocade Vyatta Layer 3 plug-in configures Vyatta vRouter. More information about the plug-in is available at: Brocade_Vyatta_L3_Plugin." 
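To make the default_security_group table described earlier in this section concrete, the following is a minimal SQLAlchemy sketch of a model with the two columns the text names (tenant_id as the primary key and security_group_id). The column types and lengths are assumptions for illustration, not the actual neutron schema definition.

    # Illustrative only: a model shaped like the default_security_group table
    # described above. Column types and lengths are assumed, not taken from neutron.
    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class DefaultSecurityGroup(Base):
        __tablename__ = 'default_security_group'

        # One row per tenant, so a duplicate default group cannot be recorded.
        tenant_id = sa.Column(sa.String(255), primary_key=True)
        # Identifier of the tenant's default security group.
        security_group_id = sa.Column(sa.String(36), nullable=False)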
-msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:63(para) -msgid "Use the following options to configure the Brocade Vyatta Layer 3 plug-in." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:69(title) -msgid "CISCO configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:75(title) -msgid "Fujitsu CFAB configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:81(title) -msgid "Fujitsu ISM configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:87(title) -msgid "CloudBase Hyper-V Agent configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:94(title) -msgid "Embrane configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:101(title) -msgid "IBM SDN-VE configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:107(title) ./doc/config-reference/networking/section_networking-plugins.xml:113(title) -msgid "Layer 2 Gateway configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:119(title) -msgid "Linux bridge Agent configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:127(title) -msgid "MidoNet configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:133(title) -msgid "NEC configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:139(title) -msgid "One Convergence NVSD configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:145(title) -msgid "Open Networking Operating System (ONOS) configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:151(title) -msgid "OpenContrail configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:157(title) -msgid "Open vSwitch Agent configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:164(title) -msgid "Virtual Network for Open vSwitch options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:170(title) -msgid "IPv6 Prefix Delegation configuradtion options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:176(title) -msgid "PLUMgrid configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:182(title) -msgid "SR-IOV configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:188(title) -msgid "VMware vSphere configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:194(title) -msgid "VMware NSX configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins.xml:200(title) -msgid "VMware DVS configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:6(title) -msgid "Networking sample configuration files" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:7(para) -msgid "All the files in this section can be found in /etc/neutron/." 
-msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:9(title) -msgid "neutron.conf" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:10(para) -msgid "Use the neutron.conf file to configure the majority of the OpenStack Networking options." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:18(para) -msgid "Use the api-paste.ini to configure the OpenStack Networking API." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:25(para) -msgid "Use the policy.json file to define additional access controls that apply to the OpenStack Networking service." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:33(para) -msgid "Use the rootwrap.conf file to define configuration values used by the script when the OpenStack Networking service must escalate its privileges to those of the root user." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:41(title) -msgid "Configuration files for plug-in agents" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:42(para) -msgid "Each plug-in agent that runs on an OpenStack Networking node, to perform local networking configuration for the node's VMs and networking services, has its own configuration file." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:46(title) -msgid "dhcp_agent.ini" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:52(title) -msgid "l3_agent.ini" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-sample-configuration-files.xml:58(title) -msgid "metadata_agent.ini" -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:10(para) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:8(para) -msgid "OpenStack projects use an open standard for messaging middleware known as AMQP. This messaging middleware enables the OpenStack services that run on multiple servers to talk to each other. OpenStack Oslo RPC supports three implementations of AMQP: RabbitMQ, Qpid, and ZeroMQ." -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:21(para) -msgid "OpenStack Oslo RPC uses RabbitMQ by default. Use these options to configure the RabbitMQ message system. The option is optional as long as RabbitMQ is the default messaging system. However, if it is included the configuration, you must set it to neutron.openstack.common.rpc.impl_kombu." -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:35(para) -msgid "Use these options to configure the RabbitMQ messaging system. You can configure messaging communication for different installation scenarios, tune retries for RabbitMQ, and define the size of the RPC thread pool. To monitor notifications through RabbitMQ, you must set the option to neutron.openstack.common.notifier.rpc_notifier in the neutron.conf file:" -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:47(para) -msgid "Use these options to configure the Qpid messaging system for OpenStack Oslo RPC. 
Qpid is not the default messaging system, so you must enable it by setting the option in the neutron.conf file:" -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:56(para) -msgid "This critical option points the compute nodes to the Qpid broker (server). Set the option to the host name where the broker runs in the neutron.conf file." -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:94(para) ./doc/config-reference/orchestration/section_orchestration-rpc.xml:79(para) -msgid "Use these additional options to configure the Qpid messaging driver for OpenStack Oslo RPC. These options are used infrequently." -msgstr "" - -#: ./doc/config-reference/networking/section_rpc-for-networking.xml:103(para) -msgid "Use these options to configure the ZeroMQ messaging system for OpenStack Oslo RPC. ZeroMQ is not the default messaging system, so you must enable it by setting the option in the neutron.conf file:" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:7(title) -msgid "Log files used by Networking" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:8(para) -msgid "The corresponding log file of each Networking service is stored in the /var/log/neutron/ directory of the host on which each service runs." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:12(caption) -msgid "Log files used by Networking services" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:20(td) -msgid "Service/interface" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:28(filename) -msgid "dhcp-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:31(systemitem) -msgid "neutron-dhcp-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:36(filename) -msgid "l3-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:39(systemitem) -msgid "neutron-l3-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:44(filename) -msgid "lbaas-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:47(systemitem) -msgid "neutron-lbaas-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:49(para) -msgid "The neutron-lbaas-agent service only runs when Load-Balancer-as-a-Service is enabled." 
-msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:57(filename) -msgid "linuxbridge-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:60(systemitem) -msgid "neutron-linuxbridge-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:65(filename) -msgid "metadata-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:68(systemitem) -msgid "neutron-metadata-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:73(filename) -msgid "metering-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:76(systemitem) -msgid "neutron-metering-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:81(filename) -msgid "openvswitch-agent.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:84(systemitem) -msgid "neutron-openvswitch-agent" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:89(filename) -msgid "server.log" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-log-files.xml:92(systemitem) -msgid "neutron-server" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:7(title) -msgid "Modular Layer 2 (ml2) configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:8(para) -msgid "The Modular Layer 2 (ml2) plug-in has two components: network types and mechanisms. You can configure these components separately. This section describes these configuration options." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:13(title) -msgid "Configure MTU for VXLAN tunnelling" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:14(para) -msgid "Specific MTU configuration is necessary for VXLAN to function as expected:" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:18(para) -msgid "One option is to increase the MTU value of the physical interface and physical switch fabric by at least 50 bytes. For example, increase the MTU value to 1550. This value enables an automatic 50-byte MTU difference between the physical interface (1500) and the VXLAN interface (automatically 1500-50 = 1450). An MTU value of 1450 causes issues when virtual machine taps are configured at an MTU value of 1500." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:28(para) -msgid "Another option is to decrease the virtual Ethernet devices' MTU. Set the option to 1450 in the neutron.conf file, and set all guest virtual machines' MTU to the same value by using a DHCP option. For information about how to use this option, see Configure OVS plug-in." 
-msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:40(title) -msgid "Modular Layer 2 (ml2) Flat Type configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:45(title) -msgid "Modular Layer 2 (ml2) GRE Type configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:50(title) -msgid "Modular Layer 2 (ml2) VLAN Type configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:55(title) -msgid "Modular Layer 2 (ml2) VXLAN Type configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:60(title) -msgid "Modular Layer 2 (ml2) Arista Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:67(title) -msgid "Modular Layer 2 (ml2) BaGpipe Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:72(title) -msgid "Modular Layer 2 (ml2) BigSwitch Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:77(title) -msgid "Modular Layer 2 (ml2) Brocade Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:82(title) -msgid "Modular Layer 3 (ml2) Brocade MLX ICX Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:89(title) -msgid "Modular Layer 2 (ml2) Cisco Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:94(title) -msgid "Modular Layer 2 (ml2) Freescale SDN Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:99(title) -msgid "Modular Layer 2 (ml2) Geneve Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:104(title) -msgid "Modular Layer 2 (ml2) OpenDaylight Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:106(para) -msgid "Use of VLANs with the OpenDaylight mechanism driver requires OpenDaylight Helium or newer to be installed." -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:111(title) -msgid "Modular Layer 2 (ml2) OpenFlow Agent (ofagent) Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:116(title) -msgid "Modular Layer 2 (ml2) L2 Population Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:121(title) -msgid "Modular Layer 2 (ml2) Tail-f NCS Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/networking/section_networking-plugins-ml2.xml:126(title) -msgid "Modular Layer 2 (ml2) SR-IOV Mechanism configuration options" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:11(title) -msgid "Configure Object Storage features" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:13(title) -msgid "Object Storage zones" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:14(para) -msgid "In OpenStack Object Storage, data is placed across different tiers of failure domains. 
First, data is spread across regions, then zones, then servers, and finally across drives. Data is placed to get the highest failure domain isolation. If you deploy multiple regions, the Object Storage service places the data across the regions. Within a region, each replica of the data should be stored in unique zones, if possible. If there is only one zone, data should be placed on different servers. And if there is only one server, data should be placed on different drives." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:25(para) -msgid "Regions are widely separated installations with a high-latency or otherwise constrained network link between them. Zones are arbitrarily assigned, and it is up to the administrator of the Object Storage cluster to choose an isolation level and attempt to maintain the isolation level through appropriate zone assignment. For example, a zone may be defined as a rack with a single power source. Or a zone may be a DC room with a common utility provider. Servers are identified by a unique IP/port. Drives are locally attached storage volumes identified by mount point." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:36(para) -msgid "In small clusters (five nodes or fewer), everything is normally in a single zone. Larger Object Storage deployments may assign zone designations differently; for example, an entire cabinet or rack of servers may be designated as a single zone to maintain replica availability if the cabinet becomes unavailable (for example, due to failure of the top of rack switches or a dedicated circuit). In very large deployments, such as service provider level deployments, each zone might have an entirely autonomous switching and power infrastructure, so that even the loss of an electrical circuit or switching aggregator would result in the loss of a single replica at most." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:50(title) -msgid "Rackspace zone recommendations" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:51(para) -msgid "For ease of maintenance on OpenStack Object Storage, Rackspace recommends that you set up at least five nodes. Each node is assigned its own zone (for a total of five zones), which gives you host level redundancy. This enables you to take down a single zone for maintenance and still guarantee object availability in the event that another zone fails during your maintenance." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:59(para) -msgid "You could keep each server in its own cabinet to achieve cabinet level isolation, but you may wish to wait until your Object Storage service is better established before developing cabinet-level isolation. OpenStack Object Storage is flexible; if you later decide to change the isolation level, you can take down one zone at a time and move them to appropriate new homes." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:67(title) -msgid "RAID controller configuration" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:68(para) -msgid "OpenStack Object Storage does not require RAID. In fact, most RAID configurations cause significant performance degradation. The main reason for using a RAID controller is the battery-backed cache. 
It is very important for data integrity reasons that when the operating system confirms a write has been committed that the write has actually been committed to a persistent location. Most disks lie about hardware commits by default, instead writing to a faster write cache for performance reasons. In most cases, that write cache exists only in non-persistent memory. In the case of a loss of power, this data may never actually get committed to disk, resulting in discrepancies that the underlying file system must handle." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:81(para) -msgid "OpenStack Object Storage works best on the XFS file system, and this document assumes that the hardware being used is configured appropriately to be mounted with the option. For more information, see the XFS FAQ." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:88(para) -msgid "To get the most out of your hardware, it is essential that every disk used in OpenStack Object Storage is configured as a standalone, individual RAID 0 disk; in the case of 6 disks, you would have six RAID 0s or one JBOD. Some RAID controllers do not support JBOD or do not support battery backed cache with JBOD. To ensure the integrity of your data, you must ensure that the individual drive caches are disabled and the battery backed cache in your RAID card is configured and used. Failure to configure the controller properly in this case puts data at risk in the case of sudden loss of power." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:100(para) -msgid "You can also use hybrid drives or similar options for battery backed up cache configurations without a RAID controller." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:106(title) -msgid "Throttle resources through rate limits" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:107(para) -msgid "Rate limiting in OpenStack Object Storage is implemented as a pluggable middleware that you configure on the proxy server. Rate limiting is performed on requests that result in database writes to the account and container SQLite databases. It uses memcached and is dependent on the proxy servers having highly synchronized time. The rate limits are limited by the accuracy of the proxy server clocks." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:116(title) -msgid "Configure rate limiting" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:117(para) -msgid "All configuration is optional. If no account or container limits are provided, no rate limiting occurs. Available configuration options include:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:123(para) -msgid "The container rate limits are linearly interpolated from the values given. 
A sample container rate limiting could be:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:126(para) -msgid "container_ratelimit_100 = 100" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:127(para) -msgid "container_ratelimit_200 = 50" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:128(para) -msgid "container_ratelimit_500 = 20" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:129(para) -msgid "This would result in:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:131(caption) -msgid "Values for Rate Limiting with Sample Configuration Settings" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:135(td) -msgid "Container Size" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:136(td) -msgid "Rate Limit" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:139(td) -msgid "0-99" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:140(td) -msgid "No limiting" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:143(td) ./doc/config-reference/object-storage/section_object-storage-features.xml:144(td) -msgid "100" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:148(td) -msgid "150" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:149(td) -msgid "75" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:152(td) -msgid "500" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:153(td) ./doc/config-reference/object-storage/section_object-storage-features.xml:157(td) -msgid "20" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:156(td) -msgid "1000" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:164(title) -msgid "Health check" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:165(para) -msgid "Provides an easy way to monitor whether the Object Storage proxy server is alive. If you access the proxy with the path /healthcheck, it responds with OK in the response body, which monitoring tools can use." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:173(title) -msgid "Domain remap" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:174(para) -msgid "Middleware that translates container and account parts of a domain to path parameters that the proxy server understands." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:182(title) -msgid "CNAME lookup" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:183(para) -msgid "Middleware that translates an unknown domain in the host header to something that ends with the configured storage_domain by looking up the given domain's CNAME record in DNS." 
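The "linearly interpolated" behaviour behind the container rate-limit sample values and table above can be illustrated with a short Python sketch. This is only the arithmetic implied by the table, not the actual Object Storage ratelimit middleware code.

    # Reproduces the sample table above from the three configured points.
    limits = [(100, 100), (200, 50), (500, 20)]  # (container size, max ops/sec)

    def container_rate_limit(size):
        if size < limits[0][0]:
            return None                      # 0-99: no limiting
        for (lo, lo_rate), (hi, hi_rate) in zip(limits, limits[1:]):
            if lo <= size < hi:
                # Linear interpolation between the surrounding configured points.
                frac = (size - lo) / float(hi - lo)
                return lo_rate + frac * (hi_rate - lo_rate)
        return limits[-1][1]                 # at or beyond the last point

    for size in (50, 100, 150, 500, 1000):
        print(size, container_rate_limit(size))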
-msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:193(title) -msgid "Temporary URL" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:194(para) -msgid "Allows the creation of URLs to provide temporary access to objects. For example, a website may wish to provide a link to download a large object in OpenStack Object Storage, but the Object Storage account has no public access. The website can generate a URL that provides GET access for a limited time to the resource. When the web browser user clicks on the link, the browser downloads the object directly from Object Storage, eliminating the need for the website to act as a proxy for the request. If the user shares the link with all his friends, or accidentally posts it on a forum, the direct access is limited to the expiration time set when the website created the link." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:205(literal) -msgid "temp_url_sig" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:207(para) -msgid "A cryptographic signature" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:211(literal) -msgid "temp_url_expires" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:213(para) -msgid "An expiration date, in Unix time" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:202(para) -msgid "A temporary URL is the typical URL associated with an object, with two additional query parameters:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:217(para) -msgid "An example of a temporary URL:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:223(para) -msgid "To create temporary URLs, first set the X-Account-Meta-Temp-URL-Key header on your Object Storage account to an arbitrary string. This string serves as a secret key. For example, to set a key of b3968d0207b54ece87cccc06515a89d4 by using the command-line tool:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:231(replaceable) -msgid "b3968d0207b54ece87cccc06515a89d4" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:232(para) -msgid "Next, generate an HMAC-SHA1 (RFC 2104) signature to specify:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:236(para) -msgid "Which HTTP method to allow (typically GET or PUT)" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:241(para) -msgid "The expiry date as a Unix timestamp" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:244(para) -msgid "The full path to the object" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:247(para) -msgid "The secret key set as the X-Account-Meta-Temp-URL-Key" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:251(para) -msgid "Here is code generating the signature for a GET for 24 hours on /v1/AUTH_account/container/object:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:266(para) -msgid "Any alteration of the resource path or query arguments results in a 401Unauthorized error. 
Similarly, a PUT where GET was the allowed method returns a 401 error. HEAD is allowed if GET or PUT is allowed. Using this in combination with browser form post translation middleware could also allow direct-from-browser uploads to specific locations in Object Storage." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:276(para) -msgid "Changing the X-Account-Meta-Temp-URL-Key invalidates any previously generated temporary URLs within 60 seconds, which is the memcache time for the key. Object Storage supports up to two keys, specified by X-Account-Meta-Temp-URL-Key and X-Account-Meta-Temp-URL-Key-2. Signatures are checked against both keys, if present. This process enables key rotation without invalidating all existing temporary URLs." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:288(para) -msgid "Object Storage includes the script that generates the query parameters automatically:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:296(para) -msgid "Because this command only returns the path, you must prefix the Object Storage host name (for example, https://swift-cluster.example.com)." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:299(para) -msgid "With GET Temporary URLs, a Content-Disposition header is set on the response so that browsers interpret this as a file attachment to be saved. The file name chosen is based on the object name, but you can override this with a filename query parameter. The following example specifies a filename of My Test File.pdf:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:312(para) -msgid "If you do not want the object to be downloaded, you can cause Content-Disposition: inline to be set on the response by adding the inline parameter to the query string, as follows:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:331(emphasis) -msgid "tempurl" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:320(para) -msgid "To enable Temporary URL functionality, edit /etc/swift/proxy-server.conf to add tempurl to the pipeline variable defined in the [pipeline:main] section. The tempurl entry should appear immediately before the authentication filters in the pipeline, such as authtoken, tempauth or keystoneauth. For example:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:338(title) -msgid "Name check filter" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:339(para) -msgid "Name Check is a filter that disallows any paths that contain defined forbidden characters or that exceed a defined length." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:347(title) -msgid "Constraints" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:348(para) -msgid "To change the OpenStack Object Storage internal limits, update the values in the swift-constraints section in the swift.conf file. Use caution when you update these values because they affect the performance in the entire cluster." 
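Returning to the temporary URL discussion above: the signature generation it refers to (an HMAC-SHA1 over the method, expiry, and object path, keyed with X-Account-Meta-Temp-URL-Key) can be sketched as follows. The key and path are example values only.

    # Sketch of a GET temporary URL signature valid for 24 hours on
    # /v1/AUTH_account/container/object; the key is an example value.
    import hmac
    from hashlib import sha1
    from time import time

    method = 'GET'
    expires = int(time() + 60 * 60 * 24)            # Unix timestamp, 24 hours out
    path = '/v1/AUTH_account/container/object'
    key = b'b3968d0207b54ece87cccc06515a89d4'       # X-Account-Meta-Temp-URL-Key

    hmac_body = '{}\n{}\n{}'.format(method, expires, path)
    sig = hmac.new(key, hmac_body.encode('utf-8'), sha1).hexdigest()

    print('{}?temp_url_sig={}&temp_url_expires={}'.format(path, sig, expires))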
-msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:359(title) -msgid "Cluster health" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:360(para) -msgid "Use the tool to measure overall cluster health. This tool checks if a set of deliberately distributed containers and objects are currently in their proper places within the cluster. For instance, a common deployment has three replicas of each object. The health of that object can be measured by checking if each replica is in its proper place. If only 2 of the 3 is in place the object's health can be said to be at 66.66%, where 100% would be perfect. A single object's health, especially an older object, usually reflects the health of that entire partition the object is in. If you make enough objects on a distinct percentage of the partitions in the cluster,you get a good estimate of the overall cluster health." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:374(para) -msgid "In practice, about 1% partition coverage seems to balance well between accuracy and the amount of time it takes to gather results. To provide this health value, you must create an account solely for this usage. Next, you must place the containers and objects throughout the system so that they are on distinct partitions. Use the tool to create random container and object names until they fall on distinct partitions." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:384(para) -msgid "Last, and repeatedly for the life of the cluster, you must run the tool to check the health of each container and object." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:389(para) -msgid "These tools must have direct access to the entire cluster and ring files. Installing them on a proxy server suffices." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:393(para) -msgid "The and commands both use the same /etc/swift/dispersion.conf configuration file. Example dispersion.conf file:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:405(para) -msgid "You can use configuration options to specify the dispersion coverage, which defaults to 1%, retries, concurrency, and so on. However, the defaults are usually fine. After the configuration is in place, run the tool to populate the containers and objects throughout the cluster. Now that those containers and objects are in place, you can run the tool to get a dispersion report or view the overall health of the cluster. Here is an example of a cluster in perfect health:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:425(para) -msgid "Now, deliberately double the weight of a device in the object ring (with replication turned off) and re-run the dispersion report to show what impact that has:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:441(para) -msgid "You can see the health of the objects in the cluster has gone down significantly. Of course, this test environment has just four devices, in a production environment with many devices the impact of one device change is much less. 
Next, run the replicators to get everything put back into place and then rerun the dispersion report:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:460(para) -msgid "Alternatively, the dispersion report can also be output in JSON format. This allows it to be more easily consumed by third-party utilities:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:473(title) -msgid "Static Large Object (SLO) support" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:474(para) -msgid "This feature is very similar to Dynamic Large Object (DLO) support in that it enables the user to upload many objects concurrently and afterwards download them as a single object. It is different in that it does not rely on eventually consistent container listings to do so. Instead, a user-defined manifest of the object segments is used." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:481(para) -msgid "For more information regarding SLO usage and support, please see: Static Large Objects." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:489(title) -msgid "Container quotas" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:490(para) -msgid "The container_quotas middleware implements simple quotas that can be imposed on Object Storage containers by a user with the ability to set container metadata, most likely the account administrator. This can be useful for limiting the scope of containers that are delegated to non-admin users, exposed to form uploads, or just as a self-imposed sanity check." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:498(para) -msgid "Any object operations that exceed these quotas return a Forbidden (403) status code." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:500(para) -msgid "Quotas are subject to several limitations: eventual consistency, the timeliness of the cached container_info (60 second TTL by default), and the inability to reject chunked transfer uploads that exceed the quota (though once the quota is exceeded, new chunked transfers are refused)." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:506(para) -msgid "Set quotas by adding meta values to the container. These values are validated when you set them:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:510(para) -msgid "X-Container-Meta-Quota-Bytes: Maximum size of the container, in bytes." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:514(para) -msgid "X-Container-Meta-Quota-Count: Maximum object count of the container." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:523(title) -msgid "Account quotas" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:524(para) -msgid "Write requests (PUT, POST) are blocked if a given account quota (in bytes) is exceeded, while DELETE requests are still allowed." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:528(para) -msgid "The x-account-meta-quota-bytes metadata entry must be set to store and enable the quota. 
Write requests to this metadata entry are only permitted for resellers. There is no account quota limitation on a reseller account even if x-account-meta-quota-bytes is set." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:535(para) -msgid "Any object PUT operations that exceed the quota return a 413 response (request entity too large) with a descriptive body." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:538(para) -msgid "The following command uses an admin account that own the Reseller role to set a quota on the test account:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:542(para) -msgid "Here is the stat listing of an account where quota has been set:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:552(para) -msgid "This command removes the account quota:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:556(title) -msgid "Bulk delete" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:557(para) -msgid "Use bulk-delete to delete multiple files from an account with a single request. Responds to DELETE requests with a header 'X-Bulk-Delete: true_value'. The body of the DELETE request is a new line-separated list of files to delete. The files listed must be URL encoded and in the form:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:567(para) -msgid "If all files are successfully deleted (or did not exist), the operation returns HTTPOk. If any files failed to delete, the operation returns HTTPBadGateway. In both cases, the response body is a JSON dictionary that shows the number of files that were successfully deleted or not found. The files that failed are listed." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:580(title) -msgid "Drive audit" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:581(para) -msgid "The configuration items reference a script that can be run by using to watch for bad drives. If errors are detected, it unmounts the bad drive so that OpenStack Object Storage can work around it. It takes the following options:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:593(title) -msgid "Form post" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:594(para) -msgid "Middleware that enables you to upload objects to a cluster by using an HTML form ." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:598(para) -msgid "The format of the form is:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:615(para) -msgid "In the form:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:621(literal) -msgid "action=\"<swift-url>\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:623(para) -msgid "The URL to the Object Storage destination, such as https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:627(para) -msgid "The name of each uploaded file is appended to the specified swift-url. 
So, you can upload directly to the root of container with a URL like https://swift-cluster.example.com/v1/AUTH_account/container/." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:633(para) -msgid "Optionally, you can include an object prefix to separate different users' uploads, such as https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:642(literal) -msgid "method=\"POST\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:643(para) -msgid "The form method must be ." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:650(literal) -msgid "enctype=\"multipart/form-data" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:651(para) -msgid "The enctype must be set to multipart/form-data." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:658(literal) -msgid "name=\"redirect\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:659(para) -msgid "The URL to which to redirect the browser after the upload completes. The URL has status and message query parameters added to it that indicate the HTTP status code for the upload and, optionally, additional error information. The 2nn status code indicates success. If an error occurs, the URL might include error information, such as \"max_file_size exceeded\"." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:672(literal) -msgid "name=\"max_file_size\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:673(para) -msgid "Required. The maximum number of bytes that can be uploaded in a single file upload." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:681(literal) -msgid "name=\"max_file_count\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:682(para) -msgid "Required. The maximum number of files that can be uploaded with the form." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:690(literal) -msgid "name=\"expires\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:692(para) -msgid "The expiration date and time for the form in UNIX Epoch time stamp format. After this date and time, the form is no longer valid." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:698(para) ./doc/config-reference/object-storage/section_object-storage-features.xml:748(para) -msgid "For example, 1440619048 is equivalent to Mon, Wed, 26 Aug 2015 19:57:28 GMT." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:706(literal) -msgid "name=\"signature\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:708(para) -msgid "The HMAC-SHA1 signature of the form. This sample Python code shows how to compute the signature:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:724(para) -msgid "The key is the value of the X-Account-Meta-Temp-URL-Key header on the account." 
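The sample signature computation mentioned above is not reproduced in this file; a minimal sketch of it looks like the following, where the path, redirect URL, limits, and key are example values.

    # Sketch of the form post signature: HMAC-SHA1 over path, redirect,
    # max_file_size, max_file_count, and expires, keyed with the account's
    # X-Account-Meta-Temp-URL-Key. All values below are examples.
    import hmac
    from hashlib import sha1
    from time import time

    path = '/v1/AUTH_account/container/object_prefix'
    redirect = 'https://srv.example.com/some-page'
    max_file_size = 104857600
    max_file_count = 10
    expires = int(time() + 600)
    key = b'mykey'

    hmac_body = '{}\n{}\n{}\n{}\n{}'.format(
        path, redirect, max_file_size, max_file_count, expires)
    signature = hmac.new(key, hmac_body.encode('utf-8'), sha1).hexdigest()
    print(signature)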
-msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:727(para) -msgid "Use the full path from the /v1/ value and onward." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:731(para) -msgid "During testing, you can use the command-line tool to compute the expires and signature values." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:741(literal) -msgid "name=\"x_delete_at\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:743(para) -msgid "The date and time in UNIX Epoch time stamp format when the object will be removed." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:752(para) -msgid "This attribute enables you to specify the X-Delete- At header value in the form ." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:760(literal) -msgid "name=\"x_delete_after\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:762(para) -msgid "The number of seconds after which the object is removed. Internally, the Object Storage system stores this value in the X-Delete-At metadata item. This attribute enables you to specify the X-Delete-After header value in the form ." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:772(literal) -msgid "type=\"file\" name=\"filexx\"" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:774(para) -msgid "Optional. One or more files to upload. Must appear after the other attributes to be processed correctly. If attributes come after the file attribute, they are not sent with the sub- request because on the server side, all attributes in the file cannot be parsed unless the whole file is read into memory and the server does not have enough memory to service these requests. So, attributes that follow the file attribute are ignored." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:789(title) -msgid "Static web sites" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-features.xml:790(para) -msgid "When configured, this middleware serves container data as a static web site with index file and error file resolution and optional file listings. This mode is normally only active for anonymous requests." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-cors.xml:7(title) -msgid "Cross-origin resource sharing" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-cors.xml:8(para) -msgid "Cross-Origin Resource Sharing (CORS) is a mechanism that allows code running in a browser (JavaScript for example) to make requests to a domain, other than the one it was originated from. OpenStack Object Storage supports CORS requests to containers and objects within the containers using metadata held on the container." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-cors.xml:12(para) -msgid "In addition to the metadata on containers, you can use the option in the proxy-server.conf file to set a list of hosts that are included with any CORS request by default." 
-msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:7(title) -msgid "Object Storage general service configuration" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:8(para) -msgid "Most Object Storage services fall into two categories, Object Storage's WSGI servers and background daemons." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:12(para) -msgid "Object Storage uses paste.deploy to manage server configurations. Read more at http://pythonpaste.org/deploy/." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:15(para) -msgid "Default configuration options are set in the `[DEFAULT]` section, and any options specified there can be overridden in any of the other sections when the syntax set option_name = value is in place." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:20(para) -msgid "Configuration for servers and daemons can be expressed together in the same file for each type of server, or separately. If a required section for the service trying to start is missing, there will be an error. Sections not used by the service are ignored." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:26(para) -msgid "Consider the example of an Object Storage node. By convention configuration for the object-server, object-updater, object-replicator, and object-auditor exist in a single file /etc/swift/object-server.conf:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:52(para) -msgid "Object Storage services expect a configuration path as the first argument:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:60(para) -msgid "If you omit the object-auditor section, this file cannot be used as the configuration path when starting the daemon:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:68(para) -msgid "If the configuration path is a directory instead of a file, all of the files in the directory with the file extension \".conf\" will be combined to generate the configuration object which is delivered to the Object Storage service. This is referred to generally as \"directory-based configuration\"." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:75(para) -msgid "Directory-based configuration leverages ConfigParser's native multi-file support. Files ending in \".conf\" in the given directory are parsed in lexicographical order. File names starting with '.' are ignored. A mixture of file and directory configuration paths is not supported - if the configuration path is a file, only that file will be parsed." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:83(para) -msgid "The Object Storage service management tool swift-init has adopted the convention of looking for /etc/swift/{type}-server.conf.d/ if the file /etc/swift/{type}-server.conf file does not exist." 
-msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:91(para) -msgid "When using directory-based configuration, if the same option under the same section appears more than once in different files, the last value parsed is said to override previous occurrences. You can ensure proper override precedence by prefixing the files in the configuration directory with numerical values, as in the following example file layout:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:110(para) -msgid "You can inspect the resulting combined configuration object using the command-line tool." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-general-service-conf.xml:114(para) -msgid "All the services of an Object Store deployment share a common configuration in the [swift-hash] section of the /etc/swift/swift.conf file. The and values must be identical on all the nodes." -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:7(title) -msgid "Configure Object Storage with the S3 API" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:8(para) -msgid "The Swift3 middleware emulates the S3 REST API on top of Object Storage." -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:10(para) -msgid "The following operations are currently supported:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:13(para) -msgid "GET Service" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:16(para) -msgid "DELETE Bucket" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:19(para) -msgid "GET Bucket (List Objects)" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:22(para) -msgid "PUT Bucket" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:25(para) -msgid "DELETE Object" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:28(para) -msgid "GET Object" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:31(para) -msgid "HEAD Object" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:34(para) -msgid "PUT Object" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:37(para) -msgid "PUT Object (Copy)" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:40(para) -msgid "To use this middleware, first download the latest version from its repository to your proxy server(s)." -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:44(para) -msgid "Then, install it using standard python mechanisms, such as:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:47(para) -msgid "Alternatively, if you have configured the Ubuntu Cloud Archive, you may use: " -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:50(para) -msgid "To add this middleware to your configuration, add the swift3 middleware in front of the swauth middleware, and before any other middleware that looks at Object Storage requests (like rate limiting)." 
-msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:53(para) -msgid "Ensure that your proxy-server.conf file contains swift3 in the pipeline and the [filter:swift3] section, as shown below:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:61(para) -msgid "Next, configure the tool that you use to connect to the S3 API. For S3curl, for example, you must add your host IP information by adding your host IP to the @endpoints array (line 33 in s3curl.pl):" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:66(para) -msgid "Now you can send commands to the endpoint, such as:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_configure_s3.xml:69(para) -msgid "To set up your client, ensure you are using the ec2 credentials, which can be downloaded from the API Endpoints tab of the dashboard. The host should also point to the Object Storage node's hostname. It also will have to use the old-style calling format, and not the hostname-based container format. Here is an example client setup using the Python boto library on a locally installed all-in-one Object Storage installation." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:7(title) -msgid "Endpoint listing middleware" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:8(para) -msgid "The endpoint listing middleware enables third-party services that use data locality information to integrate with OpenStack Object Storage. This middleware reduces network overhead and is designed for third-party services that run inside the firewall. Deploy this middleware on a proxy server because usage of this middleware is not authenticated." -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:12(para) -msgid "Format requests for endpoints, as follows:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:13(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:14(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:15(replaceable) -msgid "{account}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:13(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:14(replaceable) -msgid "{container}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:13(replaceable) -msgid "{object}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:16(para) -msgid "Use the configuration option in the proxy_server.conf file to customize the /endpoints/ path." 
-msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:19(para) -msgid "Responses are JSON-encoded lists of endpoints, as follows:" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:22(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:23(replaceable) -msgid "{server}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:22(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:23(replaceable) -msgid "{port}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:22(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:23(replaceable) -msgid "{dev}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:22(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:23(replaceable) -msgid "{part}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:22(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:23(replaceable) -msgid "{acc}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:22(replaceable) -msgid "{cont}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:21(replaceable) -msgid "{obj}" -msgstr "" - -#: ./doc/config-reference/object-storage/section_object-storage-listendpoints.xml:24(para) -msgid "An example response is:" -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:7(title) -msgid "Identity service sample configuration files" -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:8(para) -msgid "You can find the files described in this section in the /etc/keystone directory." -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:11(title) -msgid "keystone.conf" -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:12(para) -msgid "Use the keystone.conf file to configure most Identity service options:" -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:18(title) -msgid "keystone-paste.ini" -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:19(para) -msgid "Use the keystone-paste.ini file to configure the Web Service Gateway Interface (WSGI) middleware pipeline for the Identity service." 
-msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:26(title) -msgid "logging.conf" -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:27(para) -msgid "You can specify a special logging configuration file in the keystone.conf configuration file. For example, /etc/keystone/logging.conf." -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:31(para) -msgid "For details, see the (Python logging module documentation)." -msgstr "" - -#: ./doc/config-reference/identity/section_keystone-sample-conf-files.xml:39(para) -msgid "Use the policy.json file to define additional access controls that apply to the Identity service." -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-rpc.xml:19(para) -msgid "OpenStack Oslo RPC uses RabbitMQ by default. Use these options to configure the RabbitMQ message system. The option is optional as long as RabbitMQ is the default messaging system. However, if it is included in the configuration, you must set it to heat.openstack.common.rpc.impl_kombu." -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-rpc.xml:31(para) -msgid "Use these options to configure the RabbitMQ messaging system. You can configure messaging communication for different installation scenarios, tune retries for RabbitMQ, and define the size of the RPC thread pool. To monitor notifications through RabbitMQ, you must set the option to heat.openstack.common.notifier.rpc_notifier in the heat.conf file:" -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-rpc.xml:43(para) -msgid "Use these options to configure the Qpid messaging system for OpenStack Oslo RPC. Qpid is not the default messaging system, so you must enable it by setting the option in the heat.conf file:" -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-rpc.xml:50(para) -msgid "This critical option points the compute nodes to the Qpid broker (server). Set the option to the host name where the broker runs in the heat.conf file." -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-rpc.xml:56(para) -msgid "The option accepts a host name or IP address value." -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-rpc.xml:88(para) -msgid "Use these options to configure the ZeroMQ messaging system for OpenStack Oslo RPC. ZeroMQ is not the default messaging system, so you must enable it by setting the option in the heat.conf file:" -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-clients.xml:7(title) -msgid "Configure Clients" -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-clients.xml:8(para) -msgid "The following options allow configuration of the clients that Orchestration uses to talk to other services." -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-api.xml:7(title) -msgid "Configure APIs" -msgstr "" - -#: ./doc/config-reference/orchestration/section_orchestration-api.xml:8(para) -msgid "The following options allow configuration of the APIs that Orchestration supports. Currently this includes compatibility APIs for CloudFormation and CloudWatch and a native API." 
-msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:7(title) -msgid "Log files used by Block Storage" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:8(para) -msgid "The corresponding log file of each Block Storage service is stored in the /var/log/cinder/ directory of the host on which each service runs." -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:12(caption) -msgid "Log files used by Block Storage services" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:36(systemitem) -msgid "openstack-cinder-api" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:39(systemitem) -msgid "cinder-api" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:44(filename) -msgid "cinder-manage.log" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:47(systemitem) ./doc/config-reference/block-storage/section_cinder-log-files.xml:50(systemitem) -msgid "cinder-manage" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:58(systemitem) -msgid "openstack-cinder-scheduler" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:61(systemitem) -msgid "cinder-scheduler" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:66(filename) -msgid "volume.log" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:69(systemitem) -msgid "openstack-cinder-volume" -msgstr "" - -#: ./doc/config-reference/block-storage/section_cinder-log-files.xml:72(systemitem) -msgid "cinder-volume" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:6(title) -msgid "Fibre Channel Zone Manager" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:7(para) -msgid "The Fibre Channel Zone Manager allows FC SAN Zone/Access control management in conjunction with Fibre Channel block storage. The configuration of Fibre Channel Zone Manager and various zone drivers are described in this section." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:12(title) -msgid "Configure Block Storage to use Fibre Channel Zone Manager" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:14(para) -msgid "If Block Storage is configured to use a Fibre Channel volume driver that supports Zone Manager, update cinder.conf to add the following configuration options to enable Fibre Channel Zone Manager." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:19(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:383(para) -msgid "Make the following changes in the /etc/cinder/cinder.conf file." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:23(para) -msgid "To use different Fibre Channel Zone Drivers, use the parameters described in this section." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:26(para) -msgid "When multi backend configuration is used, provide the configuration option as part of the volume driver configuration where option is specified." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:32(para) -msgid "Default value of is None and this needs to be changed to fabric to allow fabric zoning." 
-msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:37(para) -msgid " can be configured as initiator-target or initiator" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:44(title) -msgid "Brocade Fibre Channel Zone Driver" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:45(para) -msgid "Brocade Fibre Channel Zone Driver performs zoning operations through SSH. Configure Brocade Zone Driver and lookup service by specifying the following parameters:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:49(para) ./doc/config-reference/block-storage/section_fc-zoning.xml:84(para) -msgid "Configure SAN fabric parameters in the form of fabric groups as described in the example below:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:53(para) ./doc/config-reference/block-storage/section_fc-zoning.xml:88(para) -msgid "Define a fabric group for each fabric using the fabric names used in configuration option as group name." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:59(title) ./doc/config-reference/block-storage/section_fc-zoning.xml:98(title) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:36(title) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:29(title) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:21(title) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:19(title) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:25(title) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:16(title) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:17(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:17(title) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:13(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:17(title) -msgid "System requirements" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:60(para) -msgid "Brocade Fibre Channel Zone Driver requires firmware version FOS v6.4 or higher." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:62(para) -msgid "As a best practice for zone management, use a user account with zoneadmin role. Users with admin role (including the default admin user account) are limited to a maximum of two concurrent SSH sessions." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:68(para) -msgid "For information about how to manage Brocade Fibre Channel switches, see the Brocade Fabric OS user documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:73(title) -msgid "Cisco Fibre Channel Zone Driver" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:74(para) -msgid "Cisco Fibre Channel Zone Driver automates the zoning operations through SSH. Configure Cisco Zone Driver, Cisco Southbound connector, FC SAN lookup service and Fabric name." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:77(para) -msgid "Set the following options in the cinder.conf configuration file." 
-msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:81(replaceable) -msgid "CISCO_FABRIC_EXAMPLE" -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:92(para) -msgid "The Cisco Fibre Channel Zone Driver supports basic and enhanced zoning modes.The zoning VSAN must exist with an active zone set name which is same as the option." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:99(para) -msgid "Cisco MDS 9000 Family Switches." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:100(para) -msgid "Cisco MDS NX-OS Release 6.2(9) or later." -msgstr "" - -#: ./doc/config-reference/block-storage/section_fc-zoning.xml:101(para) -msgid "For information about how to manage Cisco Fibre Channel switches, see the Cisco MDS 9000 user documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/section_backup-drivers.xml:7(title) -msgid "Backup drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/section_backup-drivers.xml:8(para) -msgid "This section describes how to configure the cinder-backup service and its drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/section_backup-drivers.xml:11(para) -msgid "The volume drivers are included with the Block Storage repository (https://git.openstack.org/cgit/openstack/cinder/). To set a backup driver, use the backup_driver flag. By default there is no backup driver enabled." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:11(title) -msgid "Volume encryption supported by the key manager" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:12(para) -msgid "We recommend the Key management service (barbican) for storing encryption keys used by the OpenStack volume encryption feature. It can be enabled by updating cinder.conf and nova.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:20(title) -msgid "Initial configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:22(para) -msgid "Configuration changes need to be made to any nodes running the cinder-volume or nova-compute server." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:26(para) -msgid "Steps to update cinder-volume servers:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:29(para) -msgid "Edit the /etc/cinder/cinder.conf file to use Key management service as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:33(para) -msgid "Look for the [keymgr] section." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:36(para) -msgid "Enter a new line directly below [keymgr] with the following: api_class=cinder.keymgr.barbican.BarbicanKeyManager" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:40(para) -msgid "Use a '#' prefix to comment out the line in this section that begins with 'fixed_key'." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:47(para) -msgid "Restart cinder-volume." 
-msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:52(para) -msgid "Update nova-compute servers:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:55(para) -msgid "Repeat the same steps above to set up the Key management service by editing /etc/nova/nova.conf" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:60(para) -msgid "Restart nova-compute." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:64(para) -msgid "Follow the instructions in the OpenStack Admin User Guide under the heading Create an encrypted volume type or alternatively, see in this manual to do this via the command line." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:70(para) -msgid "Create an encrypted volume by typing the command:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:73(para) -msgid "For alternate instructions and details, including the console output, see the in this document." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:77(title) -msgid "Create an encrypted volume type" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:79(para) -msgid "Block Storage volume type assignment provides scheduling to a specific back-end, and can be used to specify actionable information for a back-end storage device." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:81(para) -msgid "This example creates a volume type called LUKS and provides configuration information for the storage system to encrypt or decrypt the volume." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:85(para) ./doc/config-reference/block-storage/section_volume-encryption.xml:118(para) -msgid "Source your admin credentials:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:89(para) -msgid "Create the volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:98(para) -msgid "Mark the volume type as encrypted and provide the necessary details. Use --control_location to specify where encryption is performed: front-end (default) or back-end." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:110(para) -msgid "The OpenStack dashboard (horizon) supports creating the encrypted volume type as of the Kilo release." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:113(title) -msgid "Create an encrypted volume" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:114(para) -msgid "Use the OpenStack dashboard (horizon), or the command to create volumes just as you normally would. For an encrypted volume use the LUKS tag, for unencrypted leave the LUKS tag off." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:122(para) -msgid "Create an unencrypted 1GB test volume:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:149(para) -msgid "Create an encrypted 1GB test volume:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:176(para) -msgid "Notice the encrypted parameter; it will show True/False. The option is also shown for easy review." 
-msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:178(para) -msgid "Due to the issue that some of the volume drivers do not set 'encrypted' flag, attaching of encrypted volumes to a virtual guest will fail, because OpenStack Compute service will not run encryption providers." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:185(title) -msgid "Testing volume encryption" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:186(para) -msgid "This is a simple test scenario to help validate your encryption. It assumes an LVM based Block Storage server." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:187(para) -msgid "Perform these steps after completing the volume encryption setup and creating the volume-type for LUKS as described in the preceding sections." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:190(para) -msgid "Create a VM:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:194(para) -msgid "Create two volumes, one encrypted and one not encrypted then attach them to your VM:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:208(para) -msgid "On the VM, send some text to the newly attached volumes and synchronize them:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:215(para) -msgid "On the system hosting cinder volume services, synchronize to flush the I/O cache then test to see if your strings can be found:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-encryption.xml:222(para) -msgid "In the above example you see that the search returns the string written to the unencrypted volume, but not the encrypted one." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:6(title) -msgid "Block Storage sample configuration files" -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:7(para) -msgid "All the files in this section can be found in /etc/cinder." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:9(title) -msgid "cinder.conf" -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:10(para) -msgid "The cinder.conf file is installed in /etc/cinder by default. When you manually install the Block Storage service, the options in the cinder.conf file are set to default values." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:14(para) -msgid "The cinder.conf file contains most of the options to configure the Block Storage service." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:22(para) -msgid "Use the api-paste.ini file to configure the Block Storage API service." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:29(para) -msgid "The policy.json file defines additional access controls that apply to the Block Storage service." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-sample-configuration-files.xml:34(para) -msgid "The rootwrap.conf file defines configuration values used by the script when the Block Storage service must escalate its privileges to those of the root user." 
-msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:7(title) -msgid "Introduction to the Block Storage service" -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:8(para) -msgid "The OpenStack Block Storage service provides persistent block storage resources that OpenStack Compute instances can consume. This includes secondary attached storage similar to the Amazon Elastic Block Storage (EBS) offering. In addition, you can write images to a Block Storage device for Compute to use as a bootable persistent instance." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:12(para) -msgid "The Block Storage service differs slightly from the Amazon EBS offering. The Block Storage service does not provide a shared storage solution like NFS. With the Block Storage service, you can attach a device to only one instance." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:15(para) -msgid "The Block Storage service provides:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:18(para) -msgid "cinder-api. A WSGI app that authenticates and routes requests throughout the Block Storage service. It supports the OpenStack APIs only, although there is a translation that can be done through Compute's EC2 interface, which calls in to the Block Storage client." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:24(para) -msgid "cinder-scheduler. Schedules and routes requests to the appropriate volume service. Depending upon your configuration, this may be simple round-robin scheduling to the running volume services, or it can be more sophisticated through the use of the Filter Scheduler. The Filter Scheduler is the default and enables filters on things like Capacity, Availability Zone, Volume Types, and Capabilities as well as custom filters." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:32(para) -msgid "cinder-volume. Manages Block Storage devices, specifically the back-end devices themselves." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:37(para) -msgid "cinder-backup. Provides a means to back up a Block Storage volume to OpenStack Object Storage (swift)." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:42(para) -msgid "The Block Storage service contains the following components:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:45(para) -msgid "Back-end Storage Devices. The Block Storage service requires some form of back-end storage that the service is built on. The default implementation is to use LVM on a local volume group named \"cinder-volumes.\" In addition to the base driver implementation, the Block Storage service also provides the means to add support for other storage devices to be utilized such as external Raid Arrays or other storage appliances. These back-end storage devices may have custom block sizes when using KVM or QEMU as the hypervisor." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:54(para) -msgid "Users and Tenants (Projects). The Block Storage service can be used by many different cloud computing consumers or customers (tenants on a shared system), using role-based access assignments. Roles control the actions that a user is allowed to perform. 
In the default configuration, most actions do not require a particular role, but this can be configured by the system administrator in the appropriate policy.json file that maintains the rules. A user's access to particular volumes is limited by tenant, but the user name and password are assigned per user. Key pairs granting access to a volume are enabled per user, but quotas to control resource consumption across available hardware resources are per tenant." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:68(para) -msgid "The number of volumes that can be created." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:72(para) -msgid "The number of snapshots that can be created." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:76(para) -msgid "The total number of GBs allowed per tenant (shared between snapshots and volumes)." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:80(para) -msgid "You can revise the default quota values with the Block Storage CLI, so the limits placed by quotas are editable by admin users." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:84(para) -msgid "Volumes, Snapshots, and Backups. The basic resources offered by the Block Storage service are volumes and snapshots which are derived from volumes and volume backups:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:89(para) -msgid "Volumes. Allocated block storage resources that can be attached to instances as secondary storage or they can be used as the root store to boot instances. Volumes are persistent R/W block storage devices most commonly attached to the compute node through iSCSI." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:98(para) -msgid "Snapshots. A read-only point in time copy of a volume. The snapshot can be created from a volume that is currently in use (through the use of --force True) or in an available state. The snapshot can then be used to create a new volume through create from snapshot." -msgstr "" - -#: ./doc/config-reference/block-storage/section_block-storage-overview.xml:105(para) -msgid "Backups. An archived copy of a volume currently stored in OpenStack Object Storage (swift)." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-drivers.xml:7(title) -msgid "Volume drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-drivers.xml:8(para) -msgid "To use different volume drivers for the cinder-volume service, use the parameters described in these sections." -msgstr "" - -#: ./doc/config-reference/block-storage/section_volume-drivers.xml:11(para) -msgid "The volume drivers are included in the Block Storage repository (https://git.openstack.org/cgit/openstack/cinder/). To set a volume driver, use the volume_driver flag. The default is:" -msgstr "" - -#: ./doc/config-reference/block-storage/section_misc.xml:7(title) -msgid "Additional options" -msgstr "" - -#: ./doc/config-reference/block-storage/section_misc.xml:9(para) -msgid "These options can also be set in the cinder.conf file." 
-msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:5(title) -msgid "Swift backup driver" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:6(para) -msgid "The backup driver for the swift back end performs a volume backup to an object storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:8(para) -msgid "To enable the swift backup driver, include the following option in the cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:12(para) -msgid "The following configuration options are available for the Swift back-end backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:17(para) -msgid "To enable the swift backup driver for 1.0 or 2.0 authentication version, specify 1 or 2 correspondingly. For example:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:20(para) -msgid "In addition, the 2.0 authentication system requires backup_swift_tenant setting:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/swift-backup-driver.xml:24(para) -msgid "This example shows the default options for the Swift back-end backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/tsm-backup-driver.xml:6(title) -msgid "IBM Tivoli Storage Manager backup driver" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/tsm-backup-driver.xml:7(para) -msgid "The IBM Tivoli Storage Manager (TSM) backup driver enables performing volume backups to a TSM server." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/tsm-backup-driver.xml:10(para) -msgid "The TSM client should be installed and configured on the machine running the cinder-backup service. See the IBM Tivoli Storage Manager Backup-Archive Client Installation and User's Guide for details on installing the TSM client." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/tsm-backup-driver.xml:17(para) -msgid "To enable the IBM TSM backup driver, include the following option in cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/tsm-backup-driver.xml:20(para) -msgid "The following configuration options are available for the TSM backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/tsm-backup-driver.xml:23(para) -msgid "This example shows the default options for the TSM backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:5(title) -msgid "Ceph backup driver" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:6(para) -msgid "The Ceph backup driver backs up volumes of any type to a Ceph back-end store. The driver can also detect whether the volume to be backed up is a Ceph RBD volume, and if so, it tries to perform incremental and differential backups." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:11(para) -msgid "For source Ceph RBD volumes, you can perform backups within the same Ceph pool (not recommended). You can also perform backups between different Ceph pools and between different Ceph clusters." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:15(para) -msgid "At the time of writing, differential backup support in Ceph/librbd was quite new. This driver attempts a differential backup in the first instance. 
If the differential backup fails, the driver falls back to full backup/copy." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:20(para) -msgid "If incremental backups are used, multiple backups of the same volume are stored as snapshots so that minimal space is consumed in the backup store. It takes far less time to restore a volume than to take a full copy." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:26(para) -msgid "Block Storage enables you to:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:29(para) -msgid "Restore to a new volume, which is the default and recommended action." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:34(para) -msgid "Restore to the original volume from which the backup was taken. The restore action takes a full copy because this is the safest action." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:42(para) -msgid "To enable the Ceph backup driver, include the following option in the cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:46(para) -msgid "The following configuration options are available for the Ceph backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/ceph-backup-driver.xml:50(para) -msgid "This example shows the default options for the Ceph backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/nfs-backup-driver.xml:5(title) -msgid "NFS backup driver" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/nfs-backup-driver.xml:6(para) -msgid "The backup driver for the NFS back end backs up volumes of any type to an NFS exported backup repository." -msgstr "" - -#: ./doc/config-reference/block-storage/backup/nfs-backup-driver.xml:8(para) -msgid "To enable the NFS backup driver, include the following option in the [DEFAULT] section of the cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/backup/nfs-backup-driver.xml:12(para) -msgid "The following configuration options are available for the NFS back-end backup driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:5(title) -msgid "Oracle ZFS Storage Appliance iSCSI driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:6(para) -msgid "Oracle ZFS Storage Appliances (ZFSSAs) provide advanced software to protect data, speed tuning and troubleshooting, and deliver high performance and high availability. Through the Oracle ZFSSA iSCSI Driver, OpenStack Block Storage can use an Oracle ZFSSA as a block storage resource. The driver enables you to create iSCSI volumes that an OpenStack Block Storage server can allocate to any virtual machine running on a compute host. The Oracle ZFSSA iSCSI Driver, version 1.0.0 and later, supports ZFSSA software release 2013.1.2.0 and later." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:17(para) -msgid "Enable RESTful service on the ZFSSA Storage Appliance." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:20(para) -msgid "Create a new user on the appliance with the following authorizations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:23(code) -msgid "scope=stmf - allow_configure=true" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:26(code) -msgid "scope=nas - allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:29(para) -msgid "You can create a role with authorizations as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:41(para) -msgid "You can create a user with a specific role as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:49(para) -msgid "You can also run this workflow to automate the above tasks." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:55(para) -msgid "Ensure that the ZFSSA iSCSI service is online. If the ZFSSA iSCSI service is not online, enable the service by using the BUI, CLI or REST API in the appliance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:67(para) -msgid "Define the following required properties in the cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:69(replaceable) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:180(replaceable) -msgid "myhost" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:70(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:81(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:145(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:353(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:410(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:489(replaceable) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:181(replaceable) -msgid "username" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:72(replaceable) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:184(replaceable) -msgid "mypool" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:73(replaceable) -msgid "myproject" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:74(replaceable) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:228(literal) -msgid "default" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:75(replaceable) -msgid "w.x.y.z:3260" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:76(replaceable) -msgid "e1000g0" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:77(para) -msgid "Optionally, you can define additional properties." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:78(para) -msgid "Target interfaces can be seen as follows in the CLI:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:86(para) -msgid "Do not use management interfaces for zfssa_target_interfaces." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:96(para) -msgid "Create and delete volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:99(para) -msgid "Extend volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:102(para) -msgid "Create and delete snapshots" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:105(para) -msgid "Create volume from snapshot" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:108(para) -msgid "Delete volume snapshots" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:111(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:21(para) -msgid "Attach and detach volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:114(para) -msgid "Get volume stats" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:117(para) -msgid "Clone volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:120(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:39(para) -msgid "Volume migration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:123(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:42(para) -msgid "Local cache of a bootable volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:128(title) -msgid "ZFSSA assisted volume migration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:129(para) -msgid "The ZFSSA iSCSI driver supports storage assisted volume migration starting in the Liberty release. This feature uses remote replication feature on the ZFSSA. Volumes can be migrated between two backends configured not only to the same ZFSSA but also between two separate ZFSSAs altogether." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:134(para) -msgid "The following conditions must be met in order to use ZFSSA assisted volume migration:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:139(para) -msgid "Both the source and target backends are configured to ZFSSAs." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:142(para) -msgid "Remote replication service on the source and target appliance is enabled." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:145(para) -msgid "The ZFSSA to which the target backend is configured should be configured as a target in the remote replication service of the ZFSSA configured to the source backend. The remote replication target needs to be configured even when the source and the destination for volume migration are the same ZFSSA. Define zfssa_replication_ip in the cinder.conf of the source backend as the IP address used to register the target ZFSSA in the remote replication service of the source ZFSSA." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:155(para) -msgid "The name of the iSCSI target group(zfssa_target_group) on the source and the destination ZFSSA is the same." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:160(para) -msgid "The volume is not attached and is in available state." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:164(para) -msgid "If any of the above conditions are not met, the driver will proceed with generic volume migration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:166(para) -msgid "The ZFSSA user on the source and target appliances will need to have additional role authorizations for assisted volume migration to work. In scope nas, set allow_rrtarget and allow_rrsource to true." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:178(title) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:216(title) -msgid "ZFSSA local cache" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:179(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:217(para) -msgid "The local cache feature enables ZFSSA drivers to serve the usage of bootable volumes significantly better. With the feature, the first bootable volume created from an image is cached, so that subsequent volumes can be created directly from the cache, instead of having image data transferred over the network multiple times." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:183(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:221(para) -msgid "The following conditions must be met in order to use ZFSSA local cache feature:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:187(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:225(para) -msgid "A storage pool needs to be configured." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:190(para) -msgid "REST and iSCSI services need to be turned on." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:197(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:235(para) -msgid "zfssa_enable_local_cache: (True/False) To enable/disable the feature." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:200(para) -msgid "zfssa_cache_project: The ZFSSA project name where cache volumes are stored." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:193(para) -msgid "On an OpenStack controller, cinder.conf needs to contain necessary properties used to configure and set up the ZFSSA iSCSI driver, including the following new properties: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:208(para) -msgid "Every cache volume has two additional properties stored as ZFSSA custom schema. It is important that the schema are not altered outside of Block Storage when the driver is in use:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:213(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:251(para) -msgid "image_id: stores the image id as in Image service." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:216(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:254(para) -msgid "updated_at: stores the most current timestamp when the image is updated in Image service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:221(title) -msgid "Supported extra specs" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:222(para) -msgid "Extra specs provide the OpenStack storage admin the flexibility to create volumes with different characteristics from the ones specified in cinder.conf. The admin will specify the volume properties as keys at volume type creation. When a user requests a volume of this volume type, the volume will be created with the properties specified as extra specs." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:229(para) -msgid "The following extra specs scoped keys are supported by the driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:233(para) -msgid "zfssa:volblocksize" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:236(para) -msgid "zfssa:sparse" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:239(para) -msgid "zfssa:compression" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:242(para) -msgid "zfssa:logbias" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:245(para) -msgid "Volume types can be created using the cinder type-create. Extra spec keys can be added using cinder type-key command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-iscsi-driver.xml:252(para) -msgid "The Oracle ZFSSA iSCSI Driver supports these options:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:7(title) -msgid "EMC ScaleIO Block Storage driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:9(para) -msgid "ScaleIO is a software-only solution that uses existing servers' local disks and LAN to create a virtual SAN that has all of the benefits of external storage but at a fraction of the cost and complexity. Using the driver, OpenStack Block Storage hosts can connect to a ScaleIO Storage cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:12(para) -msgid "This section explains how to configure and connect an OpenStack block storage host to a ScaleIO storage cluster." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:14(title) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:13(title) -msgid "Support matrix" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:17(para) -msgid "ScaleIO: Version 1.32" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:25(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:24(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:101(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:17(para) -msgid "Create, delete, clone, attach, and detach volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:28(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:27(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:104(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:20(para) -msgid "Create and delete volume snapshots" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:31(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:26(para) ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:32(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:30(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:107(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:23(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:27(para) -msgid "Create a volume from a snapshot" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:34(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:25(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:33(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:110(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:26(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:30(para) -msgid "Copy an image to a volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:37(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:25(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:36(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:113(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:29(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:33(para) -msgid "Copy a volume to an image" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:40(para) ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:33(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:39(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:116(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:32(para) -msgid "Extend a volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:43(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:45(para) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:119(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:35(para) -msgid "Get volume statistics" -msgstr "" - -#: 
./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:48(title) -msgid "ScaleIO Block Storage driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:49(para) -msgid "Edit the cinder.conf file by adding the configuration below under the [DEFAULT] section of the file in case of a single back end or under a separate section in case of multiple back ends (for example [ScaleIO]). The configuration file is usually located under the following path /etc/cinder/cinder.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:53(para) -msgid "For a configuration example, refer to the configuration example." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:55(title) -msgid "ScaleIO driver name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:56(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:58(para) -msgid "Configure the driver name by adding the following parameter:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:57(code) -msgid "volume_driver = cinder.volume.drivers.emc.scaleio.ScaleIODriver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:60(title) -msgid "ScaleIO MDM server IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:61(para) -msgid "The ScaleIO Meta Data Manager monitors and maintains the available resources and permissions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:62(para) -msgid "To retrieve the MDM server IP, use the CLI command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:63(para) -msgid "Configure the MDM server IP by adding the following parameter: san_ip = ScaleIO MDM IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:67(title) -msgid "ScaleIO protection domain name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:68(para) -msgid "ScaleIO allows multiple protection domains (groups of SDSs that provide backup for each other)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:69(para) -msgid "To retrieve the available protection domains, use the and search for the protection domains section." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:71(para) -msgid "Configure the protection domain for newly created volumes by adding the following parameter: sio_protection_domain_name = ScaleIO Protection Domain" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:75(title) -msgid "ScaleIO storage pool name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:76(para) -msgid "A ScaleIO storage pool is a set of physical devices in a protection domain." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:77(para) ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:85(para) -msgid "To retrieve the available storage pools, use the and search for available storage pools." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:79(para) -msgid "Configure the storage pool for newly created volumes by adding the following parameter: sio_storage_pool_name = ScaleIO Storage Pool" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:83(title) -msgid "ScaleIO storage pools" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:84(para) -msgid "Multiple storage pools and protection domains can be listed for use by the virtual machines." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:87(para) -msgid "Configure the available storage pools by adding the following parameter: sio_storage_pools = Comma separated list of protection domain:storage pool name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:91(title) -msgid "ScaleIO user credentials" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:92(para) -msgid "OpenStack Block Storage requires a ScaleIO user with administrative privileges. ScaleIO recommends creating a dedicated OpenStack user account that holds an administrative user role." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:94(para) -msgid "Refer to the ScaleIO User Guide for details on user account management" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:95(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:88(para) -msgid "Configure the user credentials by adding the following parameters:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:96(replaceable) -msgid "ScaleIO username" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:96(code) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:89(code) -msgid "san_login = " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:97(replaceable) -msgid "ScaleIO password" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:97(code) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:90(code) -msgid "san_password = " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:101(title) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:94(title) -msgid "Multiple back ends" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:102(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:95(para) -msgid "Configuring multiple storage back ends enables you to create several back-end storage solutions that serve the same OpenStack Compute resources." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:103(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:96(para) -msgid "When a volume is created, the scheduler selects the appropriate back end to handle the request, according to the specified volume type." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:106(title) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:119(title) -msgid "Restarting OpenStack Block Storage" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:107(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:120(para) -msgid "Save thecinder.conffile and restart cinder by running the following command:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:111(title) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:136(title) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:220(title) -msgid "Configuration example" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:112(subtitle) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:137(subtitle) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:221(subtitle) -msgid "cinder.conf example file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:113(para) ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:138(para) -msgid "You can update the cinder.conf file by editing the necessary parameters as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:121(replaceable) -msgid "MDM_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:122(replaceable) -msgid "Default_domain" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:123(replaceable) -msgid "Default_pool" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:124(replaceable) -msgid "Domain1:Pool1,Domain2:Pool2" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:125(replaceable) -msgid "SIO_USER" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:126(replaceable) -msgid "SIO_PASSWD" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:130(title) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:291(caption) ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:215(title) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:170(title) -msgid "Configuration options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-scaleio-driver.xml:131(para) -msgid "The ScaleIO driver supports these configuration options:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/glusterfs-driver.xml:7(para) -msgid "GlusterFS is an open-source scalable distributed file system that is able to grow to petabytes and beyond in size. More information can be found on Gluster's homepage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/glusterfs-driver.xml:12(para) -msgid "This driver enables the use of GlusterFS in a similar fashion as NFS. It supports basic volume operations, including snapshot/clone." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/glusterfs-driver.xml:16(para) -msgid "You must use a Linux kernel of version 3.4 or greater (or version 2.6.32 or greater in Red Hat Enterprise Linux/CentOS 6.3+) when working with Gluster-based volumes. See Bug 1177103 for more information." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/glusterfs-driver.xml:22(para) -msgid "To use Block Storage with GlusterFS, first set the volume_driver in cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/glusterfs-driver.xml:26(para) -msgid "The following table contains the configuration options supported by the GlusterFS driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/tintri-volume-driver.xml:5(title) -msgid "Tintri" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/tintri-volume-driver.xml:6(para) -msgid "Tintri VMstore is a smart storage that sees, learns and adapts for cloud and virtualization. The Tintri Cinder driver will interact with configured VMstore running Tintri OS 4.0 and above. It supports various operations using Tintri REST APIs and NFS protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/tintri-volume-driver.xml:11(para) -msgid "To configure the use of a Tintri VMstore with Block Storage, perform the following actions:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/tintri-volume-driver.xml:14(para) -msgid "Edit the etc/cinder/cinder.conf file and set the cinder.volume.drivers.tintri options:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/tintri-volume-driver.xml:43(para) -msgid "Edit the etc/nova/nova.conf file, and set the :" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/tintri-volume-driver.xml:48(para) -msgid "Edit the /etc/cinder/nfs_shares file, and add the Tintri VMstore mount points associated with the configured VMstore management IP in the cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-sonas-7k-driver.xml:5(title) -msgid "IBM SONAS and Storwise V7000 volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-sonas-7k-driver.xml:6(para) -msgid "The IBM Storage Driver for OpenStack is a Block Storage driver that supports IBM SONAS and V7000 storage systems over NFS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-sonas-7k-driver.xml:10(para) -msgid "Set the following in your cinder.conf, and use the table of options to configure it." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:6(title) -msgid "Pure Storage iSCSI and Fibre Channel volume drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:8(para) -msgid "The Pure Storage FlashArray volume drivers for OpenStack Block Storage interact with configured Pure Storage arrays and support various operations." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:10(para) -msgid "Support for iSCSI storage protocol is available with the PureISCSIDriver Volume Driver class, and Fibre Channel with PureFCDriver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:12(para) -msgid "All drivers are compatible with Purity FlashArrays that support the REST API version 1.2, 1.3, or 1.4 (Purity 4.0.0 and newer)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:15(title) -msgid "Limitations and known issues" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:16(para) -msgid "If you do not set up the nodes hosting instances to use multipathing, all network connectivity will use a single physical port on the array. 
In addition to significantly limiting the available bandwidth, this means you do not have the high-availability and non-disruptive upgrade benefits provided by FlashArray." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:20(para) -msgid "Workaround: You must set up multipathing on your hosts." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:26(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:20(para) -msgid "Create, delete, attach, detach, retype, clone, and extend volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:29(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:23(para) -msgid "Create a volume from snapshot." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:32(para) ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:25(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:66(para) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:15(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:24(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:255(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:79(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:44(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:25(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:296(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:65(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:42(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:26(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:24(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:105(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:56(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:61(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:24(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:56(para) -msgid "Create, list, and delete volume snapshots." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:35(para) -msgid "Create, list, update, and delete consistency groups." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:38(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:71(para) -msgid "Create, list, and delete consistency group snapshots." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:41(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:93(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:45(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:29(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:129(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:88(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:93(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:88(para) -msgid "Manage and unmanage a volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:44(para) -msgid "Manage and unmanage a snapshot." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:47(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:84(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:42(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:65(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:314(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:32(para) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:80(para) -msgid "Get volume statistics." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:50(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:35(para) -msgid "Create a thin provisioned volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:55(title) -msgid "Configure OpenStack and Purity" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:56(para) -msgid "You need to configure both your Purity array and your OpenStack cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:58(para) -msgid "These instructions assume that the cinder-api and cinder-scheduler services are installed and configured in your OpenStack cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:64(title) -msgid "Configure the OpenStack Block Storage service" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:65(para) -msgid "In these steps, you will edit the cinder.conf file to configure OpenStack Block Storage service to enable multipathing and to use the Pure Storage FlashArray as back-end storage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:69(title) -msgid "Retrieve an API token from Purity" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:70(para) -msgid "The OpenStack Block Storage service configuration requires an API token from Purity. Actions performed by the volume driver use this token for authorization. Also, Purity logs the volume driver's actions as being performed by the user who owns this API token." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:73(para) -msgid "If you created a Purity user account that is dedicated to managing your OpenStack Block Storage volumes, copy the API token from that user account." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:75(para) -msgid "Use the appropriate create or list command below to display and copy the Purity API token:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:79(para) -msgid "To create a new API token:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:80(replaceable) ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:90(replaceable) -msgid "USER" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:81(para) ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:91(para) -msgid "The following is an example output:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:89(para) -msgid "To list an existing API token:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:101(para) -msgid "Copy the API token retrieved (902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 from the examples above) to use in the next step." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:105(title) -msgid "Edit the OpenStack Block Storage service configuration file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:106(para) -msgid "The following sample /etc/cinder/cinder.conf configuration lists the relevant settings for a typical Block Storage service using a single Pure Storage array:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:118(replaceable) ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:125(term) -msgid "PURE_VOLUME_DRIVER" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:119(replaceable) ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:132(term) -msgid "IP_PURE_MGMT" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:120(replaceable) ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:139(term) -msgid "PURE_API_TOKEN" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:122(para) -msgid "Replace the following variables accordingly:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:127(para) -msgid "Use either cinder.volume.drivers.pure.PureISCSIDriver for iSCSI or cinder.volume.drivers.pure.PureFCDriver for Fibre Channel connectivity." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:134(para) -msgid "The IP address of the Pure Storage array's management interface or a domain name that resolves to that IP address." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:141(para) -msgid "The Purity Authorization token that the volume driver uses to perform volume management on the Pure Storage array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:151(para) -msgid "The volume driver automatically creates purity host objects for initiators as needed. If CHAP authentication is enabled via the setting, you must ensure there are no manually created host objects with IQN's that will be used by the OpenStack Block Storage. The driver will only modify credentials on hosts that it manages." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/pure-storage-driver.xml:158(para) -msgid "If using the PureFCDriver it is recommended to use the OpenStack Block Storage Fibre Channel Zone Manager." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml:5(title) -msgid "SolidFire" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml:6(para) -msgid "The SolidFire Cluster is a high performance all SSD iSCSI storage device that provides massive scale out capability and extreme fault tolerance. A key feature of the SolidFire cluster is the ability to set and modify during operation specific QoS levels on a volume for volume basis. The SolidFire cluster offers this along with de-duplication, compression, and an architecture that takes full advantage of SSDs." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml:14(para) -msgid "To configure the use of a SolidFire cluster with Block Storage, modify your cinder.conf file as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/solidfire-volume-driver.xml:23(para) -msgid "Older versions of the SolidFire driver (prior to Icehouse) created a unique account prefixed with $cinder-volume-service-hostname-$tenant-id on the SolidFire cluster for each tenant. Unfortunately, this account formation resulted in issues for High Availability (HA) installations and installations where the cinder-volume service can move to a new node. The current default implementation does not experience this issue as no prefix is used. For installations created on a prior release, the OLD default behavior can be configured by using the keyword \"hostname\" in sf_account_prefix." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:4(title) -msgid "Huawei volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:5(para) -msgid "Huawei volume driver can be used to provide functions such as the logical volume and snapshot for virtual machines (VMs) in the OpenStack Block Storage Driver that supports iSCSI and Fibre Channel protocols." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:9(title) -msgid "Version mappings" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:10(para) -msgid "The following table describes the version mappings among the Block Storage driver, Huawei storage system, and OpenStack:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:13(caption) -msgid "Version mappings among the Block Storage driver and Huawei storage system" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:17(th) -msgid "Description (Volume Driver Version)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:18(th) -msgid "Storage System Version" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:19(th) -msgid "Volume Driver Version" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:24(para) -msgid "Create, delete, expand, attach, and detach volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:24(para) ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:31(para) -msgid "Create and delete a snapshot" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:26(para) ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:34(para) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:36(para) -msgid "Clone a volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:27(para) -msgid "QoS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:29(para) -msgid "OceanStor T series V1R5 C02/C30" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:30(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:47(para) -msgid "OceanStor T series V2R2 C00/C20/C30" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:31(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:48(para) -msgid "OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:32(para) -msgid "OceanStor 18500/18800V1R1C00/C20/C30 V3R3C00" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:35(para) -msgid "1.1.0" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:36(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:51(td) -msgid "1.2.0" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:40(para) -msgid "Volume Migration (version 1.2.0 or later)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:41(para) -msgid "Auto zoning (version 1.2.0 or later)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:42(para) -msgid "SmartTier (version 1.2.0 or later)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:43(para) -msgid "SmartCache (version 1.2.0 or later)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:44(para) -msgid "Smart Thin/Thick (version 1.2.0 or later)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:45(para) -msgid "SmartPartition 
(version 1.2.0 or later)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:49(para) -msgid "OceanStor 18500/18800V1R1C00/C20/C30" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:57(title) -msgid "Volume Driver Configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:58(para) -msgid "This section describes how to configure the Huawei volume driver for iSCSI storage and FC storage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:61(title) -msgid "Configuring the volume driver for iSCSI storage" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:62(para) -msgid "This section describes how to configure the volume driver for different products for iSCSI storage products." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:65(title) -msgid "Configuring the volume driver for T series V1 (iSCSI)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:68(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:129(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:189(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:249(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:615(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:677(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:737(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:798(para) -msgid "In /etc/cinder, create a Huawei-customized driver configuration file. The file format is XML." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:72(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:133(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:193(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:253(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:619(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:681(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:741(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:802(para) -msgid "Configure parameters in the driver configuration file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:102(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:161(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:221(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:282(para) -msgid "For details about the parameters in the configuration file, see section 1.2.1.5 \"Parameters in the Configuration File\"." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:111(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:171(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:231(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:292(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:659(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:719(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:779(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:840(para) -msgid "Configure the cinder.conf file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:112(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:172(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:232(para) -msgid "In the [default] block of /etc/cinder/cinder.conf, add the following contents. volume_driver indicates the loaded driver, and cinder_huawei_conf_file indicates the specified Huawei-customized configuration file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:120(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:180(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:240(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:301(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:668(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:728(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:788(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:849(para) -msgid "Run the service cinder-volume restart command to restart the Block Storage service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:126(title) -msgid "Configuring the volume driver for T series V2 (iSCSI)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:186(title) -msgid "Configuring the volume driver for V3 (iSCSI)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:246(title) -msgid "Configuring the volume driver for OceanStor 18000 series (iSCSI)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:293(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:660(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:720(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:780(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:841(para) -msgid "In the [default] block of /etc/cinder/cinder.conf, add the following contents. volume_driver indicates the loaded driver file, and cinder_huawei_conf_file indicates the specified Huawei-customized configuration file." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:307(title) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:855(title) -msgid "Parameters in the configuration file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:309(caption) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:857(caption) -msgid "Mandatory parameters" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:312(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:430(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:860(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:978(th) -msgid "Parameter" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:313(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:431(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:861(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:979(th) -msgid "Default value" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:315(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:433(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:863(th) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:981(th) -msgid "Applicable to" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:324(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:334(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:343(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:350(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:357(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:370(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:383(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:394(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:403(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:416(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:526(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:536(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:547(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:872(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:882(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:891(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:898(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:905(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:918(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:931(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:942(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:951(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:964(td) -msgid "-" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:327(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:875(para) -msgid "Type 
of a storage product. Possible values are T, 18000 and V3." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:330(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:339(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:375(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:389(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:398(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:406(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:412(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:418(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:446(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:475(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:488(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:531(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:542(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:878(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:887(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:923(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:937(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:946(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:954(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:960(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:966(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:994(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1023(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1036(td) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:128(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:132(para) -msgid "All" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:336(para) -msgid "Type of a connection protocol. The possible value is iSCSI." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:344(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:892(td) -msgid "IP address of the primary controller on an OceanStor T series V100R005 storage device." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:346(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:353(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:461(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:498(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:507(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:894(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:901(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1009(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1046(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1055(para) -msgid "T series V1" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:351(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:899(td) -msgid "IP address of the secondary controller on an OceanStor T series V100R005 storage device." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:358(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:906(td) -msgid "Access address of the REST interface, https://x.x.x.x/devicemanager/rest/. x.x.x.x indicates the management IP address. OceanStor 18000 uses the preceding setting, and V2 and V3 need to add port number 8088, for example, https://x.x.x.x:8088/deviceManager/rest/." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:363(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:515(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:522(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:552(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:911(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1063(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1070(td) -msgid "T series V2 V3 18000" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:373(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:921(para) -msgid "User name of a storage administrator." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:386(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:934(para) -msgid "Password of a storage administrator." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:395(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:943(td) -msgid "Name of a storage pool to be used. If you need to configure multiple storage pools, separate them by semicolons (;)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:404(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:952(td) -msgid "Default IP address of the iSCSI target port that is provided for computing nodes." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:410(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:958(td) -msgid "Linux" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:411(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:959(td) -msgid "Operating system of the Nova computer node's host." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:417(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:965(td) -msgid "IP address of the Nova computer node's host." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:423(title) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:971(title) -msgid "Note for the parameters" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:424(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:972(para) -msgid "The value of StoragePool cannot contain Chinese characters." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:427(caption) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:975(caption) -msgid "Optional parameters" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:442(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:990(para) -msgid "Thin" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:444(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:992(td) -msgid "Type of the LUNs to be created. The value can be Thick or Thin." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:455(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1003(para) -msgid "64" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:458(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1006(para) -msgid "Stripe depth of a LUN to be created. The unit is KB. This parameter is invalid when a thin LUN is created." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:469(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:482(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1017(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1030(para) ./doc/config-reference/conf-changes/neutron.xml:327(td) -msgid "1" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:472(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1020(para) -msgid "Cache write type, possible values are: 1 (write back), 2 (write through), and 3 (mandatory write back)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:485(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1033(para) -msgid "Cache mirroring or not, possible values are: 0 (without mirroring) or 1 (with mirroring)." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:492(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1040(td) -msgid "3" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:494(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1042(para) -msgid "Cache prefetch policy, possible values are: 0 (no prefetch), 1 (fixed prefetch), 2 (variable prefetch) or 3 (intelligent prefetch)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:502(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1050(td) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:385(para) ./doc/config-reference/conf-changes/cinder.xml:816(td) ./doc/config-reference/conf-changes/manila.xml:380(td) ./doc/config-reference/conf-changes/keystone.xml:284(td) ./doc/config-reference/conf-changes/ironic.xml:313(td) ./doc/config-reference/conf-changes/sahara.xml:257(td) ./doc/config-reference/conf-changes/nova.xml:343(td) ./doc/config-reference/conf-changes/neutron.xml:286(td) ./doc/config-reference/conf-changes/neutron.xml:326(td) ./doc/config-reference/conf-changes/neutron.xml:371(td) ./doc/config-reference/conf-changes/ceilometer.xml:301(td) ./doc/config-reference/conf-changes/trove.xml:404(td) ./doc/config-reference/conf-changes/glance.xml:199(td) ./doc/config-reference/conf-changes/glance.xml:244(td) ./doc/config-reference/conf-changes/heat.xml:297(td) ./doc/config-reference/conf-changes/heat.xml:307(td) -msgid "0" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:504(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1052(para) -msgid "Cache prefetch value." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:513(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1061(td) -msgid "After LUN copy is enabled, the plug-in frequently queries the copy progress. You can set a value to specify the query interval." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:519(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1067(td) -msgid "432000" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:520(td) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1068(td) -msgid "Timeout interval for waiting LUN copy of a storage device to complete. The unit is second." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:528(para) -msgid "Name of a compute node initiator." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:538(para) -msgid "IP address of the iSCSI port provided for compute nodes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:549(para) -msgid "IP address of the iSCSI target port that is provided for computing nodes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:558(title) -msgid "Configuring iSCSI Multipathing" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:561(para) -msgid "Create a port group on the storage device using the DeviceManager, add service links that require multipathing into the port group." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:565(para) -msgid "Log in to the storage device using CLI commands, and enable the multiport discovery switch in the multipathing." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:570(para) -msgid "Add the port group settings in the Huawei-customized driver configuration file, and configure the port group name needed by an initiator." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:579(para) -msgid "Enable the multipathing switch of the OpenStack Nova module." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:580(para) -msgid "If the version of OpenStack is Havana and IceHouse, add libvirt_iscsi_use_multipath = True in [default] of /etc/nova/nova.conf. If the version of OpenStack is Juno, Kilo, and Liberty, add iscsi_use_multipath = True in [libvirt] of /etc/nova/nova.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:585(para) -msgid "Run the service nova-compute restart command to restart the nova-compute service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:591(title) -msgid "Configuring CHAP and ALUA" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:592(para) -msgid "On a public network, any application server whose IP address resides on the same network segment as that of the storage system's iSCSI host port can access the storage system and perform read and write operations in it. This poses risks to the data security of the storage system. To ensure the storage system access security, you can configure CHAP authentication to control application servers' access to the storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:597(para) -msgid "Configure CHAP and ALUA." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:598(para) -msgid "Configure the driver configuration file as follows: ALUA indicates a multipathing mode. 0 indicates that ALUA is disabled. 1 indicates that ALUA is enabled. CHAPinfo indicates the user name and password authenticated by CHAP. The format is mmuser; mm-user@storage. The user name and password are separated by semicolons (;)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:607(title) -msgid "Configuring the volume driver (Fibre Channel)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:608(para) -msgid "This section describes how to configure volume drivers for different products for the Fibre Channel products." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:610(para) -msgid "For a Fibre Channel network, the sg tool must be installed so that storage resources can be used." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:612(title) -msgid "Configuring the volume driver for T series V1 (Fibre Channel)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:649(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:709(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:769(para) ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:831(para) -msgid "For details about the parameters in the configuration file, see section 1.2.2.5 \"Parameters in the Configuration File\"" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:674(title) -msgid "Configuring the volume driver for T series V2 (Fibre Channel)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:734(title) -msgid "Configuring the volume driver for V3 (Fibre Channel)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:794(title) -msgid "Configuring the volume driver for OceanStor 18000 series (Fibre Channel)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:884(para) -msgid "Type of a connection protocol. The possible value is FC." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1077(title) -msgid "Configuring multi-storage support" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/huawei-storage-driver.xml:1078(para) -msgid "Example for configuring multiple storage systems:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:5(title) -msgid "IBM Storwize family and SVC volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:6(para) -msgid "The volume management driver for Storwize family and SAN Volume Controller (SVC) provides OpenStack Compute instances with access to IBM Storwize family or SVC storage systems." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:11(title) -msgid "Configure the Storwize family and SVC system" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:14(para) -msgid "The Storwize family or SVC system must be configured for iSCSI, Fibre Channel, or both." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:16(para) -msgid "If using iSCSI, each Storwize family or SVC node should have at least one iSCSI IP address. The IBM Storwize/SVC driver uses an iSCSI IP address associated with the volume's preferred node (if available) to attach the volume to the instance, otherwise it uses the first available iSCSI IP address of the system. The driver obtains the iSCSI IP address directly from the storage system; you do not need to provide these iSCSI IP addresses directly to the driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:27(para) -msgid "If using iSCSI, ensure that the compute nodes have iSCSI network access to the Storwize family or SVC system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:32(para) -msgid "OpenStack Nova's Grizzly version supports iSCSI multipath. Once this is configured on the Nova host (outside the scope of this documentation), multipath is enabled." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:37(para) -msgid "If using Fibre Channel (FC), each Storwize family or SVC node should have at least one WWPN port configured. If the storwize_svc_multipath_enabled flag is set to True in the Cinder configuration file, the driver uses all available WWPNs to attach the volume to the instance (details about the configuration flags appear in the next section). If the flag is not set, the driver uses the WWPN associated with the volume's preferred node (if available), otherwise it uses the first available WWPN of the system. The driver obtains the WWPNs directly from the storage system; you do not need to provide these WWPNs directly to the driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:54(para) -msgid "If using FC, ensure that the compute nodes have FC connectivity to the Storwize family or SVC system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:60(title) -msgid "iSCSI CHAP authentication" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:68(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:286(para) -msgid "CHAP secrets are added to existing hosts as well as newly-created ones. If the CHAP option is enabled, hosts will not be able to access the storage without the generated secrets." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:75(para) -msgid "Not all OpenStack Compute drivers support CHAP authentication. Please check compatibility before using." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:80(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:279(para) -msgid "CHAP secrets are passed from OpenStack Block Storage to Compute in clear text. This communication should be secured to ensure that CHAP secrets are not discovered." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:61(para) -msgid "If using iSCSI for data access and the storwize_svc_iscsi_chap_enabled is set to True, the driver will associate randomly-generated CHAP secrets with all hosts on the Storwize family system. OpenStack compute nodes use these secrets when creating iSCSI connections. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:87(title) -msgid "Configure storage pools" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:88(para) -msgid "Each instance of the IBM Storwize/SVC driver allocates all volumes in a single pool. The pool should be created in advance and be provided to the driver using the storwize_svc_volpool_name configuration flag. Details about the configuration flags and how to provide the flags to the driver appear in the next section." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:99(title) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:23(title) -msgid "Configure user authentication for the driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:101(para) -msgid "The driver requires access to the Storwize family or SVC system management interface. The driver communicates with the management using SSH. 
The driver should be provided with the Storwize family or SVC management IP using the san_ip flag, and the management port should be provided by the san_ssh_port flag. By default, the port value is configured to be port 22 (SSH)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:111(para) -msgid "Make sure the compute node running the cinder-volume management driver has SSH network access to the storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:118(para) -msgid "To allow the driver to communicate with the Storwize family or SVC system, you must provide the driver with a user on the storage system. The driver has two authentication methods: password-based authentication and SSH key pair authentication. The user should have an Administrator role. It is suggested to create a new user for the management driver. Please consult with your storage and security administrator regarding the preferred authentication method and how passwords or SSH keys should be stored in a secure manner." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:129(para) -msgid "When creating a new user on the Storwize or SVC system, make sure the user belongs to the Administrator group or to another group that has an Administrator role." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:134(para) -msgid "If using password authentication, assign a password to the user on the Storwize or SVC system. The driver configuration flags for the user and password are san_login and san_password, respectively." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:140(para) -msgid "If you are using the SSH key pair authentication, create SSH private and public keys using the instructions below or by any other method. Associate the public key with the user by uploading the public key: select the \"choose file\" option in the Storwize family or SVC management GUI under \"SSH public key\". Alternatively, you may associate the SSH public key using the command line interface; details can be found in the Storwize and SVC documentation. The private key should be provided to the driver using the san_private_key configuration flag." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:154(title) -msgid "Create a SSH key pair with OpenSSH" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:155(para) -msgid "You can create an SSH key pair using OpenSSH, by running:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:158(para) -msgid "The command prompts for a file to save the key pair. For example, if you select 'key' as the filename, two files are created: key and key.pub. The key file holds the private SSH key and key.pub holds the public SSH key." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:165(para) -msgid "The command also prompts for a pass phrase, which should be empty." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:167(para) -msgid "The private key file should be provided to the driver using the san_private_key configuration flag. The public key should be uploaded to the Storwize family or SVC system using the storage management GUI or command line interface." 
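A minimal sketch of the key-based setup described above, assuming illustrative paths and values (the generated file names, the key location, and the user name are examples only):

    $ ssh-keygen -t rsa
    # When prompted for a file name, enter "key"; this writes the private key to
    # "key" and the public key to "key.pub". Leave the passphrase empty.

    # cinder.conf (illustrative values only)
    [DEFAULT]
    san_ip = 192.168.0.10               # Storwize family or SVC management IP or host name
    san_ssh_port = 22                   # default SSH management port
    san_login = openstack               # user with the Administrator role
    san_private_key = /etc/cinder/key   # private key file; upload key.pub to the storage system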
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:173(para) -msgid "Ensure that Cinder has read permissions on the private key file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:179(title) -msgid "Configure the Storwize family and SVC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:181(title) -msgid "Enable the Storwize family and SVC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:182(para) -msgid "Set the volume driver to the Storwize family and SVC driver by setting the volume_driver option in cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:189(title) -msgid "Storwize family and SVC driver options in cinder.conf" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:191(para) -msgid "The following options specify default values for all volumes. Some can be over-ridden using volume types, which are described below." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:195(caption) -msgid "List of configuration flags for Storwize storage and SVC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:203(th) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:87(th) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:188(th) -msgid "Flag name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:204(th) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:217(td) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:299(td) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:88(th) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:189(th) -msgid "Type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:205(th) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:218(td) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:300(td) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:89(th) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:190(th) -msgid "Default" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:211(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:95(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:196(literal) -msgid "san_ip" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:213(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:228(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:266(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:227(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:261(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:308(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:343(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:352(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:96(para) 
./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:108(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:114(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:120(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:126(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:197(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:209(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:215(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:221(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:251(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:264(para) -msgid "Required" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:215(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:98(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:199(para) -msgid "Management IP or host name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:219(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:101(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:202(literal) -msgid "san_ssh_port" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:221(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:275(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:299(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:310(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:334(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:345(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:366(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:384(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:394(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:415(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:426(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:436(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:452(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:477(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:320(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:365(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:406(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:102(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:133(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:203(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:230(para) -msgid "Optional" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:222(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:103(para) 
./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:204(para) -msgid "22" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:223(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:104(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:205(para) -msgid "Management port" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:226(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:107(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:208(literal) -msgid "san_login" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:230(para) -msgid "Management login username" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:234(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:113(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:214(literal) -msgid "san_password" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:238(para) -msgid "The authentication requires either a password (san_password) or SSH private key (san_private_key). One must be specified. If both are specified, the driver uses only the SSH private key." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:236(para) -msgid "Required " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:248(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:116(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:217(para) -msgid "Management login password" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:252(literal) -msgid "san_private_key" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:254(para) -msgid "Required " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:258(para) -msgid "Management login SSH private key" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:264(literal) -msgid "storwize_svc_volpool_name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:268(para) -msgid "Default pool name for volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:273(literal) -msgid "storwize_svc_vol_rsize" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:276(para) -msgid "2" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:280(para) -msgid "The driver creates thin-provisioned volumes by default. The storwize_svc_vol_rsize flag defines the initial physical allocation percentage for thin-provisioned volumes, or if set to -1, the driver creates full allocated volumes. More details about the available options are available in the Storwize family and SVC documentation." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:277(para) -msgid "Initial physical allocation (percentage) " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:297(literal) -msgid "storwize_svc_vol_warning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:300(para) -msgid "0 (disabled)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:301(para) -msgid "Space allocation warning threshold (percentage) " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:308(literal) -msgid "storwize_svc_vol_autoexpand" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:311(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:367(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:427(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:453(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:332(literal) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:793(literal) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:830(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:134(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:231(para) ./doc/config-reference/conf-changes/cinder.xml:787(td) ./doc/config-reference/conf-changes/cinder.xml:792(td) ./doc/config-reference/conf-changes/cinder.xml:797(td) ./doc/config-reference/conf-changes/manila.xml:335(td) ./doc/config-reference/conf-changes/manila.xml:340(td) ./doc/config-reference/conf-changes/manila.xml:366(td) ./doc/config-reference/conf-changes/manila.xml:371(td) ./doc/config-reference/conf-changes/keystone.xml:195(td) ./doc/config-reference/conf-changes/keystone.xml:200(td) ./doc/config-reference/conf-changes/ironic.xml:299(td) ./doc/config-reference/conf-changes/ironic.xml:304(td) ./doc/config-reference/conf-changes/sahara.xml:243(td) ./doc/config-reference/conf-changes/sahara.xml:248(td) ./doc/config-reference/conf-changes/nova.xml:319(td) ./doc/config-reference/conf-changes/nova.xml:324(td) ./doc/config-reference/conf-changes/nova.xml:334(td) ./doc/config-reference/conf-changes/neutron.xml:297(td) ./doc/config-reference/conf-changes/neutron.xml:317(td) ./doc/config-reference/conf-changes/neutron.xml:337(td) ./doc/config-reference/conf-changes/neutron.xml:342(td) ./doc/config-reference/conf-changes/neutron.xml:347(td) ./doc/config-reference/conf-changes/neutron.xml:367(td) ./doc/config-reference/conf-changes/ceilometer.xml:282(td) ./doc/config-reference/conf-changes/ceilometer.xml:287(td) ./doc/config-reference/conf-changes/ceilometer.xml:297(td) ./doc/config-reference/conf-changes/trove.xml:375(td) ./doc/config-reference/conf-changes/trove.xml:380(td) ./doc/config-reference/conf-changes/trove.xml:435(td) ./doc/config-reference/conf-changes/glance.xml:230(td) ./doc/config-reference/conf-changes/glance.xml:235(td) ./doc/config-reference/conf-changes/heat.xml:267(td) ./doc/config-reference/conf-changes/heat.xml:288(td) ./doc/config-reference/conf-changes/heat.xml:293(td) -msgid "True" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:314(para) -msgid "Defines whether thin-provisioned volumes can be auto expanded by the storage system, a value of True means that auto expansion is enabled, a 
value of False disables auto expansion. Details about this option can be found in the –autoexpand flag of the Storwize family and SVC command line interface mkvdisk command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:312(para) -msgid "Enable or disable volume auto expand " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:332(literal) -msgid "storwize_svc_vol_grainsize" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:335(para) -msgid "256" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:336(para) -msgid "Volume grain size in KB" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:342(literal) -msgid "storwize_svc_vol_compression" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:350(para) -msgid "Defines whether Real-time Compression is used for the volumes created with OpenStack. Details on Real-time Compression can be found in the Storwize family and SVC documentation. The Storwize or SVC system must have compression enabled for this feature to work." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:347(para) -msgid "Enable or disable Real-time Compression " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:364(literal) -msgid "storwize_svc_vol_easytier" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:370(para) -msgid "Defines whether Easy Tier is used for the volumes created with OpenStack. Details on EasyTier can be found in the Storwize family and SVC documentation. The Storwize or SVC system must have Easy Tier enabled for this feature to work." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:368(para) -msgid "Enable or disable Easy Tier " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:382(literal) -msgid "storwize_svc_vol_iogrp" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:386(para) -msgid "The I/O group in which to allocate vdisks" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:391(literal) -msgid "storwize_svc_flashcopy_timeout" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:395(para) -msgid "120" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:398(para) -msgid "The driver wait timeout threshold when creating an OpenStack snapshot. This is actually the maximum amount of time that the driver waits for the Storwize family or SVC system to prepare a new FlashCopy mapping. The driver accepts a maximum wait time of 600 seconds (10 minutes)." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:396(para) -msgid "FlashCopy timeout threshold (seconds)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:412(literal) -msgid "storwize_svc_connection_protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:416(para) ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:55(title) -msgid "iSCSI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:417(para) -msgid "Connection protocol to use (currently supports 'iSCSI' or 'FC')" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:423(literal) -msgid "storwize_svc_iscsi_chap_enabled" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:428(para) -msgid "Configure CHAP authentication for iSCSI connections" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:433(literal) -msgid "storwize_svc_multipath_enabled" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:440(para) -msgid "Multipath for iSCSI connections requires no storage-side configuration and is enabled if the compute host has multipath configured." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:438(para) -msgid "Enable multipath for FC connections " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:449(literal) -msgid "storwize_svc_multihost_enabled" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:457(para) -msgid "This option allows the driver to map a vdisk to more than one host at a time. This scenario occurs during migration of a virtual machine with an attached volume; the volume is simultaneously mapped to both the source and destination compute hosts. If your deployment does not require attaching vdisks to multiple hosts, setting this flag to False will provide added safety." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:454(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:136(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:233(para) -msgid "Enable mapping vdisks to multiple hosts " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:474(literal) -msgid "storwize_svc_vol_nofmtdisk" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:481(para) -msgid "Defines whether or not the fast formatting of thick-provisioned volumes is disabled at creation. The default value is False and a value of True means that fast format is disabled. Details about this option can be found in the –nofmtdisk flag of the Storwize family and SVC command line interface mkvdisk command." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:479(para) -msgid "Enable or disable fast format " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:504(title) -msgid "Placement with volume types" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:505(para) -msgid "The IBM Storwize/SVC driver exposes capabilities that can be added to the extra specs of volume types, and used by the filter scheduler to determine placement of new volumes. Make sure to prefix these keys with capabilities: to indicate that the scheduler should use them. The following extra specs are supported:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:516(para) -msgid "capabilities:volume_back-end_name - Specify a specific back-end where the volume should be created. The back-end name is a concatenation of the name of the IBM Storwize/SVC storage system as shown in lssystem, an underscore, and the name of the pool (mdisk group). For example: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:527(para) -msgid "capabilities:compression_support - Specify a back-end according to compression support. A value of True should be used to request a back-end that supports compression, and a value of False will request a back-end that does not support compression. If you do not have constraints on compression support, do not set this key. Note that specifying True does not enable compression; it only requests that the volume be placed on a back-end that supports compression. Example syntax: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:543(para) -msgid "capabilities:easytier_support - Similar semantics as the compression_support key, but for specifying according to support of the Easy Tier feature. Example syntax: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:551(para) -msgid "capabilities:storage_protocol - Specifies the connection protocol used to attach volumes of this type to instances. Legal values are iSCSI and FC. This extra specs value is used for both placement and setting the protocol used for this volume. In the example syntax, note <in> is used as opposed to <is> used in the previous examples. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:566(title) -msgid "Configure per-volume creation options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:567(para) -msgid "Volume types can also be used to pass options to the IBM Storwize/SVC driver, which over-ride the default values set in the configuration file. Contrary to the previous examples where the \"capabilities\" scope was used to pass parameters to the Cinder scheduler, options can be passed to the IBM Storwize/SVC driver with the \"drivers\" scope." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:574(para) -msgid "The following extra specs keys are supported by the IBM Storwize/SVC driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:578(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:718(para) -msgid "rsize" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:581(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:721(para) -msgid "warning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:584(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:724(para) -msgid "autoexpand" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:587(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:727(para) -msgid "grainsize" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:590(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:730(para) -msgid "compression" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:593(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:733(para) -msgid "easytier" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:596(para) -msgid "multipath" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:599(para) ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:736(para) -msgid "iogrp" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:602(para) -msgid "These keys have the same semantics as their counterparts in the configuration file. They are set similarly; for example, rsize=2 or compression=False." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:608(title) -msgid "Example: Volume types" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:609(para) -msgid "In the following example, we create a volume type to specify a controller that supports iSCSI and compression, to use iSCSI when attaching the volume, and to enable compression:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:615(para) -msgid "We can then create a 50GB volume using this type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:619(para) -msgid "Volume types can be used, for example, to provide users with different" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:623(para) -msgid "performance levels (such as, allocating entirely on an HDD tier, using Easy Tier for an HDD-SDD mix, or allocating entirely on an SSD tier)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:629(para) -msgid "resiliency levels (such as, allocating volumes in pools with different RAID levels)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:634(para) -msgid "features (such as, enabling/disabling Real-time Compression)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:640(title) -msgid "QOS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:641(para) -msgid "The Storwize driver provides QOS support for storage volumes by controlling the I/O amount. QOS is enabled by editing the etc/cinder/cinder.conf file and setting the storwize_svc_allow_tenant_qos to True." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:646(para) -msgid "There are three ways to set the Storwize IOThrotting parameter for storage volumes:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:650(para) -msgid "Add the qos:IOThrottling key into a QOS specification and associate it with a volume type." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:655(para) -msgid "Add the qos:IOThrottling key into an extra specification with a volume type." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:659(para) -msgid "Add the qos:IOThrottling key to the storage volume metadata." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:664(para) -msgid "If you are changing a volume type with QOS to a new volume type without QOS, the QOS configuration settings will be removed." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:671(title) -msgid "Operational notes for the Storwize family and SVC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:674(title) -msgid "Migrate volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:675(para) -msgid "In the context of OpenStack Block Storage's volume migration feature, the IBM Storwize/SVC driver enables the storage's virtualization technology. When migrating a volume from one pool to another, the volume will appear in the destination pool almost immediately, while the storage moves the data in the background." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:683(para) -msgid "To enable this feature, both pools involved in a given volume migration must have the same values for extent_size. If the pools have different values for extent_size, the data will still be moved directly between the pools (not host-side copy), but the operation will be synchronous." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:694(title) -msgid "Extend volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:695(para) -msgid "The IBM Storwize/SVC driver allows for extending a volume's size, but only for volumes without snapshots." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:700(title) ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:171(title) -msgid "Snapshots and clones" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:701(para) -msgid "Snapshots are implemented using FlashCopy with no background copy (space-efficient). Volume clones (volumes created from existing volumes) are implemented with FlashCopy, but with background copy enabled. This means that volume clones are independent, full copies. While this background copy is taking place, attempting to delete or extend the source volume will result in that operation waiting for the copy to complete." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:712(title) -msgid "Volume retype" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:713(para) -msgid "The IBM Storwize/SVC driver enables you to modify volume types. When you modify volume types, you can also change these extra specs properties:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:739(para) -msgid "nofmtdisk" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:743(para) -msgid "When you change the rsize, grainsize or compression properties, volume copies are asynchronously synchronized on the array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-storwize-svc-driver.xml:750(para) -msgid "To change the iogrp property, IBM Storwize/SVC firmware version 6.4.0 or later is required." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:6(title) -msgid "Dell Storage Center Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:7(para) -msgid "The Dell Storage Center volume driver interacts with configured Storage Center arrays." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:9(para) -msgid "The Dell Storage Center driver manages Storage Center arrays through Enterprise Manager. Enterprise Manager connection settings and Storage Center options are defined in the cinder.conf file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:13(para) -msgid "Prerequisite: Dell Enterprise Manager 2015 R1 or later must be used." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:18(para) -msgid "The Dell Storage Center volume driver provides the following Cinder volume operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:22(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:21(para) -msgid "Create, delete, attach (map), and detach (unmap) volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:28(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:69(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:27(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:262(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:82(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:47(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:28(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:299(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:68(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:60(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:27(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:108(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:60(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:65(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:27(para) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:65(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:60(para) -msgid "Create a volume from a snapshot." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:31(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:72(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:30(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:265(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:85(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:50(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:31(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:302(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:71(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:45(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:30(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:111(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:64(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:69(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:30(para) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:71(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:64(para) -msgid "Copy an image to a volume." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:34(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:75(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:33(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:278(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:88(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:34(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:305(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:74(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:48(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:33(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:114(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:68(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:73(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:33(para) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:68(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:68(para) -msgid "Copy a volume to an image." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:37(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:78(para) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:18(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:36(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:293(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:91(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:53(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:37(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:308(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:77(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:51(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:36(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:117(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:72(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:77(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:36(para) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:74(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:72(para) -msgid "Clone a volume." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:40(para) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:81(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:39(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:322(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:94(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:56(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:40(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:311(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:80(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:54(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:39(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:120(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:76(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:81(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:39(para) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:77(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:76(para) -msgid "Extend a volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:45(title) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:586(title) -msgid "Extra spec options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:46(para) -msgid "Volume type extra specs can be used to select different Storage Profiles." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:48(para) -msgid "Storage Profiles control how Storage Center manages volume data. For a given volume, the selected Storage Profile dictates which disk tier accepts initial writes, as well as how data progression moves data between tiers to balance performance and cost. Predefined Storage Profiles are the most effective way to manage data in Storage Center." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:54(para) -msgid "By default, if no Storage Profile is specified in the volume extra specs, the default Storage Profile for the user account configured for the Block Storage driver is used. The extra spec key storagetype:storageprofile with the value of the name of the Storage Profile on the Storage Center can be set to allow to use Storage Profiles other than the default." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:60(para) -msgid "For ease of use from the command line, spaces in Storage Profile names are ignored. 
As an example, here is how to define two volume types using the High Priority and Low Priority Storage Profiles:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:70(title) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:165(title) -msgid "iSCSI configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:73(title) -msgid "Sample iSCSI Configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:71(para) -msgid "Use the following instructions to update the configuration file for iSCSI: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:106(title) -msgid "Fibre Channel configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:109(title) -msgid "Sample FC configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:107(para) -msgid "Use the following instructions to update the configuration file for fibre channel: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-storagecenter-driver.xml:140(para) -msgid "The following table contains the configuration options specific to the Dell Storage Center volume driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:4(title) -msgid "HPE LeftHand/StoreVirtual driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:5(para) -msgid "The HPELeftHandISCSIDriver is based on the Block Storage service (Cinder) plug-in architecture. Volume operations are run by communicating with the HPE LeftHand/StoreVirtual system over HTTPS, or SSH connections. HTTPS communications use the python-lefthandclient, which is part of the Python standard library." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:15(para) -msgid "The HPELeftHandISCSIDriver can be configured to run using a REST client to communicate with the array. For performance improvements and new functionality the python-lefthandclient must be downloaded, and HP LeftHand/StoreVirtual Operating System software version 11.5 or higher is required on the array. To configure the driver, see ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:27(para) -msgid "For information about how to manage HPE LeftHand/StoreVirtual storage systems, see the HPE LeftHand/StoreVirtual user documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:31(title) -msgid "HPE LeftHand/StoreVirtual REST driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:32(para) -msgid "This section describes how to configure the HPE LeftHand/StoreVirtual Cinder driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:37(para) -msgid "To use the HPE LeftHand/StoreVirtual driver, do the following:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:42(para) -msgid "Install LeftHand/StoreVirtual Operating System software version 11.5 or higher on the HPE LeftHand/StoreVirtual storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:48(para) -msgid "Create a cluster group." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:51(para) -msgid "Install the python-lefthandclient version 2.0.0 from the Python Package Index on the system with the enabled Block Storage service volume drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:63(para) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:12(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:21(para) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:246(para) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:76(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:41(para) ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:22(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:293(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:62(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:39(para) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:21(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:102(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:52(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:57(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:52(para) -msgid "Create, delete, attach, and detach volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:87(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:123(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:80(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:85(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:80(para) -msgid "Migrate a volume with back-end assistance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:90(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:62(para) ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:57(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:126(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:84(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:89(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:84(para) -msgid "Retype a volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:96(para) -msgid "When you use back-end assisted volume migration, both source and destination clusters must be in the same HPE LeftHand/StoreVirtual management group. The HPE LeftHand/StoreVirtual array will use native LeftHand APIs to migrate the volume. The volume cannot be attached or have snapshots to migrate." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:103(para) -msgid "Volume type support for the driver includes the ability to set the following capabilities in the OpenStack Cinder API cinder.api.contrib.types_extra_specs volume type extra specs extension module." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:112(literal) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:157(term) -msgid "hpelh:provisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:117(literal) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:140(term) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:163(term) -msgid "hpelh:ao" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:122(literal) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:144(term) ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:169(term) -msgid "hpelh:data_pl" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:126(para) -msgid "To work with the default filter scheduler, the key-value pairs are case-sensitive and scoped with 'hpelh:'. For information about how to set the key-value pairs and associate them with a volume type, run the following command:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:137(para) -msgid "The following keys require the HPE LeftHand/StoreVirtual storage array be configured for" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:141(para) -msgid "The HPE LeftHand/StoreVirtual storage array must be configured for Adaptive Optimization." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:145(para) -msgid "The HPE LeftHand/StoreVirtual storage array must be able to support the Data Protection level specified by the extra spec." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:154(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:177(para) -msgid "If volume types are not used or a particular key is not set for a volume type, the following defaults are used:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:158(para) -msgid "Defaults to thin provisioning, the valid values are, thin and full" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:164(para) -msgid "Defaults to true, the valid values are, true and false." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:170(para) -msgid "Defaults to r-0, Network RAID-0 (None), the valid values are," -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:173(para) -msgid "r-0, Network RAID-0 (None)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:177(para) -msgid "r-5, Network RAID-5 (Single Parity)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:181(para) -msgid "r-10-2, Network RAID-10 (2-Way Mirror)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:185(para) -msgid "r-10-3, Network RAID-10 (3-Way Mirror)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:189(para) -msgid "r-10-4, Network RAID-10 (4-Way Mirror)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:193(para) -msgid "r-6, Network RAID-6 (Dual Parity)," -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:205(title) -msgid "Enable the HPE LeftHand/StoreVirtual iSCSI driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:207(para) -msgid "The HPELeftHandISCSIDriver is installed with the OpenStack software." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:214(para) -msgid "Install the python-lefthandclient Python package on the OpenStack Block Storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:222(para) -msgid "If you are not using an existing cluster, create a cluster on the HPE LeftHand storage system to be used as the cluster for creating volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:228(para) -msgid "Make the following changes in the /etc/cinder/cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:233(emphasis) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:386(emphasis) -msgid "## REQUIRED SETTINGS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:236(replaceable) -msgid "https://10.10.0.141:8081/lhos" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:239(replaceable) -msgid "lhuser" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:242(replaceable) -msgid "lhpass" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:245(replaceable) -msgid "ClusterLefthand" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:250(emphasis) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:421(emphasis) -msgid "## OPTIONAL SETTINGS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:266(para) -msgid "You can enable only one driver on each cinder instance unless you enable multiple back end support. See the Cinder multiple back end support instructions to enable this feature." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:272(para) -msgid "If the is set to true, the driver will associate randomly-generated CHAP secrets with all hosts on the HPE LeftHand/StoreVirtual system. OpenStack Compute nodes use these secrets when creating iSCSI connections. 
" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:295(para) ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:471(para) -msgid "Save the changes to the cinder.conf file and restart the cinder-volume service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-lefthand-driver.xml:303(para) -msgid "The HPE LeftHand/StoreVirtual driver is now enabled on your OpenStack system. If you experience problems, review the Block Storage service log files for errors." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:6(title) -msgid "Dell EqualLogic volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:7(para) -msgid "The Dell EqualLogic volume driver interacts with configured EqualLogic arrays and supports various operations." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:21(para) -msgid "The OpenStack Block Storage service supports:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:23(para) -msgid "Multiple instances of Dell EqualLogic Groups or Dell EqualLogic Group Storage Pools and multiple pools on a single array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:26(para) -msgid "Multiple instances of Dell EqualLogic Groups or Dell EqualLogic Group Storage Pools or multiple pools on a single array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:30(para) -msgid "The Dell EqualLogic volume driver's ability to access the EqualLogic Group is dependent upon the generic block storage driver's SSH settings in the /etc/cinder/cinder.conf file (see for reference)." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:35(para) -msgid "The following sample /etc/cinder/cinder.conf configuration lists the relevant settings for a typical Block Storage service using a single Dell EqualLogic Group:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:38(title) -msgid "Default (single-instance) configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:43(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:65(term) -msgid "IP_EQLX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:44(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:72(term) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:144(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:153(replaceable) -msgid "SAN_UNAME" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:45(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:79(term) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:145(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:154(replaceable) -msgid "SAN_PW" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:46(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:87(term) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:146(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:155(replaceable) -msgid "EQLX_GROUP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:47(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:94(term) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:147(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:156(replaceable) -msgid "EQLX_POOL" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:51(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:52(replaceable) -msgid "true|false" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:53(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:103(term) -msgid "EQLX_UNAME" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:54(replaceable) ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:111(term) -msgid "EQLX_PW" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:58(replaceable) -msgid "SAN_KEY_PATH" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:62(para) -msgid "In this example, replace the following variables accordingly:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:67(para) -msgid "The IP address used to reach the Dell EqualLogic Group through SSH. This field has no default value." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:74(para) -msgid "The user name to login to the Group manager via SSH at the san_ip. Default user name is grpadmin." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:81(para) -msgid "The corresponding password of SAN_UNAME. Not used when san_private_key is set. Default password is password." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:89(para) -msgid "The group to be used for a pool where the Block Storage service will create volumes and snapshots. Default group is group-0." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:96(para) -msgid "The pool where the Block Storage service will create volumes and snapshots. Default pool is default. This option cannot be used for multiple pools utilized by the Block Storage service on a single Dell EqualLogic Group." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:105(para) -msgid "The CHAP login account for each volume in a pool, if eqlx_use_chap is set to true. Default account name is chapadmin." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:113(para) -msgid "The corresponding password of EQLX_UNAME. The default password is randomly generated in hexadecimal, so you must set this password manually." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:119(term) -msgid "SAN_KEY_PATH (optional)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:121(para) -msgid "The filename of the private key used for SSH authentication. This provides password-less login to the EqualLogic Group. Not used when san_password is set. There is no default value." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:128(para) -msgid "In addition, enable thin provisioning for SAN volumes using the default san_thin_provision = true setting." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:132(title) -msgid "Multi back-end Dell EqualLogic configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:133(para) -msgid "The following example shows the typical configuration for a Block Storage service that uses two Dell EqualLogic back ends:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:138(replaceable) -msgid "true" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:142(replaceable) -msgid "backend1" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:143(replaceable) -msgid "IP_EQLX1" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:151(replaceable) -msgid "backend2" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:152(replaceable) -msgid "IP_EQLX2" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:157(para) -msgid "In this example:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:159(para) -msgid "Thin provisioning for SAN volumes is enabled (san_thin_provision = true). This is recommended when setting up Dell EqualLogic back ends." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:162(para) -msgid "Each Dell EqualLogic back-end configuration ([backend1] and [backend2]) has the same required settings as a single back-end configuration, with the addition of volume_backend_name." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:166(para) -msgid "The san_ssh_port option is set to its default value, 22. This option sets the port used for SSH." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:169(para) -msgid "The ssh_conn_timeout option is also set to its default value, 30. This option sets the timeout in seconds for CLI commands over SSH." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:172(para) -msgid "The IP_EQLX1 and IP_EQLX2 refer to the IP addresses used to reach the Dell EqualLogic Group of backend1 and backend2 through SSH, respectively." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dell-equallogic-driver.xml:178(para) -msgid "For information on configuring multiple back ends, see Configure a multiple-storage back end." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:9(title) -msgid "HDS HNAS iSCSI and NFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:11(para) -msgid "This OpenStack Block Storage volume driver provides iSCSI and NFS support for Hitachi NAS Platform Models 3080, 3090, 4040, 4060, 4080 and 4100." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:18(para) -msgid "The NFS and iSCSI drivers support these operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:50(title) -msgid "HNAS storage requirements" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:51(para) -msgid "Before using iSCSI and NFS services, use the HNAS configuration and management GUI (SMU) or SSC CLI to create storage pool(s), file system(s), and assign an EVS. Make sure that the file system used is not created as a replication target. Additionally:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:58(emphasis) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:603(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:659(para) -msgid "For NFS:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:60(para) -msgid "Create NFS exports, choose a path for them (it must be different from \"/\") and set the Show snapshots option to hide and disable access." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:66(para) -msgid "Also, in the \"Access Configuration\" set the option norootsquash , e.g. so HNAS cinder driver can change the permissions of its volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:71(para) -msgid "In order to use the hardware accelerated features of NFS HNAS, we recommend setting max-nfs-version to 3. Refer to HNAS command line reference to see how to configure this option." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:80(emphasis) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:628(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:662(para) -msgid "For iSCSI:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:82(para) -msgid "You need to set an iSCSI domain." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:88(title) -msgid "Block storage host requirements" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:89(para) -msgid "The HNAS driver is supported for Red Hat Enterprise Linux OpenStack Platform, SUSE OpenStack Cloud, and Ubuntu OpenStack. The following packages must be installed:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:94(para) -msgid "nfs-utils for Red Hat Enterprise Linux OpenStack Platform" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:98(para) -msgid "nfs-client for SUSE OpenStack Cloud" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:102(para) -msgid "nfs-common, libc6-i386 for Ubuntu OpenStack" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:108(para) -msgid "If you are not using SSH, you need the HDS SSC to communicate with an HNAS array using the commands. This utility package is available in the RPM package distributed with the hardware through physical media or it can be manually copied from the SMU to the Block Storage host." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:120(title) -msgid "Package installation" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:122(para) -msgid "If you are installing the driver from a RPM or DEB package, follow the steps bellow:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:124(para) -msgid "Install the dependencies:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:125(para) -msgid "In Red Hat:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:127(para) -msgid "Or in Ubuntu:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:129(para) -msgid "Or in SUSE:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:131(para) -msgid "If you are using Ubuntu 12.04, you also need to install libc6-i386" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:135(para) -msgid "Configure the driver as described in the \"Driver configuration\" section." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:137(para) -msgid "Restart all cinder services (volume, scheduler and backup)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:142(title) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:137(title) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:93(title) -msgid "Driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:143(para) -msgid "The HDS driver supports the concept of differentiated services (also referred as quality of service) by mapping volume types to services provided through HNAS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:146(para) -msgid "HNAS supports a variety of storage options and file system capabilities, which are selected through the definition of volume types and the use of multiple back ends. The driver maps up to four volume types into separated exports or file systems, and can support any number if using multiple back ends." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:158(para) -msgid "The configuration file location may differ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:152(para) -msgid "The configuration for the driver is read from an XML-formatted file (one per back end), which you need to create and set its path in the cinder.conf configuration file. Below are the configuration needed in the cinder.conf configuration file :" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:163(para) -msgid "For HNAS iSCSI driver create this section:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:166(replaceable) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:171(replaceable) -msgid "/path/to/config/hnas_config_file.xml" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:167(replaceable) -msgid "HNAS-ISCSI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:168(para) -msgid "For HNAS NFS driver create this section:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:172(replaceable) -msgid "HNAS-NFS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:173(para) -msgid "The XML file has the following format:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:197(title) -msgid "HNAS volume driver XML configuration options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:203(para) -msgid "There is no relative precedence or weight among these four labels." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:198(para) -msgid "An OpenStack Block Storage node using HNAS drivers can have up to four services. Each service is defined by a svc_n tag (svc_0, svc_1, svc_2, or svc_3, for example). These are the configuration options available for each service label:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:209(caption) -msgid "Configuration options for service labels" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:230(para) -msgid "When a create_volume call with a certain volume type happens, the volume type will try to be matched up with this tag. In each configuration file you must define the default volume type in the service labels and, if no volume type is specified, the default is used. Other labels are case sensitive and should match exactly. If no configured volume types match the incoming requested type, an error occurs in the volume creation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:249(para) -msgid "Required only for iSCSI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:252(para) -msgid "An iSCSI IP address dedicated to the service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:264(para) -msgid "For iSCSI driver: virtual file system label associated with the service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:268(para) -msgid "For NFS driver: path to the volume (<ip_address>:/<path>) associated with the service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:273(para) -msgid "Additionally, this entry must be added in the file used to list available NFS shares. 
This file is located, by default, in /etc/cinder/nfs_shares or you can specify the location in the nfs_shares_config option in the cinder.conf configuration file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:286(para) -msgid "These are the configuration options available to the config section of the XML config file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:311(para) -msgid "Management Port 0 IP address. Should be the IP address of the \"Admin\" EVS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:321(para) -msgid "ssc" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:323(para) -msgid "Command to communicate to HNAS array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:331(para) -msgid "Optional (iSCSI only)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:334(para) -msgid "Boolean tag used to enable CHAP authentication protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:344(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:353(para) -msgid "supervisor" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:346(para) -msgid "It's always required on HNAS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:355(para) -msgid "Password is always required on HNAS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:368(para) -msgid "(at least one label has to be defined)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:373(para) -msgid "Service labels: these four predefined names help four different sets of configuration options. Each can specify HDP and a unique volume type." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:388(para) -msgid "Optional if is True" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:394(para) -msgid "The address of HNAS cluster admin." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:412(para) -msgid "Enables SSH authentication between Block Storage host and the SMU." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:425(para) -msgid "Required if ssh_enabled is True" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:432(para) -msgid "Path to the SSH private key used to authenticate in HNAS SMU. The public key must be uploaded to HNAS SMU using ssh-register-public-key (this is an SSH subcommand). Note that copying the public key HNAS using ssh-copy-id doesn't work properly as the SMU periodically wipe out those keys." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:448(title) -msgid "Service labels" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:449(para) -msgid "HNAS driver supports differentiated types of service using the service labels. It is possible to create up to four types of them, as gold, platinum, silver and ssd, for example." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:454(para) -msgid "After creating the services in the XML configuration file, you must configure one volume_type per service. 
Each volume_type must have the metadata service_label with the same name configured in the <volume_type> section of that service. If this is not set, OpenStack Block Storage will schedule the volume creation to the pool with largest available free space or other criteria configured in volume filters." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:472(title) -msgid "Multi-back-end configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:473(para) -msgid "If you use multiple back ends and intend to enable the creation of a volume in a specific back end, you must configure volume types to set the volume_backend_name option to the appropriate back end. Then, create volume_type configurations with the same volume_backend_name ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:485(para) -msgid "You can deploy multiple OpenStack HNAS drivers instances that each control a separate HNAS array. Each service (svc_0, svc_1, svc_2, svc_3) on the instances need to have a volume_type and service_label metadata associated with it. If no metadata is associated with a pool, OpenStack Block Storage filtering algorithm selects the pool with the largest available free space." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:495(title) -msgid "SSH configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:496(para) -msgid "Instead of using on the Block Storage host and store its credential on the XML configuration file, HNAS driver supports authentication. To configure that:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:503(para) -msgid "If you don't have a pair of public keys already generated, create it in the Block Storage host (leave the pass-phrase empty):" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:508(replaceable) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:516(replaceable) -msgid "/opt/hds/ssh" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:509(replaceable) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:546(replaceable) -msgid "/opt/hds/ssh/hnaskey" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:512(para) -msgid "Change the owner of the key to cinder (or the user the volume service will be run):" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:519(para) -msgid "Create the directory \"ssh_keys\" in the SMU server:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:525(para) -msgid "Copy the public key to the \"ssh_keys\" directory:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:528(replaceable) -msgid "/opt/hds/ssh/hnaskey.pub" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:531(para) -msgid "Access the SMU server:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:537(para) -msgid "Run the command to register the SSH keys:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:543(para) -msgid "Check the communication with HNAS in the Block Storage host:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:549(para) -msgid "<cluster_admin_ip0> is \"localhost\" for single node deployments. 
This should return a list of available file systems on HNAS."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:556(title)
-msgid "Editing the XML config file:"
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:559(para)
-msgid "Set the \"username\"."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:564(para)
-msgid "Enable SSH by adding the line \"<ssh_enabled> True</ssh_enabled>\" under the \"<config>\" section."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:571(para)
-msgid "Set the private key path: \"<ssh_private_key> /opt/hds/ssh/hnaskey</ssh_private_key>\" under the \"<config>\" section."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:578(para)
-msgid "If the HNAS is in a multi-cluster configuration, set \"<cluster_admin_ip0>\" to the cluster node admin IP. In a single-node HNAS, leave it empty."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:585(para)
-msgid "Restart cinder services."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:590(para)
-msgid "Note that copying the public key to HNAS using ssh-copy-id does not work properly, as the SMU periodically wipes out those keys."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:593(title)
-msgid "Manage and unmanage"
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:594(para)
-msgid "Manage and unmanage are two API extensions that add new features to the driver. The manage action on an existing volume is very similar to a volume creation. It creates a volume entry in the OpenStack Block Storage DB, but instead of creating a new volume in the back end, it only adds a 'link' to an existing volume. Volume name, description, volume_type, metadata, and availability_zone are supported as in a normal volume creation."
-msgstr ""
-
-#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:597(para)
-msgid "The unmanage action on an existing volume removes the volume from the OpenStack Block Storage DB, but keeps the actual volume in the back end. From an OpenStack Block Storage perspective, the volume would be deleted, but it would still exist for outside use."
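A minimal sketch of the CLI path described below, reusing the NFS example values (host, identifier, name, and type); verify the exact cinder manage syntax for your release:

    cinder manage --name volume-test --volume-type silver \
      ubuntu@hnas-nfs#test_silver 172.24.44.34:/silver/volume-test
    cinder unmanage volume-test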
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:601(title) -msgid "How to Manage:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:602(para) -msgid "On the Dashboard:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:606(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:631(para) -msgid "Under the tab System -> Volumes choose the option [ + Manage Volume ]" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:612(para) -msgid "Identifier: ip:/type/volume_name Example: 172.24.44.34:/silver/volume-test" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:615(para) -msgid "Host: host@backend-name#pool_name Example: ubuntu@hnas-nfs#test_silver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:618(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:643(para) -msgid "Volume Name: volume_name Example: volume-test" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:621(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:646(para) -msgid "Volume Type: choose a type of volume Example: silver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:609(para) -msgid "Fill the fields Identifier, Host and Volume Type with volume information to be managed: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:637(para) -msgid "Identifier: filesystem-name/volume-name Example: filesystem-test/volume-test" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:640(para) -msgid "Host: host@backend-name#pool_name Example: ubuntu@hnas-iscsi#test_silver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:634(para) -msgid "Fill the fields Identifier, Host, Volume Name and Volume Type with volume information to be managed: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:653(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:680(para) -msgid "By CLI:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:658(para) ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:682(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:561(para) -msgid "Example:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:667(title) -msgid "How to Unmanage:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:668(para) -msgid "On Dashboard:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:671(para) -msgid "Under the tab [ System -> Volumes ] choose a volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:674(para) -msgid "On the volume options, choose [ +Unmanage Volume ]" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:677(para) -msgid "Check the data and confirm." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:687(title) -msgid "Additional notes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:690(para) -msgid "The get_volume_stats() function always provides the available capacity based on the combined sum of all the HDPs that are used in these services labels." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:698(para) -msgid "After changing the configuration on the storage, the OpenStack Block Storage driver must be restarted." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:705(para) -msgid "On Red Hat, if the system is configured to use SELinux, you need to set for NFS driver work properly." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hds-hnas-driver.xml:713(para) -msgid "It is not possible to manage a volume if there is a slash ('/') or a colon (':') on the volume name." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:6(title) -msgid "VMware VMDK driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:8(para) -msgid "Use the VMware VMDK driver to enable management of the OpenStack Block Storage volumes on vCenter-managed data stores. Volumes are backed by VMDK files on data stores that use any VMware-compatible storage technology such as NFS, iSCSI, FiberChannel, and vSAN." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:14(title) -msgid "Functional context" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:15(para) -msgid "The VMware VMDK driver connects to vCenter, through which it can dynamically access all the data stores visible from the ESX hosts in the managed cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:18(para) -msgid "When you create a volume, the VMDK driver creates a VMDK file on demand. The VMDK file creation completes only when the volume is subsequently attached to an instance. The reason for this requirement is that data stores visible to the instance determine where to place the volume. Before the service creates the VMDK file, attach a volume to the target instance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:25(para) -msgid "The running vSphere VM is automatically reconfigured to attach the VMDK file as an extra disk. Once attached, you can log in to the running vSphere VM to rescan and discover this extra disk." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:29(para) -msgid "With the update to ESX version 6.0, the VMDK driver now supports NFS version 4.1." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:34(para) -msgid "The recommended volume driver for OpenStack Block Storage is the VMware vCenter VMDK driver. When you configure the driver, you must match it with the appropriate OpenStack Compute driver from VMware and both drivers must point to the same server." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:39(para) -msgid "In the nova.conf file, use this option to define the Compute driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:42(para) -msgid "In the cinder.conf file, use this option to define the volume driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:45(para) -msgid "The following table lists various options that the drivers support for the OpenStack Block Storage configuration (cinder.conf):" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:52(para) -msgid "The VMware VMDK drivers support the creation of VMDK disk file types thin, lazyZeroedThick (sometimes called thick or flat), or eagerZeroedThick." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:56(para) -msgid "A thin virtual disk is allocated and zeroed on demand as the space is used. Unused space on a Thin disk is available to other users." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:60(para) -msgid "A lazy zeroed thick virtual disk will have all space allocated at disk creation. This reserves the entire disk space, so it is not available to other users at any time." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:65(para) -msgid "An eager zeroed thick virtual disk is similar to a lazy zeroed thick disk, in that the entire disk is allocated at creation. However, in this type, any previous data will be wiped clean on the disk before the write. This can mean that the disk will take longer to create, but can also prevent issues with stale data on physical media." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:72(para) -msgid "Use the vmware:vmdk_type extra spec key with the appropriate value to specify the VMDK disk file type. 
This table shows the mapping between the extra spec entry and the VMDK disk file type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:78(caption) -msgid "Extra spec entry to VMDK disk file type mapping" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:82(td) -msgid "Disk file type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:83(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:128(td) -msgid "Extra spec key" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:84(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:129(td) -msgid "Extra spec value" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:90(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:95(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:100(td) -msgid "vmware:vmdk_type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:94(td) -msgid "lazyZeroedThick" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:96(td) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:626(literal) -msgid "thick" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:99(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:101(td) -msgid "eagerZeroedThick" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:105(para) -msgid "If you do not specify a vmdk_type extra spec entry, the disk file type will default to thin." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:107(para) -msgid "The following example shows how to create a lazyZeroedThick VMDK volume by using the appropriate vmdk_type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:115(title) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:127(td) -msgid "Clone type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:116(para) -msgid "With the VMware VMDK drivers, you can create a volume from another source volume or a snapshot point. The VMware vCenter VMDK driver supports the full and linked/fast clone types. Use the vmware:clone_type extra spec key to specify the clone type. The following table captures the mapping for clone types:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:124(caption) -msgid "Extra spec entry to clone type mapping" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:134(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:136(td) -msgid "full" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:135(td) ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:140(td) -msgid "vmware:clone_type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:139(td) -msgid "linked/fast" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:141(td) -msgid "linked" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:145(para) -msgid "If you do not specify the clone type, the default is full." 
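A sketch of how these extra specs are typically applied with the cinder CLI (the volume type name and size here are illustrative; check the CLI syntax for your release):

    cinder type-create vmware_thick
    cinder type-key vmware_thick set vmware:vmdk_type=thick
    cinder type-key vmware_thick set vmware:clone_type=linked
    cinder create --volume-type vmware_thick --display-name volume1 1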
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:147(para) -msgid "The following example shows linked cloning from a source volume, which is created from an image:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:155(title) -msgid "Use vCenter storage policies to specify back-end data stores" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:158(para) -msgid "This section describes how to configure back-end data stores using storage policies. In vCenter 5.5 and greater, you can create one or more storage policies and expose them as a Block Storage volume-type to a vmdk volume. The storage policies are exposed to the vmdk driver through the extra spec property with the vmware:storage_profile key." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:165(para) -msgid "For example, assume a storage policy in vCenter named gold_policy. and a Block Storage volume type named vol1 with the extra spec key vmware:storage_profile set to the value gold_policy. Any Block Storage volume creation that uses the vol1 volume type places the volume only in data stores that match the gold_policy storage policy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:174(para) -msgid "The Block Storage back-end configuration for vSphere data stores is automatically determined based on the vCenter configuration. If you configure a connection to connect to vCenter version 5.5 or later in the cinder.conf file, the use of storage policies to configure back-end data stores is automatically supported." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:182(para) -msgid "You must configure any data stores that you configure for the Block Storage service for the Compute service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:187(title) -msgid "To configure back-end data stores by using storage policies" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:190(para) -msgid "In vCenter, tag the data stores to be used for the back end." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:192(para) -msgid "OpenStack also supports policies that are created by using vendor-specific capabilities; for example vSAN-specific storage policies." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:196(para) -msgid "The tag value serves as the policy. For details, see ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:202(para) -msgid "Set the extra spec key vmware:storage_profile in the desired Block Storage volume types to the policy name that you created in the previous step." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:209(para) -msgid "Optionally, for the parameter, enter the version number of your vSphere platform. For example, ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:214(para) -msgid "This setting overrides the default location for the corresponding WSDL file. Among other scenarios, you can use this setting to prevent WSDL error messages during the development phase or to work with a newer version of vCenter." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:226(para) -msgid "The following considerations apply to configuring SPBM for the Block Storage service:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:230(para) -msgid "Any volume that is created without an associated policy (that is to say, without an associated volume type that specifies vmware:storage_profile extra spec), there is no policy-based placement for that volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:242(para) -msgid "The VMware vCenter VMDK driver supports these operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:248(para) -msgid "When a volume is attached to an instance, a reconfigure operation is performed on the instance to add the volume's VMDK to it. The user must manually rescan and mount the device from within the guest operating system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:257(para) -msgid "Allowed only if volume is not attached to an instance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:267(para) -msgid "Only images in vmdk disk format with bare container format are supported. The property of the image can be preallocated, sparse, streamOptimized or thin." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:282(para) -msgid "Allowed only if the volume is not attached to an instance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:286(para) -msgid "This operation creates a streamOptimized disk image." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:295(para) -msgid "Supported only if the source volume is not attached to an instance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:300(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:42(para) -msgid "Backup a volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:302(para) -msgid "This operation creates a backup of the volume in streamOptimized disk format." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:307(para) ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:45(para) -msgid "Restore backup to new or existing volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:309(para) -msgid "Supported only if the existing volume doesn't contain snapshots." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:314(para) -msgid "Change the type of a volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:316(para) -msgid "This operation is supported only if the volume state is available." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:328(title) -msgid "Storage policy-based configuration in vCenter" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:329(para) -msgid "You can configure Storage Policy-Based Management (SPBM) profiles for vCenter data stores supporting the Compute, Image Service, and Block Storage components of an OpenStack implementation." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:333(para) -msgid "In a vSphere OpenStack deployment, SPBM enables you to delegate several data stores for storage, which reduces the risk of running out of storage space. The policy logic selects the data store based on accessibility and available storage space." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:340(title) -msgid "Prerequisites" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:343(para) -msgid "Determine the data stores to be used by the SPBM policy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:347(para) -msgid "Determine the tag that identifies the data stores in the OpenStack component configuration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:352(para) -msgid "Create separate policies or sets of data stores for separate OpenStack components." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:358(title) -msgid "Create storage policies in vCenter" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:360(title) -msgid "To create storage policies in vCenter" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:362(para) -msgid "In vCenter, create the tag that identifies the data stores:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:366(para) -msgid "From the Home screen, click Tags." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:370(para) -msgid "Specify a name for the tag." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:373(para) -msgid "Specify a tag category. For example, spbm-cinder." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:379(para) -msgid "Apply the tag to the data stores to be used by the SPBM policy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:382(para) -msgid "For details about creating tags in vSphere, see the vSphere documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:389(para) -msgid "In vCenter, create a tag-based storage policy that uses one or more tags to identify a set of data stores." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:393(para) -msgid "For details about creating storage policies in vSphere, see the vSphere documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:402(title) -msgid "Data store selection" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:403(para) -msgid "If storage policy is enabled, the driver initially selects all the data stores that match the associated storage policy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:406(para) -msgid "If two or more data stores match the storage policy, the driver chooses a data store that is connected to the maximum number of hosts." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:409(para) -msgid "In case of ties, the driver chooses the data store with lowest space utilization, where space utilization is defined by the (1-freespace/totalspace) meters." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:414(para) -msgid "These actions reduce the number of volume migrations while attaching the volume to instances." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/vmware-vmdk-driver.xml:416(para) -msgid "The volume must be migrated if the ESX host for the instance cannot access the data store that contains the volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/datera-volume-driver.xml:6(title) -msgid "Datera volume driver" -msgstr "" - -#. The volume driver configures XXX. More information about the driver is available at -#. example.com. -#. -#: ./doc/config-reference/block-storage/drivers/datera-volume-driver.xml:12(para) -msgid "Set the following in your cinder.conf to use the Datera volume driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/datera-volume-driver.xml:17(para) -msgid "Use the following options to configure the volume driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:8(title) -msgid "Violin Memory 6000 series AFA volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:9(para) -msgid "The OpenStack V6000 driver package from Violin Memory adds block storage service support for Violin 6000 Series All Flash Arrays." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:13(para) -msgid "The driver package release can be used with any OpenStack Liberty deployment for all 6000 series all-flash arrays for release 6.3.1 and later using either Fibre Channel or iSCSI HBAs." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:19(para) -msgid "The Violin 6000 series AFA driver is recommended as an evaluation product only, for existing 6000 series customers. The driver will be deprecated or removed in the next OpenStack release. Future development and support will be focused on the 7000 series FSP driver only." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:30(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:20(para) -msgid "To use the Violin driver, the following are required:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:36(para) -msgid "Concerto OS version 6.3.1 or later" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:39(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:27(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:27(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:27(para) -msgid "iSCSI or FC host interfaces" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:42(para) -msgid "Storage network connectivity between all target and initiator ports" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:33(para) -msgid "Violin 6000 series AFA with: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:49(para) -msgid "The Violin volume driver: The driver implements the Block Storage API calls. Both FC and iSCSI driver support is included with the OpenStack Liberty release." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:56(para) -msgid "The vmemclient library: This is the Violin Array Communications library to the Flash Storage Platform through a REST-like interface. The client can be installed using the python 'pip' installer tool. Further information on vmemclient can be found here: PyPI ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:98(para) -msgid "All listed operations are supported for both thick and thin LUNs. However, over-subscription is not supported." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:106(title) -msgid "Array configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:107(para) -msgid "After installing and configuring your V6000 array per the installation guide provided with your array, please follow these additional steps to prepare your array for use with OpenStack." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:114(para) -msgid "Ensure your client initiator interfaces are all zoned or VLAN'd so that they can communicate with ALL of the target ports on the array. See your array installation or user guides for more information." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:122(para) -msgid "Set the array's provisioning threshold value." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:124(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:130(replaceable) -msgid "CONTAINER_NAME" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:128(para) -msgid "Set the array's used-space threshold value." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:138(para) -msgid "Once the array is configured, it is simply a matter of editing the cinder configuration file to add or modify the parameters. Contents will differ depending on whether you are setting up a fibre channel or iSCSI environment." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:146(title) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:102(title) -msgid "Fibre channel configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:147(para) -msgid "Set the following in your cinder.conf configuration file for setup of a Fibre channel array, replacing the variables using the guide in the following section:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:156(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:174(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:192(term) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:112(replaceable) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:142(term) -msgid "VMEM_MGMT_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:157(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:175(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:201(term) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:113(replaceable) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:152(term) -msgid "VMEM_USER_NAME" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:158(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:176(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:211(term) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:114(replaceable) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:162(term) -msgid "VMEM_PASSWORD" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:159(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:180(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:230(term) -msgid "VMEM_MGA_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:160(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:181(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:239(term) -msgid "VMEM_MGB_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:166(para) -msgid "Set the following in your cinder.conf configuration file for setup of an iSCSI array, replacing the variables using the guide in the following section:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:179(replaceable) ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:219(term) -msgid "CINDER_INITIATOR_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:186(title) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:120(title) -msgid "Configuration parameters" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:187(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:121(para) -msgid "Description of configuration value placeholders:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:194(para) -msgid "Cluster master IP address or hostname of the Violin 6000 Array. Can be an IP address or hostname." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:203(para) -msgid "Log-in user name for the Violin 6000 Memory Gateways. This user must have administrative rights on the array. Typically this is the 'admin' user." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:213(para) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:164(para) -msgid "Log-in user's password." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:221(para) -msgid "The IP address assigned to the primary iSCSI interface on the cinder-volume client. This IP address must be able to communicate with all target ports that are active on the array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:232(para) -msgid "The IP or hostname of the gateway node marked 'A', commonly referred to as 'MG-A'." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v6000-driver.xml:241(para) -msgid "The IP or hostname of the gateway node marked 'B', commonly referred to as 'MG-B'." -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:159(None) -msgid "@@image: '../../../common/figures/emc/enabler.png'; md5=207fa592a61286e0628b66554554ca4c" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:7(para) -msgid "EMC VNX driver consists of EMCCLIISCSIDriver and EMCCLIFCDriver, and supports both iSCSI and FC protocol. EMCCLIISCSIDriver (VNX iSCSI driver) and EMCCLIFCDriver (VNX FC driver) are separately based on the ISCSIDriver and FCDriver defined in Block Storage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:15(title) -msgid "Overview" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:16(para) -msgid "The VNX iSCSI driver and VNX FC driver perform the volume operations by executing Navisphere CLI (NaviSecCLI) which is a command line interface used for management, diagnostics, and reporting functions for VNX." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:24(para) -msgid "VNX Operational Environment for Block version 5.32 or higher." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:28(para) -msgid "VNX Snapshot and Thin Provisioning license should be activated for VNX." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:32(para) -msgid "Navisphere CLI v7.32 or higher is installed along with the driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:59(para) -msgid "Migrate a volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:68(para) -msgid "Create and delete consistency groups." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:74(para) -msgid "Modify consistency groups." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:77(para) -msgid "Efficient non-disruptive volume backup." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:83(title) -msgid "Preparation" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:84(para) -msgid "This section contains instructions to prepare the Block Storage nodes to use the EMC VNX driver. You install the Navisphere CLI, install the driver, ensure you have correct zoning configurations, and register the driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:89(title) -msgid "Install Navisphere CLI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:90(para) -msgid "Navisphere CLI needs to be installed on all Block Storage nodes within an OpenStack deployment. You need to download different versions for different platforms." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:95(para) -msgid "For Ubuntu x64, DEB is available at EMC OpenStack Github." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:100(para) -msgid "For all other variants of Linux, Navisphere CLI is available at Downloads for VNX2 Series or Downloads for VNX1 Series." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:107(para) -msgid "After installation, set the security level of Navisphere CLI to low:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:114(title) -msgid "Check array software" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:115(para) -msgid "Make sure your have following software installed for certain features." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:117(caption) -msgid "Required software" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:122(td) -msgid "Feature" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:123(td) -msgid "Software Required" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:129(para) -msgid "ThinProvisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:133(para) -msgid "VNXSnapshots" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:136(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:779(title) -msgid "FAST cache support" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:137(para) -msgid "FASTCache" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:140(para) -msgid "Create volume with type compressed" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:141(para) -msgid "Compression" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:144(para) -msgid "Create volume with type deduplicated" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:145(para) -msgid "Deduplication" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:149(para) -msgid "You can check the status of your array software in the \"Software\" page of \"Storage System Properties\". Here is how it looks like." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:155(title) -msgid "Installed software on VNX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:165(title) -msgid "Install EMC VNX driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:166(para) -msgid "Both EMCCLIISCSIDriver and EMCCLIFCDriver are included in the Block Storage installer package:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:171(filename) -msgid "emc_vnx_cli.py" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:174(para) -msgid "emc_cli_fc.py (for )" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:178(para) -msgid "emc_cli_iscsi.py (for )" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:185(para) -msgid "For FC Driver, FC zoning is properly configured between hosts and VNX. Check for reference." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:190(para) -msgid "For iSCSI Driver, make sure your VNX iSCSI port is accessible by your hosts. Check for reference." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:195(para) -msgid "You can use initiator_auto_registration=True configuration to avoid register the ports manually. Please check the detail of the configuration in for reference." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:201(para) -msgid "If you are trying to setup multipath, please refer to Multipath Setup in ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:210(para) -msgid "Make the following changes in /etc/cinder/cinder.conf file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:213(para) -msgid "Changes to your configuration won't take effect until your restart your cinder service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:217(title) -msgid "Minimum configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:218(para) -msgid "Here is a sample of minimum backend configuration. See following sections for the detail of each option Replace EMCCLIFCDriver to EMCCLIISCSIDriver if your are using the iSCSI driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:237(title) -msgid "Multi-backend configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:238(para) -msgid "Here is a sample of a multi-backend configuration. See following sections for the detail of each option. Replace EMCCLIFCDriver to EMCCLIISCSIDriver if your are using the iSCSI driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:264(para) -msgid "For more details on multi-backends, see OpenStack Cloud Administration Guide" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:271(title) -msgid "Required configurations" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:273(title) -msgid "IP of the VNX Storage Processors" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:274(para) -msgid "Specify the SP A and SP B IP to connect." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:281(title) -msgid "VNX login credentials" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:282(para) -msgid "There are two ways to specify the credentials." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:287(para) -msgid "Use plain text username and password." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:292(para) -msgid "Supply for plain username and password as below." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:298(para) -msgid "Valid values for storage_vnx_authentication_type are: global (default), local, ldap" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:306(para) -msgid "Use Security file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:309(para) -msgid "This approach avoids the plain text password in your cinder configuration file. Supply a security file as below:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:314(para) -msgid "Please check Unisphere CLI user guide or for how to create a security file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:318(title) -msgid "Path to your Unisphere CLI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:319(para) -msgid "Specify the absolute path to your naviseccli." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:325(title) -msgid "Driver name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:328(para) -msgid "For the FC Driver, add the following option:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:334(para) -msgid "For iSCSI Driver, add following option:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:341(title) -msgid "Optional configurations" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:343(title) -msgid "VNX pool names" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:344(para) -msgid "Specify the list of pools to be managed, separated by ','. They should already exist in VNX." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:349(para) -msgid "If this value is not specified, all pools of the array will be used." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:355(title) -msgid "Initiator auto registration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:356(para) -msgid "When initiator_auto_registration=True, the driver will automatically register initiators to all working target ports of the VNX array during volume attaching (The driver will skip those initiators that have already been registered) if the option io_port_list is not specified in cinder.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:364(para) -msgid "If the user wants to register the initiators with some specific ports but not register with the other ports, this functionality should be disabled." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:369(para) -msgid "When a comma-separated list is given to io_port_list, the driver will only register the initiator to the ports specified in the list and only return target port(s) which belong to the target ports in the io_port_list instead of all target ports." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:378(para) -msgid "Example for FC ports:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:382(para) -msgid "a or B is Storage Processor, number 1 and 3 are Port ID." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:390(para) -msgid "Example for iSCSI ports:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:394(para) -msgid "a or B is Storage Processor, the first numbers 1 and 3 are Port ID and the second number 0 is Virtual Port ID" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:407(para) -msgid "Rather than de-registered, the registered ports will be simply bypassed whatever they are in 'io_port_list' or not." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:413(para) -msgid "The driver will raise an exception if ports in io_port_list are not existed in VNX during startup." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:423(title) -msgid "Force delete volumes in storage group" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:424(para) -msgid "Some available volumes may remain in storage group on the VNX array due to some OpenStack timeout issue. But the VNX array do not allow the user to delete the volumes which are in storage group. Option force_delete_lun_in_storagegroup is introduced to allow the user to delete the available volumes in this tricky situation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:433(para) -msgid "When force_delete_lun_in_storagegroup=True in the back-end section, the driver will move the volumes out of storage groups and then delete them if the user tries to delete the volumes that remain in storage group on the VNX array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:439(para) -msgid "The default value of force_delete_lun_in_storagegroup is False." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:446(title) -msgid "Over subscription in thin provisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:447(para) -msgid "Over subscription allows that the sum of all volumes' capacity (provisioned capacity) to be larger than the pool's total capacity." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:452(para) -msgid "max_over_subscription_ratio in the back-end section is the ratio of provisioned capacity over total capacity." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:457(para) -msgid "The default value of max_over_subscription_ratio is 20.0, which means the provisioned capacity can not exceed the total capacity. If the value of this ratio is set larger than 1.0, the provisioned capacity can exceed the total capacity." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:466(title) -msgid "Storage group automatic deletion" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:467(para) -msgid "For volume attaching, the driver has a storage group on VNX for each compute node hosting the vm instances which are going to consume VNX Block Storage (using compute node's hostname as storage group's name). All the volumes attached to the VM instances in a compute node will be put into the storage group. If destroy_empty_storage_group=True, the driver will remove the empty storage group after its last volume is detached. For data safety, it does not suggest to set destroy_empty_storage_group=True unless the VNX is exclusively managed by one Block Storage node because consistent lock_path is required for operation synchronization for this behavior." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:483(title) -msgid "Initiator auto deregistration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:484(para) -msgid "Enabling storage group automatic deletion is the precondition of this function. If initiator_auto_deregistration=True is set, the driver will deregister all the initiators of the host after its storage group is deleted." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:493(title) -msgid "FC SAN auto zoning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:494(para) -msgid "The EMC VNX FC driver supports FC SAN auto zoning when ZoneManager is configured. Set zoning_mode to fabric in DEFAULT section to enable this feature. For ZoneManager configuration, please refer to Block Storage official guide." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:503(title) -msgid "Volume number threshold" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:504(para) -msgid "In VNX, there is a limitation on the number of pool volumes that can be created in the system. When the limitation is reached, no more pool volumes can be created even if there is remaining capacity in the storage pool. In other words, if the scheduler dispatches a volume creation request to a back end that has free capacity but reaches the volume limitation, the creation fails." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:512(para) -msgid "The default value of check_max_pool_luns_threshold is False. When check_max_pool_luns_threshold=True, the pool-based back end will check the limit and will report 0 free capacity to the scheduler if the limit is reached. So the scheduler will be able to skip this kind of pool-based back end that runs out of the pool volume number." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:524(title) -msgid "iSCSI initiators" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:525(para) -msgid "iscsi_initiators is a dictionary of IP addresses of the iSCSI initiator ports on OpenStack Nova/Cinder nodes which want to connect to VNX via iSCSI. If this option is configured, the driver will leverage this information to find an accessible iSCSI target portal for the initiator when attaching volume. Otherwise, the iSCSI target portal will be chosen in a relative random way." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:535(emphasis) -msgid "This option is only valid for iSCSI driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:537(para) -msgid "Here is an example. VNX will connect host1 with 10.0.0.1 and 10.0.0.2. And it will connect host2 with 10.0.0.3." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:543(para) -msgid "The key name (like host1 in the example) should be the output of command hostname." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:550(title) -msgid "Default timeout" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:551(para) -msgid "Specify the timeout(minutes) for operations like LUN migration, LUN creation, etc. For example, LUN migration is a typical long running operation, which depends on the LUN size and the load of the array. An upper bound in the specific deployment can be set to avoid unnecessary long wait." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:558(para) -msgid "The default value for this option is infinite." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:567(title) -msgid "Max LUNs per storage group" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:568(para) -msgid "max_luns_per_storage_group specify the max number of LUNs in a storage group. Default value is 255. It is also the max value supportedby VNX." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:575(title) -msgid "Ignore pool full threshold" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:576(para) -msgid "if ignore_pool_full_threshold is set to True, driver will force LUN creation even if the full threshold of pool is reached. Default to False" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:587(para) -msgid "Extra specs are used in volume types created in cinder as the preferred property of the volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:591(para) -msgid "The Block storage scheduler will use extra specs to find the suitable back end for the volume and the Block storage driver will create the volume based on the properties specified by the extra spec." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:596(para) -msgid "Use following command to create a volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:600(para) -msgid "Use following command to update the extra spec of a volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:604(para) -msgid "Volume types can also be configured in OpenStack Horizon." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:607(para) -msgid "In VNX Driver, we defined several extra specs. 
They are introduced below:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:612(title) -msgid "Provisioning type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:615(para) -msgid "Key: provisioning:type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:620(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:720(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:787(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:824(para) -msgid "Possible Values:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:630(para) -msgid "Volume is fully provisioned." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:634(title) -msgid "creating a thick volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:645(para) -msgid "Volume is virtually provisioned" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:649(title) -msgid "creating a thin volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:656(literal) -msgid "deduplicated" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:660(para) -msgid "Volume is thin and deduplication is enabled. The administrator shall go to VNX to configure the system level deduplication settings. To create a deduplicated volume, the VNX Deduplication license must be activated on VNX, and specify deduplication_support=True to let Block Storage scheduler find the proper volume back end." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:670(title) -msgid "creating a deduplicated volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:677(literal) -msgid "compressed" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:681(para) -msgid "Volume is thin and compression is enabled. The administrator shall go to the VNX to configure the system level compression settings. To create a compressed volume, the VNX Compression license must be activated on VNX , and use compression_support=True to let Block Storage scheduler find a volume back end. VNX does not support creating snapshots on a compressed volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:691(title) -msgid "creating a compressed volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:697(para) -msgid "Default: thick" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:703(para) -msgid "provisioning:type replaces the old spec key storagetype:provisioning. The latter one will be obsoleted in the next release. If both provisioning:typeand storagetype:provisioning are set in the volume type, the value of provisioning:type will be used." 
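A sketch of the type-create and type-key commands referenced above, using the provisioning:type key described in this section; the type names are illustrative:

    $ cinder type-create "ThickVolumeType"
    $ cinder type-key "ThickVolumeType" set provisioning:type=thick
    $ cinder type-create "ThinVolumeType"
    $ cinder type-key "ThinVolumeType" set provisioning:type=thin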
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:712(title) -msgid "Storage tiering support" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:715(para) -msgid "Key: storagetype:tiering" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:726(literal) -msgid "StartHighThenAuto" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:731(literal) -msgid "Auto" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:736(literal) -msgid "HighestAvailable" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:741(literal) -msgid "LowestAvailable" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:746(literal) -msgid "NoMovement" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:752(para) -msgid "Default: StartHighThenAuto" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:757(para) -msgid "VNX supports fully automated storage tiering which requires the FAST license activated on the VNX. The OpenStack administrator can use the extra spec key storagetype:tiering to set the tiering policy of a volume and use the key fast_support='<is> True' to let Block Storage scheduler find a volume back end which manages a VNX with FAST license activated. Here are the five supported values for the extra spec key storagetype:tiering:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:768(title) -msgid "creating a volume types with tiering policy:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:773(para) -msgid "Tiering policy can not be applied to a deduplicated volume. Tiering policy of the deduplicated LUN align with the settings of the pool." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:782(para) -msgid "Key: fast_cache_enabled" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:804(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:841(para) -msgid "Default: False" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:809(para) -msgid "VNX has FAST Cache feature which requires the FAST Cache license activated on the VNX. Volume will be created on the backend with FAST cache enabled when True is specified." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:816(title) -msgid "Snap-copy" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:819(para) -msgid "Key: copytype:snap" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:846(para) -msgid "The VNX driver supports snap-copy, which extremely accelerates the process for creating a copied volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:850(para) -msgid "By default, the driver will do full data copy when creating a volume from a snapshot or cloning a volume, which is time-consuming especially for large volumes. When the snap-copy is used, the driver will simply create a snapshot and mount it as a volume for the 2 kinds of operations which will be instant even for large volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:858(para) -msgid "To enable this functionality, the source volume should have copytype:snap=True in the extra specs of its volume type. 
Then the new volume cloned from the source or copied from the snapshot for the source, will be in fact a snap-copy instead of a full copy. If a full copy is needed, retype/migration can be used to convert the snap-copy volume to a full-copy volume which may be time-consuming." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:869(para) -msgid "User can determine whether the volume is a snap-copy volume or not by showing its metadata. If the 'lun_type' in metadata is 'smp', the volume is a snap-copy volume. Otherwise, it is a full-copy volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:877(emphasis) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:986(emphasis) -msgid "Constraints:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:881(para) -msgid "copytype:snap=True is not allowed in the volume type of a consistency group." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:887(para) -msgid "Clone and snapshot creation are not allowed on a copied volume created through the snap-copy before it is converted to a full copy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:894(para) -msgid "The number of snap-copy volume created from a source volume is limited to 255 at one point in time." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:900(para) -msgid "The source volume which has snap-copy volume can not be deleted." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:908(title) -msgid "Pool name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:911(para) -msgid "Key: pool_name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:916(para) -msgid "Possible Values: name of the storage pool managed by cinder" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:921(para) -msgid "Default: None" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:926(para) -msgid "If the user wants to create a volume on a certain storage pool in a backend that manages multiple pools, a volume type with a extra spec specified storage pool should be created first, then the user can use this volume type to create the volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:933(title) -msgid "Creating the volume type:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:939(title) -msgid "Obsoleted extra specs in Liberty" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:940(para) -msgid "Please avoid using following extra spec keys." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:946(literal) -msgid "storagetype:provisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:951(literal) -msgid "storagetype:pool" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:958(title) -msgid "Advanced features" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:960(title) -msgid "Read-only volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:961(para) -msgid "OpenStack supports read-only volumes. The following command can be used to set a volume as read-only." 
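Two small sketches for the items above: a volume type pinned to a specific pool through the pool_name extra spec, and the python-cinderclient command that marks a volume read-only (the type name, pool name, and VOLUME_ID are placeholders):

    $ cinder type-create "HighPerf"
    $ cinder type-key "HighPerf" set pool_name=Pool_02_SASFLASH
    $ cinder readonly-mode-update VOLUME_ID True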
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:966(para) -msgid "After a volume is marked as read-only, the driver will forward the information when a hypervisor is attaching the volume and the hypervisor will make sure the volume is read-only." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:973(title) -msgid "Efficient non-disruptive volume backup" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:974(para) -msgid "The default implementation in Cinder for non-disruptive volume backup is not efficient since a cloned volume will be created during backup." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:979(para) -msgid "The approach of efficient backup is to create a snapshot for the volume and connect this snapshot (a mount point in VNX) to the Cinder host for volume backup. This eliminates migration time involved in volume clone." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:990(para) -msgid "Backup creation for a snap-copy volume is not allowed if the volume status is in-use since snapshot cannot be taken from this volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1000(title) -msgid "Best practice" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1002(title) -msgid "Multipath setup" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1003(para) -msgid "Enabling multipath volume access is recommended for robust data access. The major configuration includes:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1009(para) -msgid "Install multipath-tools, sysfsutils and sg3-utils on nodes hosting Nova-Compute and Cinder-Volume services (Please check the operating system manual for the system distribution for specific installation steps. For Red Hat based distributions, they should be device-mapper-multipath, sysfsutils and sg3_utils)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1022(para) -msgid "Specify use_multipath_for_image_xfer=true in cinder.conf for each FC/iSCSI back end." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1028(para) -msgid "Specify iscsi_use_multipath=True in libvirt section of nova.conf. This option is valid for both iSCSI and FC driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1036(para) -msgid "For multipath-tools, here is an EMC recommended sample of /etc/multipath.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1040(para) -msgid "user_friendly_names is not specified in the configuration and thus it will take the default value no. It is NOT recommended to set it to yes because it may fail operations such as VM live migration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1082(para) -msgid "When multipath is used in OpenStack, multipath faulty devices may come out in Nova-Compute nodes due to different issues (Bug 1336683 is a typical example)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1087(para) -msgid "A solution to completely avoid faulty devices has not been found yet. faulty_device_cleanup.py mitigates this issue when VNX iSCSI storage is used. 
Cloud administrators can deploy the script in all Nova-Compute nodes and use a CRON job to run the script on each Nova-Compute node periodically so that faulty devices will not stay too long. Please refer to: VNX faulty device cleanup for detailed usage and the script." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1100(title) -msgid "Restrictions and limitations" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1102(title) -msgid "iSCSI port cache" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1103(para) -msgid "EMC VNX iSCSI driver caches the iSCSI ports information, so that the user should restart the cinder-volume service or wait for seconds (which is configured by periodic_interval in cinder.conf) before any volume attachment operation after changing the iSCSI port configurations. Otherwise the attachment may fail because the old iSCSI port configurations were used." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1115(title) -msgid "No extending for volume with snapshots" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1116(para) -msgid "VNX does not support extending the thick volume which has a snapshot. If the user tries to extend a volume which has a snapshot, the status of the volume would change to error_extending." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1124(title) -msgid "Limitations for deploying cinder on computer node" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1125(para) -msgid "It is not recommended to deploy the driver on a compute node if cinder upload-to-image --force True is used against an in-use volume. Otherwise, cinder upload-to-image --force True will terminate the data access of the vm instance to the volume." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1134(title) -msgid "Storage group with host names in VNX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1135(para) -msgid "When the driver notices that there is no existing storage group that has the host name as the storage group name, it will create the storage group and also add the compute node's or Block Storage nodes' registered initiators into the storage group." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1141(para) -msgid "If the driver notices that the storage group already exists, it will assume that the registered initiators have also been put into it and skip the operations above for better performance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1146(para) -msgid "It is recommended that the storage administrator does not create the storage group manually and instead relies on the driver for the preparation. If the storage administrator needs to create the storage group manually for some special requirements, the correct registered initiators should be put into the storage group as well (otherwise the following volume attaching operations will fail )." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1156(title) -msgid "EMC storage-assisted volume migration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1157(para) -msgid "EMC VNX driver supports storage-assisted volume migration, when the user starts migrating with cinder migrate --force-host-copy False <volume_id> <host> or cinder migrate <volume_id> <host>, cinder will try to leverage the VNX's native volume migration functionality." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1166(para) -msgid "In following scenarios, VNX storage-assisted volume migration will not be triggered:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1172(para) -msgid "Volume migration between back ends with different storage protocol, ex, FC and iSCSI." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1178(para) -msgid "Volume is to be migrated across arrays." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1186(title) -msgid "Appendix" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1188(title) -msgid "Authenticate by security file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1189(para) -msgid "VNX credentials are necessary when the driver connects to the VNX system. Credentials in global, local and ldap scopes are supported. There are two approaches to provide the credentials:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1194(para) -msgid "The recommended one is using the Navisphere CLI security file to provide the credentials which can get rid of providing the plain text credentials in the configuration file. Following is the instruction on how to do this." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1202(para) -msgid "Find out the Linux user id of the cinder-volume processes. Assuming the service cinder-volume is running by the account cinder." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1210(para) -msgid "Run su as root user." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1215(para) -msgid "In /etc/passwd, change cinder:x:113:120::/var/lib/cinder:/bin/false to cinder:x:113:120::/var/lib/cinder:/bin/bash (This temporary change is to make step 4 work.)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1224(para) -msgid "Save the credentials on behave of cinder user to a security file (assuming the array credentials are admin/admin in global scope). In the command below, the '-secfilepath' switch is used to specify the location to save the security file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1234(para) -msgid "Change cinder:x:113:120::/var/lib/cinder:/bin/bash back to cinder:x:113:120::/var/lib/cinder:/bin/false in /etc/passwd" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1243(para) -msgid "Remove the credentials options san_login, san_password and storage_vnx_authentication_type from cinder.conf. (normally it is /etc/cinder/cinder.conf). Add option storage_vnx_security_file_dir and set its value to the directory path of your security file generated in step 4. Omit this option if -secfilepath is not used in step 4." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1256(para) -msgid "Restart the cinder-volume service to validate the change." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1264(title) -msgid "Register FC port with VNX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1265(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1326(para) -msgid "This configuration is only required when initiator_auto_registration=False." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1269(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1330(para) -msgid "To access VNX storage, the compute nodes should be registered on VNX first if initiator auto registration is not enabled." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1273(para) -msgid "To perform \"Copy Image to Volume\" and \"Copy Volume to Image\" operations, the nodes running the cinder-volume service (Block Storage nodes) must be registered with the VNX as well." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1280(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1340(para) -msgid "The steps mentioned below are for the compute nodes. Please follow the same steps for the Block Storage nodes also (The steps can be skipped if initiator auto registration is enabled)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1287(para) -msgid "Assume 20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2 is the WWN of a FC initiator port name of the compute node whose hostname and IP are myhost1 and 10.10.61.1. Register 20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2 in Unisphere:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1294(para) -msgid "Login to Unisphere, go to FNM0000000000->Hosts->Initiators." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1297(para) -msgid "Refresh and wait until the initiator 20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2 with SP Port A-1 appears." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1300(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1381(para) -msgid "Click the Register button, select CLARiiON/VNX and enter the hostname (which is the output of the linux command hostname) and IP address:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1306(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1387(para) -msgid "Hostname : myhost1" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1309(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1390(para) -msgid "IP : 10.10.61.1" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1312(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1393(para) -msgid "Click Register" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1316(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1397(para) -msgid "Then host 10.10.61.1 will appear under Hosts->Host List as well." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1321(para) -msgid "Register the wwn with more ports if needed." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1325(title) -msgid "Register iSCSI port with VNX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1334(para) -msgid "To perform \"Copy Image to Volume\" and \"Copy Volume to Image\" operations, the nodes running the cinder-volume service (Block Storage nodes) must be registered with the VNX as well." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1346(para) -msgid "On the compute node with IP address 10.10.61.1 and hostname myhost1, execute the following commands (assuming 10.10.61.35 is the iSCSI target):" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1351(para) -msgid "Start the iSCSI initiator service on the node" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1354(para) -msgid "Discover the iSCSI target portals on VNX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1357(para) -msgid "Enter /etc/iscsi" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1360(para) -msgid "Find out the iqn of the node" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1365(para) -msgid "Login to VNX from the compute node using the target corresponding to the SPA port:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1370(para) -msgid "Assume iqn.1993-08.org.debian:01:1a2b3c4d5f6g is the initiator name of the compute node. Register iqn.1993-08.org.debian:01:1a2b3c4d5f6g in Unisphere:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1375(para) -msgid "Login to Unisphere, go to FNM0000000000->Hosts->Initiators ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1378(para) -msgid "Refresh and wait until the initiator iqn.1993-08.org.debian:01:1a2b3c4d5f6g with SP Port A-8v0 appears." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1402(para) ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1413(para) -msgid "Logout iSCSI on the node:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1406(para) -msgid "Login to VNX from the compute node using the target corresponding to the SPB port:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1411(para) -msgid "In Unisphere register the initiator with the SPB port." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vnx-driver.xml:1417(para) -msgid "Register the iqn with more ports if needed." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:6(title) -msgid "Quobyte driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:8(para) -msgid "The Quobyte volume driver enables storing Block Storage service volumes on a Quobyte storage back end. Block Storage service back ends are mapped to Quobyte volumes and individual Block Storage service volumes are stored as files on a Quobyte volume. Selection of the appropriate Quobyte volume is done by the aforementioned back end configuration that specifies the Quobyte volume explicitely." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:16(para) -msgid "Note the dual use of the term 'volume' in the context of Block Storage service volumes and in the context of Quobyte volumes." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:20(para) -msgid "For more information see the Quobyte support webpage ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:24(para) -msgid "The Quobyte volume driver supports the following volume operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:27(para) -msgid "Create, delete, attach, and detach volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:29(para) -msgid "Secure NAS operation (Starting with Mitaka release secure NAS operation is optional but still default)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:35(para) -msgid "Copy a volume to image" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:36(para) -msgid "Generic volume migration (no back end optimization)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:41(para) -msgid "When running VM instances off Quobyte volumes, ensure that the Quobyte Compute service driver has been configured in your OpenStack cloud." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:46(para) -msgid "To activate the Quobyte volume driver, configure the corresponding volume_driver parameter: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/quobyte-driver.xml:51(para) -msgid "The following table contains the configuration options supported by the Quobyte driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:6(title) -msgid "Sheepdog driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:7(para) -msgid "Sheepdog is an open-source distributed storage system that provides a virtual storage pool utilizing internal disk of commodity servers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:10(para) -msgid "Sheepdog scales to several hundred nodes, and has powerful virtual disk management features like snapshotting, cloning, rollback, and thin provisioning." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:13(para) -msgid "More information can be found on Sheepdog Project." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:15(para) -msgid "This driver enables the use of Sheepdog through Qemu/KVM." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:19(para) -msgid "Sheepdog driver supports these operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:47(para) -msgid "Set the following volume_driver in cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/sheepdog-driver.xml:51(para) -msgid "The following table contains the configuration options supported by the Sheepdog driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:7(title) -msgid "EMC XtremIO Block Storage driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:9(para) -msgid "The high performance XtremIO All Flash Array (AFA) offers Block Storage services to OpenStack. Using the driver, OpenStack Block Storage hosts can connect to an XtermIO Storage cluster." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:11(para) -msgid "This section explains how to configure and connect an OpenStack block storage host to an XtremIO storage cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:16(para) -msgid "Xtremapp: Version 3.0 and 4.0" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:42(para) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:38(para) -msgid "Manage and unmanage a volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:50(title) -msgid "XtremIO Block Storage driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:51(para) -msgid "Edit the cinder.conf file by adding the configuration below under the [DEFAULT] section of the file in case of a single back end or under a separate section in case of multiple back ends (for example [XTREMIO]). The configuration file is usually located under the following path /etc/cinder/cinder.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:55(para) -msgid "For a configuration example, refer to the configuration example." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:57(title) -msgid "XtremIO driver name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:61(para) -msgid "For iSCSI volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOIscsiDriver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:64(para) -msgid "For Fibre Channel volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:69(title) -msgid "XtremIO management server (XMS) IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:70(para) -msgid "To retrieve the management IP, use the CLI command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:71(para) -msgid "Configure the management IP by adding the following parameter: san_ip = XMS Management IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:75(title) -msgid "XtremIO cluster name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:76(para) -msgid "In XtremIO version 4.0, a single XMS can manage multiple cluster back ends. In such setups, the administrator is required to specify the cluster name (in addition to the XMS IP). Each cluster must be defined as a separate back end." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:78(para) -msgid "To retrieve the Cluster Name, run the CLI command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:79(para) -msgid "Configure the cluster name by adding the xtremio_cluster_name = Cluster-Name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:80(para) -msgid "When a single cluster is managed in XtremIO version 4.0, the cluster name is not required." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:83(title) -msgid "XtremIO user credentials" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:84(para) -msgid "OpenStack Block Storage requires an XtremIO XMS user with administrative privileges. XtremIO recommends creating a dedicated OpenStack user account that holds an administrative user role." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:86(para) -msgid "Refer to the XtremIO User Guide for details on user account management" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:87(para) -msgid "Create an XMS account using either the XMS GUI or the CLI command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:89(replaceable) -msgid "XMS username" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:90(replaceable) -msgid "XMS username password" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:100(title) -msgid "Setting thin provisioning and multipathing parameters" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:101(para) -msgid "To support thin provisioning and multipathing in the XtremIO Array, the following parameters from the Nova and Cinder configuration files should be modified as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:104(para) -msgid "Thin Provisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:105(para) -msgid "All XtremIO volumes are thin provisioned. The default value of 20 should be maintained for the max_over_subscription_ratio parameter." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:107(para) -msgid "The use_cow_images parameter in thenova.conffile should be set to False as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:109(code) -msgid "use_cow_images = false" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:111(para) -msgid "Multipathing" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:112(para) -msgid "The use_multipath_for_image_xfer parameter in thecinder.conf file should be set to True as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:114(code) -msgid "use_multipath_for_image_xfer = true" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:124(title) -msgid "Configuring CHAP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:125(para) -msgid "The XtremIO Block Storage driver supports CHAP initiator authentication. If CHAP initiator authentication is required, set the CHAP Authentication mode to initiator." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:127(para) -msgid "To set the CHAP initiator mode using CLI, run the following CLI command:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:129(para) -msgid "The CHAP initiator mode can also be set via the XMS GUI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:130(para) -msgid "Refer to XtremIO User Guide for details on CHAP configuration via GUI and CLI." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:131(para) -msgid "The CHAP initiator authentication credentials (username and password) are generated automatically by the Block Storage driver. Therefore, there is no need to configure the initial CHAP credentials manually in XMS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:145(replaceable) -msgid "XMS_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:146(replaceable) -msgid "Cluster01" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:147(replaceable) -msgid "XMS_USER" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:148(replaceable) -msgid "XMS_PASSWD" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-xtremio-driver.xml:149(replaceable) -msgid "XtremIOAFA" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:4(title) -msgid "IBM GPFS volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:5(para) -msgid "IBM General Parallel File System (GPFS) is a cluster file system that provides concurrent access to file systems from multiple nodes. The storage provided by these nodes can be direct attached, network attached, SAN attached, or a combination of these methods. GPFS provides many features beyond common data access, including data replication, policy based storage management, and space efficient file snapshot and clone operations." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:14(title) -msgid "How the GPFS driver works" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:15(para) -msgid "The GPFS driver enables the use of GPFS in a fashion similar to that of the NFS driver. With the GPFS driver, instances do not actually access a storage device at the block level. Instead, volume backing files are created in a GPFS file system and mapped to instances, which emulate a block device." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:23(para) -msgid "GPFS software must be installed and running on nodes where Block Storage and Compute services run in the OpenStack environment. A GPFS file system must also be created and mounted on these nodes before starting the cinder-volume service. The details of these GPFS specific steps are covered in GPFS: Concepts, Planning, and Installation Guide and GPFS: Administration and Programming Reference." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:35(para) -msgid "Optionally, the Image Service can be configured to store images on a GPFS file system. When a Block Storage volume is created from an image, if both image data and volume data reside in the same GPFS file system, the data from image file is moved efficiently to the volume file using copy-on-write optimization strategy." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:43(title) -msgid "Enable the GPFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:44(para) -msgid "To use the Block Storage service with the GPFS driver, first set the volume_driver in cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:48(para) -msgid "The following table contains the configuration options supported by the GPFS driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:53(para) -msgid "The gpfs_images_share_mode flag is only valid if the Image Service is configured to use GPFS with the gpfs_images_dir flag. When the value of this flag is copy_on_write, the paths specified by the gpfs_mount_point_base and gpfs_images_dir flags must both reside in the same GPFS file system and in the same GPFS file set." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:66(title) -msgid "Volume creation options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:67(para) -msgid "It is possible to specify additional volume configuration options on a per-volume basis by specifying volume metadata. The volume is created using the specified options. Changing the metadata after the volume is created has no effect. The following table lists the volume creation options supported by the GPFS volume driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:75(caption) -msgid "Volume Create Options for GPFS Volume Drive" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:79(th) -msgid "Metadata Item Name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:85(literal) ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:98(literal) ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:99(literal) -msgid "fstype" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:88(literal) -msgid "fstype=swap" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:86(td) -msgid "Specifies whether to create a file system or a swap area on the new volume. If is specified, the mkswap command is used to create a swap area. Otherwise the mkfs command is passed the specified file system type, for example ext3, ext4 or ntfs." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:96(literal) -msgid "fslabel" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:97(td) -msgid "Sets the file system label for the file system specified by option. This value is only used if is specified." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:103(literal) -msgid "data_pool_name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:105(para) -msgid "Specifies the GPFS storage pool to which the volume is to be assigned. Note: The GPFS storage pool must already have been created." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:111(literal) -msgid "replicas" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:113(para) -msgid "Specifies how many copies of the volume file to create. 
Valid values are 1, 2, and, for GPFS V3.5.0.7 and later, 3. This value cannot be greater than the value of the MaxDataReplicas attribute of the file system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:122(literal) -msgid "dio" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:124(para) -msgid "Enables or disables the Direct I/O caching policy for the volume file. Valid values are yes and no." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:130(literal) -msgid "write_affinity_depth" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:132(para) -msgid "Specifies the allocation policy to be used for the volume file. Note: This option only works if allow-write-affinity is set for the GPFS data pool." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:139(literal) -msgid "block_group_factor" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:141(para) -msgid "Specifies how many blocks are laid out sequentially in the volume file to behave as a single large block. Note: This option only works if allow-write-affinity is set for the GPFS data pool." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:149(literal) -msgid "write_affinity_failure_group" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:151(para) -msgid "Specifies the range of nodes (in GPFS shared nothing architecture) where replicas of blocks in the volume file are to be written. See GPFS: Administration and Programming Reference for more details on this option." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:162(title) -msgid "Example: Volume creation options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:163(para) -msgid "This example shows the creation of a 50GB volume with an ext4 file system labeled newfs and direct IO enabled:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:169(title) -msgid "Operational notes for GPFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:172(para) -msgid "Volume snapshots are implemented using the GPFS file clone feature. Whenever a new snapshot is created, the snapshot file is efficiently created as a read-only clone parent of the volume, and the volume file uses copy-on-write optimization strategy to minimize data movement." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-gpfs-volume-driver.xml:178(para) -msgid "Similarly when a new volume is created from a snapshot or from an existing volume, the same approach is taken. The same approach is also used when a new volume is created from an Image Service image, if the source image is in raw format, and gpfs_images_share_mode is set to copy_on_write." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:5(title) -msgid "IBM FlashSystem volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:6(para) -msgid "The volume driver for FlashSystem provides OpenStack Block Storage hosts with access to IBM FlashSystems." 
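As a sketch of the GPFS volume-creation example mentioned above (a 50 GB volume with an ext4 file system labeled newfs and Direct I/O enabled), the metadata would be supplied at create time; the exact client syntax shown is an assumption:

    cinder create --metadata fstype=ext4 fslabel=newfs dio=yes --display-name volume_1 50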
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:9(title) -msgid "Configure FlashSystem" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:11(title) -msgid "Configure storage array" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:12(para) -msgid "The volume driver requires a pre-defined array. Users need to create an array on FlashSystem before using the volume driver. An existing array also can be used and existing data will not be deleted." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:17(para) -msgid "FlashSystem can only create one array, so no configuration option is needed for the IBM FlashSystem driver to assign it." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:25(para) -msgid "The driver requires access to the FlashSystem management interface using SSH. It should be provided with the FlashSystem management IP using the san_ip flag, and the management port should be provided by the san_ssh_port flag. By default, the port value is configured to be port 22 (SSH)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:33(para) -msgid "Make sure the compute node running the cinder-volume driver has SSH network access to the storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:38(para) -msgid "Using password authentication, assign a password to the user on the FlashSystem. See the driver configuration flags for the user and password in or ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:47(title) -msgid "IBM FlashSystem FC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:49(title) -msgid "Data Path configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:50(para) -msgid "Using Fiber Channel (FC), each FlashSystem node should have at least one WWPN port configured. If the flashsystem_multipath_enabled flag is set to True in the Cinder configuration file, the driver uses all available WWPNs to attach the volume to the instance (details about the configuration flags appear in section \"Enable IBM FlashSystem FC driver\"). If the flag is not set, the driver uses the WWPN associated with the volume's preferred node (if available), otherwise it uses the first available WWPN of the system. The driver obtains the WWPNs directly from the storage system. You do not need to provide these WWPNs to the driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:65(para) -msgid "Using FC, ensure that the block storage hosts have FC connectivity to the FlashSystem." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:70(title) -msgid "Enable IBM FlashSystem FC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:71(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:172(para) -msgid "Set the volume driver to the FlashSystem driver by setting the volume_driver option in configuration file cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:76(para) -msgid "To enable IBM FlashSystem FC driver, configure the following options in configuration file cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:80(caption) -msgid "List of configuration flags for IBM FlashSystem FC driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:110(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:211(para) -msgid "Management login user name" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:119(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:220(literal) -msgid "flashsystem_connection_protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:122(para) -msgid "Connection protocol should be set to FC" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:125(literal) -msgid "flashsystem_multipath_enabled" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:128(para) -msgid "Enable multipath for FC connections" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:131(literal) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:228(literal) -msgid "flashsystem_multihost_enabled" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:138(para) ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:235(para) -msgid "This option allows the driver to map a vdisk to more than one host at a time. This scenario occurs during migration of a virtual machine with an attached volume; the volume is simultaneously mapped to both the source and destination compute hosts. If your deployment does not require attaching vdisks to multiple hosts, setting this flag to False will provide added safety." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:157(title) -msgid "IBM FlashSystem iSCSI driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:160(para) -msgid "Using iSCSI, each FlashSystem node should have at least one iSCSI port configured. iSCSI IP addresses of IBM FlashSystem can be obtained by FlashSystem GUI or CLI. Please refer to the redbook of FlashSystem." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:166(para) -msgid "Using iSCSI, ensure that the compute nodes have iSCSI network access to IBM FlashSystem." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:171(title) -msgid "Enable IBM FlashSystem iSCSI driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:177(para) -msgid "To enable IBM FlashSystem iSCSI driver, configure the following options in configuration file cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:181(caption) -msgid "List of configuration flags for IBM FlashSystem iSCSI driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:224(para) -msgid "Connection protocol should be set to iSCSI" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:250(literal) -msgid "iscsi_ip_address" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:256(para) -msgid "On cluster of FlashSystem, iSCSI IP address column is the seventh column IP_address of the output of lsportip." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:254(para) -msgid "Set to one of the iSCSI IP addresses obtained by FlashSystem GUI or CLI " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:263(literal) -msgid "flashsystem_iscsi_portid" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:269(para) -msgid "On cluster of FlashSystem, port ID column is the first column id of the output of lsportip, not the sixth column port_id." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:267(para) -msgid "Set to the id of the iscsi_ip_address obtained by FlashSystem GUI or CLI " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:281(title) -msgid "Limitation and known issues" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:284(para) -msgid "IBM FlashSystem only works when:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-flashsystem-volume-driver.xml:290(para) -msgid "These operations are supported:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:6(title) -msgid "Violin Memory 7000 Series FSP volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:7(para) -msgid "The OpenStack V7000 driver package from Violin Memory adds block storage service support for Violin 7300 Flash Storage Platforms (FSPs) and 7700 FSP controllers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:12(para) -msgid "The driver package release can be used with any OpenStack Liberty deployment for all 7300 FSPs and 7700 FSP controllers running Concerto 7.5.3 and later using Fibre Channel HBAs." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:26(para) -msgid "Concerto OS version 7.5.3 or later" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:29(para) -msgid "Fibre channel host interfaces" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:23(para) -msgid "Violin 7300/7700 series FSP with: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:35(para) -msgid "The Violin block storage driver: This driver implements the block storage API calls. The driver is included with the OpenStack Liberty release." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:42(para) -msgid "The vmemclient library: This is the Violin Array Communications library to the Flash Storage Platform through a REST-like interface. The client can be installed using the python 'pip' installer tool. Further information on vmemclient can be found on PyPI ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:84(para) -msgid "Listed operations are supported for thick, thin, and dedup luns, with the exception of cloning. Cloning operations are supported only on thick luns." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:94(para) -msgid "Once the array is configured per the installation guide, it is simply a matter of editing the cinder configuration file to add or modify the parameters. The driver currently only supports fibre channel configuration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:103(para) -msgid "Set the following in your cinder.conf configuration file, replacing the variables using the guide in the following section:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:111(replaceable) ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:126(term) -msgid "VMEM_CAPABILITIES" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:128(para) -msgid "User defined capabilities, a JSON formatted string specifying key/value pairs (string value). The ones particularly supported are dedup and thin. Only these two capabilities are listed here in cinder.conf, indicating this backend be selected for creating luns which have a volume type associated with them that have 'dedup' or 'thin' extra_specs specified. For example, if the FSP is configured to support dedup luns, set the associated driver capabilities to: {\"dedup\":\"True\",\"thin\":\"True\"}." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:144(para) -msgid "External IP address or hostname of the Violin 7300 Memory Gateway. This can be an IP address or hostname." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/violin-v7000-driver.xml:154(para) -msgid "Log-in user name for the Violin 7300 Memory Gateway or 7700 FSP controller. This user must have administrative rights on the array or controller." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:8(title) -msgid "EMC VMAX iSCSI and FC drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:9(para) -msgid "The EMC VMAX drivers, EMCVMAXISCSIDriver and EMCVMAXFCDriver, support the use of EMC VMAX storage arrays under OpenStack Block Storage. 
They both provide equivalent functions and differ only in support for their respective host attachment methods." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:14(para) -msgid "The drivers perform volume operations by communicating with the backend VMAX storage. It uses a CIM client in Python called PyWBEM to perform CIM operations over HTTP." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:17(para) -msgid "The EMC CIM Object Manager (ECOM) is packaged with the EMC SMI-S provider. It is a CIM server that enables CIM clients to perform CIM operations over HTTP by using SMI-S in the back-end for VMAX storage operations." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:21(para) -msgid "The EMC SMI-S Provider supports the SNIA Storage Management Initiative (SMI), an ANSI standard for storage management. It supports the VMAX storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:26(para) -msgid "EMC SMI-S Provider V4.6.2.8 and higher is required. You can download SMI-S from the EMC's support web site (login is required). See the EMC SMI-S Provider release notes for installation instructions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:32(para) -msgid "EMC storage VMAX Family is supported." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:36(para) -msgid "VMAX drivers support these operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:63(para) -msgid "VMAX drivers also support the following features:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:66(para) -msgid "FAST automated storage tiering policy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:69(para) -msgid "Dynamic masking view creation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:72(para) -msgid "Striped volume creation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:78(title) -msgid "Set up the VMAX drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:80(title) -msgid "To set up the EMC VMAX drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:82(para) -msgid "Install the python-pywbem package for your distribution. See ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:87(para) -msgid "Download SMI-S from PowerLink and install it. Add your VMAX arrays to SMI-S." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:89(para) -msgid "For information, see and the SMI-S release notes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:94(para) -msgid "Change configuration files. See and ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:99(para) -msgid "Configure connectivity. For FC driver, see . For iSCSI driver, see ." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:106(title) -msgid "Install the python-pywbem package" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:108(para) -msgid "Install the python-pywbem package for your distribution, as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:112(para) -msgid "On Ubuntu:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:116(para) -msgid "On openSUSE:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:120(para) -msgid "On Red Hat Enterprise Linux, CentOS, and Fedora:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:126(title) -msgid "Set up SMI-S" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:127(para) -msgid "You can install SMI-S on a non-OpenStack host. Supported platforms include different flavors of Windows, Red Hat, and SUSE Linux. SMI-S can be installed on a physical server or a VM hosted by an ESX server. Note that the supported hypervisor for a VM running SMI-S is ESX only. See the EMC SMI-S Provider release notes for more information on supported platforms and installation instructions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:137(para) -msgid "You must discover storage arrays on the SMI-S server before you can use the VMAX drivers. Follow instructions in the SMI-S release notes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:142(para) -msgid "SMI-S is usually installed at /opt/emc/ECIM/ECOM/bin on Linux and C:\\Program Files\\EMC\\ECIM\\ECOM\\bin on Windows. After you install and configure SMI-S, go to that directory and type ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:149(para) -msgid "Use in to add an array. Use and examine the output after the array is added. Make sure that the arrays are recognized by the SMI-S server before using the EMC VMAX drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:157(title) -msgid "cinder.conf configuration file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:159(para) -msgid "Make the following changes in /etc/cinder/cinder.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:161(para) -msgid "Add the following entries, where 10.10.61.45 is the IP address of the VMAX iSCSI target:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:174(para) -msgid "In this example, two backend configuration groups are enabled: CONF_GROUP_ISCSI and CONF_GROUP_FC. Each configuration group has a section describing unique parameters for connections, drivers, the volume_backend_name, and the name of the EMC-specific configuration file containing additional settings. Note that the file name is in the format /etc/cinder/cinder_emc_config_[confGroup].xml." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:184(para) -msgid "Once the cinder.conf and EMC-specific configuration files have been created, cinder commands need to be issued in order to create and associate OpenStack volume types with the declared volume_backend_names:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:193(para) -msgid "By issuing these commands, the Block Storage volume type VMAX_ISCSI is associated with the ISCSI_backend, and the type VMAX_FC is associated with the FC_backend." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:197(para) -msgid "Restart the cinder-volume service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:201(title) -msgid "cinder_emc_config_CONF_GROUP_ISCSI.xml configuration file" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:203(para) -msgid "Create the /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml file. You do not need to restart the service for this change." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:207(para) -msgid "Add the following lines to the XML file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:222(para) -msgid "Where:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:225(para) -msgid "EcomServerIp and EcomServerPort are the IP address and port number of the ECOM server which is packaged with SMI-S." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:231(para) -msgid "EcomUserName and EcomPassword are credentials for the ECOM server." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:236(para) -msgid "PortGroups supplies the names of VMAX port groups that have been pre-configured to expose volumes managed by this backend. Each supplied port group should have sufficient number and distribution of ports (across directors and switches) as to ensure adequate bandwidth and failure protection for the volume connections. PortGroups can contain one or more port groups of either iSCSI or FC ports. When a dynamic masking view is created by the VMAX driver, the port group is chosen randomly from the PortGroup list, to evenly distribute load across the set of groups provided. Make sure that the PortGroups set contains either all FC or all iSCSI port groups (for a given backend), as appropriate for the configured driver (iSCSI or FC)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:258(para) -msgid "The Array tag holds the unique VMAX array serial number." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:262(para) -msgid "The Pool tag holds the unique pool name within a given array. For backends not using FAST automated tiering, the pool is a single pool that has been created by the administrator. For backends exposing FAST policy automated tiering, the pool is the bind pool to be used with the FAST policy." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:271(para) -msgid "The FastPolicy tag conveys the name of the FAST Policy to be used. By including this tag, volumes managed by this backend are treated as under FAST control. Omitting the FastPolicy tag means FAST is not enabled on the provided storage pool." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:282(title) -msgid "FC Zoning with VMAX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:283(para) -msgid "Zone Manager is recommended when using the VMAX FC driver, especially for larger configurations where pre-zoning would be too complex and open-zoning would raise security concerns." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:289(title) -msgid "iSCSI with VMAX" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:292(para) -msgid "Make sure the iscsi-initiator-utils package is installed on the host (use apt-get, zypper, or yum, depending on Linux flavor)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:298(para) -msgid "Verify host is able to ping VMAX iSCSI target ports." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:305(title) -msgid "VMAX masking view and group naming info" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:307(title) -msgid "Masking view names" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:308(para) -msgid "Masking views are dynamically created by the VMAX FC and iSCSI drivers using the following naming conventions:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:315(title) -msgid "Initiator group names" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:316(para) -msgid "For each host that is attached to VMAX volumes using the drivers, an initiator group is created or re-used (per attachment type). All initiators of the appropriate type known for that host are included in the group. At each new attach volume operation, the VMAX driver retrieves the initiators (either WWNNs or IQNs) from OpenStack and adds or updates the contents of the Initiator Group as required. Names are of the following format:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:327(para) -msgid "Hosts attaching to VMAX storage managed by the OpenStack environment cannot also be attached to storage on the same VMAX not being managed by OpenStack. This is due to limitations on VMAX Initiator Group membership." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:335(title) -msgid "FA port groups" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:336(para) -msgid "VMAX array FA ports to be used in a new masking view are chosen from the list provided in the EMC configuration file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:341(title) -msgid "Storage group names" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:342(para) -msgid "As volumes are attached to a host, they are either added to an existing storage group (if it exists) or a new storage group is created and the volume is then added. Storage groups contain volumes created from a pool (either single-pool or FAST-controlled), attached to a single host, over a single connection type (iSCSI or FC). 
Names are formed:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:354(title) -msgid "Concatenated or striped volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:355(para) -msgid "In order to support later expansion of created volumes, the VMAX Block Storage drivers create concatenated volumes as the default layout. If later expansion is not required, users can opt to create striped volumes in order to optimize I/O performance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/emc-vmax-driver.xml:360(para) -msgid "Below is an example of how to create striped volumes. First, create a volume type. Then define the extra spec for the volume type storagetype:stripecount representing the number of meta members in the striped volume. The example below means that each volume created under the GoldStriped volume type will be striped and made up of 4 meta members." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml:5(title) -msgid "IBM XIV and DS8000 volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml:7(para) -msgid "The IBM Storage Driver for OpenStack is a Block Storage driver that supports IBM XIV and IBM DS8000 storage systems over Fiber channel and iSCSI." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml:12(para) -msgid "Set the following in your cinder.conf, and use the following options to configure it." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml:20(para) -msgid "To use the IBM Storage Driver for OpenStack you must download and install the package available at: http://www.ibm.com/support/fixcentral/swg/selectFixes?parent=Enterprise%2BStorage%2BServers&product=ibm/Storage_Disk/XIV+Storage+System+%282810,+2812%29&release=All&platform=All&function=all" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ibm-xiv-volume-driver.xml:27(para) -msgid "For full documentation refer to IBM's online documentation available at http://pic.dhe.ibm.com/infocenter/strhosts/ic/topic/com.ibm.help.strghosts.doc/nova-homepage.html." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:5(title) -msgid "NetApp unified driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:6(para) -msgid "The NetApp unified driver is a block storage driver that supports multiple storage families and protocols. A storage family corresponds to storage systems built on different NetApp technologies such as clustered Data ONTAP, Data ONTAP operating in 7-Mode, and E-Series. The storage protocol refers to the protocol used to initiate data storage and access operations on those storage systems like iSCSI and NFS. The NetApp unified driver can be configured to provision and manage OpenStack volumes on a given storage family using a specified storage protocol. The OpenStack volumes can then be used for accessing and storing data using the storage protocol on the storage family system. The NetApp unified driver is an extensible interface that can support new storage families and protocols." 
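For the striped-volume example described above (a GoldStriped type whose volumes are built from 4 meta members), the commands would look roughly like the following; the back-end name is a placeholder:

    cinder type-create GoldStriped
    cinder type-key GoldStriped set volume_backend_name=GOLD_BACKEND
    cinder type-key GoldStriped set storagetype:stripecount=4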
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:21(para) -msgid "With the Juno release of OpenStack, OpenStack Block Storage has introduced the concept of \"storage pools\", in which a single OpenStack Block Storage back end may present one or more logical storage resource pools from which OpenStack Block Storage will select as a storage location when provisioning volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:27(para) -msgid "In releases prior to Juno, the NetApp unified driver contained some \"scheduling\" logic that determined which NetApp storage container (namely, a FlexVol volume for Data ONTAP, or a dynamic disk pool for E-Series) that a new OpenStack Block Storage volume would be placed into." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:33(para) -msgid "With the introduction of pools, all scheduling logic is performed completely within the OpenStack Block Storage scheduler, as each NetApp storage container is directly exposed to the OpenStack Block Storage scheduler as a storage pool; whereas previously, the NetApp unified driver presented an aggregated view to the scheduler and made a final placement decision as to which NetApp storage container the OpenStack Block Storage volume would be provisioned into." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:44(title) -msgid "NetApp clustered Data ONTAP storage family" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:45(para) -msgid "The NetApp clustered Data ONTAP storage family represents a configuration group which provides OpenStack compute instances access to clustered Data ONTAP storage systems. At present it can be configured in OpenStack Block Storage to work with iSCSI and NFS storage protocols." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:51(title) -msgid "NetApp iSCSI configuration for clustered Data ONTAP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:53(para) -msgid "The NetApp iSCSI configuration for clustered Data ONTAP is an interface from OpenStack to clustered Data ONTAP storage systems for provisioning and managing the SAN block storage entity; that is, a NetApp LUN which can be accessed using the iSCSI protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:58(para) -msgid "The iSCSI configuration for clustered Data ONTAP is a direct interface from OpenStack Block Storage to the clustered Data ONTAP instance and as such does not require additional management software to achieve the desired functionality. It uses NetApp APIs to interact with the clustered Data ONTAP instance." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:65(title) -msgid "Configuration options for clustered Data ONTAP family with iSCSI protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:67(para) -msgid "Configure the volume driver, storage family and storage protocol to the NetApp unified driver, clustered Data ONTAP, and iSCSI respectively by setting the , and options in cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:78(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:142(replaceable) -msgid "openstack-vserver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:79(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:143(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:351(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:408(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:487(replaceable) -msgid "myhostname" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:80(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:144(replaceable) -msgid "port" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:84(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:356(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:504(para) -msgid "To use the iSCSI protocol, you must override the default value of with iscsi." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:94(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:158(para) -msgid "If you specify an account in the that only has virtual storage server (Vserver) administration privileges (rather than cluster-wide administration privileges), some advanced features of the NetApp unified driver will not work and you may see warnings in the OpenStack Block Storage logs." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:104(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:277(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:366(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:423(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:514(para) -msgid "For more information on these options and other deployment and operational scenarios, visit the NetApp OpenStack Deployment and Operations Guide." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:114(title) -msgid "NetApp NFS configuration for clustered Data ONTAP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:116(para) -msgid "The NetApp NFS configuration for clustered Data ONTAP is an interface from OpenStack to a clustered Data ONTAP system for provisioning and managing OpenStack volumes on NFS exports provided by the clustered Data ONTAP system that are accessed using the NFS protocol." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:122(para) -msgid "The NFS configuration for clustered Data ONTAP is a direct interface from OpenStack Block Storage to the clustered Data ONTAP instance and as such does not require any additional management software to achieve the desired functionality. It uses NetApp APIs to interact with the clustered Data ONTAP instance." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:129(title) -msgid "Configuration options for the clustered Data ONTAP family with NFS protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:131(para) -msgid "Configure the volume driver, storage family, and storage protocol to NetApp unified driver, clustered Data ONTAP, and NFS respectively by setting the , and options in cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:147(replaceable) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:412(replaceable) -msgid "/etc/cinder/nfs_shares" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:151(para) -msgid "Additional NetApp NFS configuration options are shared with the generic NFS driver. These options can be found here: ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:169(title) -msgid "NetApp NFS Copy Offload client" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:170(para) -msgid "A feature was added in the Icehouse release of the NetApp unified driver that enables Image Service images to be efficiently copied to a destination Block Storage volume. When the Block Storage and Image Service are configured to use the NetApp NFS Copy Offload client, a controller-side copy will be attempted before reverting to downloading the image from the Image Service. This improves image provisioning times while reducing the consumption of bandwidth and CPU cycles on the host(s) running the Image and Block Storage services. This is due to the copy operation being performed completely within the storage cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:179(para) -msgid "The NetApp NFS Copy Offload client can be used in either of the following scenarios:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:183(para) -msgid "The Image Service is configured to store images in an NFS share that is exported from a NetApp FlexVol volume and the destination for the new Block Storage volume will be on an NFS share exported from a different FlexVol volume than the one used by the Image Service. Both FlexVols must be located within the same cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:190(para) -msgid "The source image from the Image Service has already been cached in an NFS image cache within a Block Storage backend. The cached image resides on a different FlexVol volume than the destination for the new Block Storage volume. Both FlexVols must be located within the same cluster." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:196(para) -msgid "To use this feature, you must configure the Image Service, as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:200(para) -msgid "Set the configuration option to file." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:205(para) -msgid "Set the configuration option to the path to the Image Service NFS export." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:210(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:215(para) -msgid "Set the configuration option to True." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:220(para) -msgid "Set the configuration option to a metadata file. The metadata file should contain a JSON object that contains the correct information about the NFS export used by the Image Service, similar to:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:234(para) -msgid "To use this feature, you must configure the Block Storage service, as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:238(para) -msgid "Set the configuration option to the path to the NetApp Copy Offload binary." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:243(para) -msgid "Set the configuration option to 2." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:249(para) -msgid "This feature requires that:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:252(para) -msgid "The storage system must have Data ONTAP v8.2 or greater installed." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:256(para) -msgid "The vStorage feature must be enabled on each storage virtual machine (SVM, also known as a Vserver) that is permitted to interact with the copy offload client." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:263(para) -msgid "To configure the copy offload workflow, enable NFS v4.0 or greater and export it from the SVM." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:270(para) -msgid "To download the NetApp copy offload binary to be utilized in conjunction with the configuration option, please visit the Utility Toolchest page at the NetApp Support portal (login is required)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:287(title) -msgid "NetApp-supported extra specs for clustered Data ONTAP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:289(para) -msgid "Extra specs enable vendors to specify extra filter criteria that the Block Storage scheduler uses when it determines which volume node should fulfill a volume provisioning request. When you use the NetApp unified driver with a clustered Data ONTAP storage system, you can leverage extra specs with OpenStack Block Storage volume types to ensure that OpenStack Block Storage volumes are created on storage back ends that have certain properties. For example, when you configure QoS, mirroring, or compression for a storage back end." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:299(para) -msgid "Extra specs are associated with OpenStack Block Storage volume types, so that when users request volumes of a particular volume type, the volumes are created on storage back ends that meet the list of requirements. For example, the back ends have the available space or extra specs. 
You can use the specs in the following table when you define OpenStack Block Storage volume types by using the command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:312(title) -msgid "NetApp Data ONTAP operating in 7-Mode storage family" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:314(para) -msgid "The NetApp Data ONTAP operating in 7-Mode storage family represents a configuration group which provides OpenStack compute instances access to 7-Mode storage systems. At present it can be configured in OpenStack Block Storage to work with iSCSI and NFS storage protocols." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:320(title) -msgid "NetApp iSCSI configuration for Data ONTAP operating in 7-Mode" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:322(para) -msgid "The NetApp iSCSI configuration for Data ONTAP operating in 7-Mode is an interface from OpenStack to Data ONTAP operating in 7-Mode storage systems for provisioning and managing the SAN block storage entity, that is, a LUN which can be accessed using iSCSI protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:328(para) -msgid "The iSCSI configuration for Data ONTAP operating in 7-Mode is a direct interface from OpenStack to Data ONTAP operating in 7-Mode storage system and it does not require additional management software to achieve the desired functionality. It uses NetApp ONTAPI to interact with the Data ONTAP operating in 7-Mode storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:336(title) -msgid "Configuration options for the Data ONTAP operating in 7-Mode storage family with iSCSI protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:339(para) -msgid "Configure the volume driver, storage family and storage protocol to the NetApp unified driver, Data ONTAP operating in 7-Mode, and iSCSI respectively by setting the , and options in cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:376(title) -msgid "NetApp NFS configuration for Data ONTAP operating in 7-Mode" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:378(para) -msgid "The NetApp NFS configuration for Data ONTAP operating in 7-Mode is an interface from OpenStack to Data ONTAP operating in 7-Mode storage system for provisioning and managing OpenStack volumes on NFS exports provided by the Data ONTAP operating in 7-Mode storage system which can then be accessed using NFS protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:385(para) -msgid "The NFS configuration for Data ONTAP operating in 7-Mode is a direct interface from OpenStack Block Storage to the Data ONTAP operating in 7-Mode instance and as such does not require any additional management software to achieve the desired functionality. It uses NetApp ONTAPI to interact with the Data ONTAP operating in 7-Mode storage system." 
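A sketch of the corresponding Data ONTAP operating in 7-Mode iSCSI options; the ontap_7mode family value and the credentials are assumptions/placeholders:

    volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
    netapp_storage_family = ontap_7mode
    netapp_storage_protocol = iscsi
    netapp_server_hostname = myhostname
    netapp_server_port = port
    netapp_login = admin_username
    netapp_password = admin_password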
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:393(title) -msgid "Configuration options for the Data ONTAP operating in 7-Mode family with NFS protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:396(para) -msgid "Configure the volume driver, storage family, and storage protocol to the NetApp unified driver, Data ONTAP operating in 7-Mode, and NFS respectively by setting the , and options in cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:416(para) -msgid "Additional NetApp NFS configuration options are shared with the generic NFS driver. For a description of these, see ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:434(title) -msgid "NetApp E-Series storage family" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:435(para) -msgid "The NetApp E-Series storage family represents a configuration group which provides OpenStack compute instances access to E-Series storage systems. At present it can be configured in OpenStack Block Storage to work with the iSCSI storage protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:441(title) -msgid "NetApp iSCSI configuration for E-Series" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:442(para) -msgid "The NetApp iSCSI configuration for E-Series is an interface from OpenStack to E-Series storage systems for provisioning and managing the SAN block storage entity; that is, a NetApp LUN which can be accessed using the iSCSI protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:447(para) -msgid "The iSCSI configuration for E-Series is an interface from OpenStack Block Storage to the E-Series proxy instance and as such requires the deployment of the proxy instance in order to achieve the desired functionality. The driver uses REST APIs to interact with the E-Series proxy instance, which in turn interacts directly with the E-Series controllers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:453(para) -msgid "The use of multipath and DM-MP are required when using the OpenStack Block Storage driver for E-Series. In order for OpenStack Block Storage and OpenStack Compute to take advantage of multiple paths, the following configuration options must be correctly configured:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:460(para) -msgid "The option should be set to True in the cinder.conf file within the driver-specific stanza (for example, [myDriver])." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:467(para) -msgid "The option should be set to True in the nova.conf file within the [libvirt] stanza." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:474(title) -msgid "Configuration options for E-Series storage family with iSCSI protocol" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:476(para) -msgid "Configure the volume driver, storage family, and storage protocol to the NetApp unified driver, E-Series, and iSCSI respectively by setting the , and options in cinder.conf as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:491(replaceable) -msgid "1.2.3.4,5.6.7.8" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:492(replaceable) -msgid "arrayPassword" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:493(replaceable) -msgid "pool1,pool2" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:496(para) -msgid "To use the E-Series driver, you must override the default value of with eseries." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:525(title) -msgid "Upgrading prior NetApp drivers to the NetApp unified driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:527(para) -msgid "NetApp introduced a new unified block storage driver in Havana for configuring different storage families and storage protocols. This requires defining upgrade path for NetApp drivers which existed in releases prior to Havana. This section covers the upgrade configuration for NetApp drivers to the new unified configuration and a list of deprecated NetApp drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:535(title) -msgid "Upgraded NetApp drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:536(para) -msgid "This section describes how to update OpenStack Block Storage configuration from a pre-Havana release to the unified driver format." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:540(title) -msgid "Driver upgrade configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:543(para) -msgid "NetApp iSCSI direct driver for Clustered Data ONTAP in Grizzly (or earlier)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:546(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:556(para) -msgid "NetApp unified driver configuration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:553(para) -msgid "NetApp NFS direct driver for Clustered Data ONTAP in Grizzly (or earlier)." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:563(para) -msgid "NetApp iSCSI direct driver for Data ONTAP operating in 7-Mode storage controller in Grizzly (or earlier)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:567(para) ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:578(para) -msgid "NetApp unified driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:574(para) -msgid "NetApp NFS direct driver for Data ONTAP operating in 7-Mode storage controller in Grizzly (or earlier)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:589(title) -msgid "Deprecated NetApp drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:590(para) -msgid "This section lists the NetApp drivers in earlier releases that are deprecated in Havana." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:594(para) -msgid "NetApp iSCSI driver for clustered Data ONTAP." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:599(para) -msgid "NetApp NFS driver for clustered Data ONTAP." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:604(para) -msgid "NetApp iSCSI driver for Data ONTAP operating in 7-Mode storage controller." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:609(para) -msgid "NetApp NFS driver for Data ONTAP operating in 7-Mode storage controller." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/netapp-volume-driver.xml:615(para) -msgid "For support information on deprecated NetApp drivers in the Havana release, visit the NetApp OpenStack Deployment and Operations Guide." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:4(title) -msgid "X-IO volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:5(para) -msgid "The X-IO volume driver for OpenStack Block Storage enables ISE products to be managed by OpenStack Block Storage nodes. This driver can be configured to work with iSCSI and Fibre Channel storage protocols. The X-IO volume driver allows the cloud operator to take advantage of ISE features like Quality of Service and Continuous Adaptive Data Placement (CADP). It also supports creating thin volumes and specifying volume media affinity." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:12(para) -msgid "ISE FW 2.8.0 or ISE FW 3.1.0 is required for OpenStack Block Storage support. The X-IO volume driver will not work with older ISE FW." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:38(para) -msgid "Create volumes with QoS specifications." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:43(title) -msgid "Configure X-IO Volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:44(para) -msgid "To configure the use of an ISE product with OpenStack Block Storage, modify your cinder.conf file as follows. 
Be careful to use the one that matches the storage protocol in use:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:48(title) -msgid "Fibre Channel" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:63(title) -msgid "Optional configuration parameters" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:67(title) -msgid "Multipath" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:68(para) -msgid "The X-IO ISE supports a multipath configuration, but multipath must be enabled on the compute node (see ISE Storage Blade Best Practices Guide). For more information, see www.openstack.org." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:74(title) -msgid "Volume types" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:78(title) -msgid "Extra specs" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:75(para) -msgid "OpenStack Block Storage uses volume types to help the administrator specify attributes for volumes. These attributes are called extra-specs. The X-IO volume driver supports the following extra-specs (name, valid values, description): Feature:Raid (1, 5): RAID level for the volume; Feature:Pool (1 to n, where n is the number of pools on the ISE): pool to create the volume in; Affinity:Type (cadp, flash, hdd): volume media affinity type; Alloc:Type (0 = thick, 1 = thin): allocation type for the volume, thick or thin; QoS:minIOPS (n, a value less than maxIOPS): minimum IOPS setting for the volume; QoS:maxIOPS (n, a value greater than minIOPS): maximum IOPS setting for the volume; QoS:burstIOPS (n, a value greater than minIOPS): burst IOPS setting for the volume.
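For illustration, the two volume types described just below (xio1-flash and xio1) could be created with extra-specs keys drawn from the list above; the specific keys and values shown here are examples only, not a complete or mandated set:

    $ cinder type-create xio1-flash
    $ cinder type-key xio1-flash set Affinity:Type=flash

    $ cinder type-create xio1
    $ cinder type-key xio1 set QoS:minIOPS=20 QoS:maxIOPS=5000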
" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:131(para) -msgid "Create a volume type called xio1-flash for volumes that should reside on ssd storage: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/xio-volume-driver.xml:136(para) -msgid "Create a volume type called xio1 and set QoS min and max: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:8(title) -msgid "Windows iSCSI volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:10(para) -msgid "Windows Server 2012 and Windows Storage Server 2012 offer an integrated iSCSI Target service that can be used with OpenStack Block Storage in your stack. Being entirely a software solution, consider it in particular for mid-sized networks where the costs of a SAN might be excessive." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:15(para) -msgid "The Windows cinder-volume driver works with OpenStack Compute on any hypervisor. It includes snapshotting support and the \"boot from volume\" feature." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:19(para) -msgid "This driver creates volumes backed by fixed-type VHD images on Windows Server 2012 and dynamic-type VHDX on Windows Server 2012 R2, stored locally on a user-specified path. The system uses those images as iSCSI disks and exports them through iSCSI targets. Each volume has its own iSCSI target." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:24(para) -msgid "This driver has been tested with Windows Server 2012 and Windows Server R2 using the Server and Storage Server distributions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:27(para) -msgid "Install the cinder-volume service as well as the required Python components directly onto the Windows node." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:31(para) -msgid "You may install and configure cinder-volume and its dependencies manually using the following guide or you may use the Cinder Volume Installer, presented below." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:36(title) -msgid "Installing using the OpenStack cinder volume installer" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:38(para) -msgid "In case you want to avoid all the manual setup, you can use Cloudbase Solutions' installer. You can find it at https://www.cloudbase.it/downloads/CinderVolumeSetup_Beta.msi. It installs an independent Python environment, in order to avoid conflicts with existing applications, dynamically generates a cinder.conf file based on the parameters provided by you." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:46(para) -msgid "cinder-volume will be configured to run as a Windows Service, which can be restarted using:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:52(para) -msgid "The installer can also be used in unattended mode. 
More details about how to use the installer and its features can be found at https://www.cloudbase.it" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:58(title) -msgid "Windows Server configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:60(para) -msgid "The required service in order to run cinder-volume on Windows is wintarget. This will require the iSCSI Target Server Windows feature to be installed. You can install it by running the following command:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:69(para) -msgid "The Windows Server installation requires at least 16 GB of disk space. The volumes hosted by this node need the extra space." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:73(para) -msgid "For cinder-volume to work properly, you must configure NTP as explained in ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:77(para) -msgid "Next, install the requirements as described in ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:82(title) -msgid "Getting the code" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:84(para) -msgid "Git can be used to download the necessary source code. The installer to run Git on Windows can be downloaded here:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:91(para) -msgid "Once installed, run the following to clone the OpenStack Block Storage code." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:98(title) -msgid "Configure cinder-volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:100(para) -msgid "The cinder.conf file may be placed in C:\\etc\\cinder. Below is a config sample for using the Windows iSCSI Driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:119(para) -msgid "The following table contains a reference to the only driver specific option that will be used by the Block Storage Windows driver:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:126(title) -msgid "Running cinder-volume" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/windows-iscsi-volume-driver.xml:128(para) -msgid "After configuring cinder-volume using the cinder.conf file, you may use the following commands to install and run the service (note that you must replace the variables with the proper paths):" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lvm-volume-driver.xml:6(title) -msgid "LVM" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lvm-volume-driver.xml:7(para) -msgid "The default volume back-end uses local volumes managed by LVM." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lvm-volume-driver.xml:8(para) -msgid "This driver supports different transport protocols to attach volumes, currently iSCSI and iSER." 
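The LVM section that follows refers to per-transport options without reproducing them. As a minimal sketch under common defaults (volume group name and target helper are placeholders; only the protocol option differs between the two transports):

    [lvm-iscsi]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes
    iscsi_protocol = iscsi
    iscsi_helper = tgtadm

    [lvm-iser]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes
    iscsi_protocol = iser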
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lvm-volume-driver.xml:10(para) -msgid "Set the following in your cinder.conf configuration file, and use the following options to configure for iSCSI transport:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lvm-volume-driver.xml:19(para) -msgid "Use the following options to configure for the iSER transport:" -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:19(None) -msgid "@@image: '../../../common/figures/ceph/ceph-architecture.png'; md5=f7e854c9dbfb64534c47c3583e774c81" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:4(title) -msgid "Ceph RADOS Block Device (RBD)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:5(para) -msgid "If you use KVM or QEMU as your hypervisor, you can configure the Compute service to use Ceph RADOS block devices (RBD) for volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:15(title) -msgid "Ceph architecture" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:9(para) -msgid "Ceph is a massively scalable, open source, distributed storage system. It is comprised of an object store, block store, and a POSIX-compliant distributed file system. The platform can auto-scale to the exabyte level and beyond. It runs on commodity hardware, is self-healing and self-managing, and has no single point of failure. Ceph is in the Linux kernel and is integrated with the OpenStack cloud operating system. Due to its open-source nature, you can install and use this portable storage platform in public or private clouds. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:25(title) -msgid "RADOS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:26(para) -msgid "Ceph is based on RADOS: Reliable Autonomic Distributed Object Store. RADOS distributes objects across the storage cluster and replicates objects for fault tolerance. RADOS contains the following major components:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:32(para) -msgid "Object Storage Device (OSD) Daemon. The storage daemon for the RADOS service, which interacts with the OSD (physical or logical storage unit for your data)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:35(para) -msgid "You must run this daemon on each server in your cluster. For each OSD, you can have an associated hard drive disk. For performance purposes, pool your hard drive disk with raid arrays, logical volume management (LVM), or B-tree file system (Btrfs) pooling. By default, the following pools are created: data, metadata, and RBD." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:42(para) -msgid "Meta-Data Server (MDS). Stores metadata. MDSs build a POSIX file system on top of objects for Ceph clients. However, if you do not use the Ceph file system, you do not need a metadata server." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:49(para) -msgid "Monitor (MON). A lightweight daemon that handles all communications with external applications and clients. 
It also provides a consensus for distributed decision making in a Ceph/RADOS cluster. For instance, when you mount a Ceph shared on a client, you point to the address of a MON server. It checks the state and the consistency of the data. In an ideal setup, you must run at least three ceph-mon daemons on separate servers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:58(para) -msgid "Ceph developers recommend XFS for production deployments, Btrfs for testing, development, and any non-critical deployments. Btrfs has the correct feature set and roadmap to serve Ceph in the long-term, but XFS and ext4 provide the necessary stability for today’s deployments." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:63(para) -msgid "If using Btrfs, ensure that you use the correct version (see Ceph Dependencies)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:66(para) -msgid "For more information about usable file systems, see ceph.com/ceph-storage/file-system/." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:72(title) -msgid "Ways to store, use, and expose data" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:73(para) -msgid "To store and access your data, you can use the following storage systems:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:77(para) -msgid "RADOS. Use as an object, default storage mechanism." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:81(para) -msgid "RBD. Use as a block device. The Linux kernel RBD (RADOS block device) driver allows striping a Linux block device over multiple distributed object store data objects. It is compatible with the KVM RBD image." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:88(para) -msgid "CephFS. Use as a file, POSIX-compliant file system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:92(para) -msgid "Ceph exposes RADOS; you can access it through the following interfaces:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:95(para) -msgid "RADOS Gateway. OpenStack Object Storage and Amazon-S3 compatible RESTful interface (see RADOS_Gateway)." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:101(para) -msgid "librados, and its related C/C++ bindings." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:104(para) -msgid "RBD and QEMU-RBD. Linux kernel and QEMU block devices that stripe data across multiple objects." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:112(para) -msgid "The following table contains the configuration options supported by the Ceph RADOS Block Device driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:115(title) -msgid "Deprecation notice" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/ceph-rbd-volume-driver.xml:116(para) -msgid "The option has been deprecated and replaced by ." 
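As a hedged example of a Ceph-backed Block Storage configuration using the standard RBD driver options (pool, user, and secret UUID values are placeholders chosen for illustration):

    [ceph-rbd]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = volumes
    rbd_ceph_conf = /etc/ceph/ceph.conf
    rbd_user = cinder
    rbd_secret_uuid = SECRET_UUID
    rbd_flatten_volume_from_snapshot = false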
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:6(title) -msgid "ProphetStor Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:7(para) -msgid "ProhetStor Fibre Channel and iSCSI drivers add support for ProphetStor Flexvisor through OpenStack Block Storage. ProphetStor Flexvisor enables commodity x86 hardware as software-defined storage leveraging well-proven ZFS for disk management to provide enterprise grade storage services such as snapshots, data protection with different RAID levels, replication, and deduplication." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:13(para) -msgid "The DPLFCDriver and DPLISCSIDriver drivers run volume operations by communicating with the ProphetStor storage system over HTTPS." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:44(title) -msgid "Enable the Fibre Channel or iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:45(para) -msgid "The DPLFCDriver and DPLISCSIDriver are installed with the OpenStack software." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:50(para) -msgid "Query storage pool id for configure dpl_pool of the cinder.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:54(para) -msgid "Logon onto the storage system with administrator access." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:56(replaceable) ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:77(replaceable) -msgid "STORAGE IP ADDRESS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:59(para) -msgid "View the current usable pool id." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:64(para) -msgid "Use d5bd40b58ea84e9da09dcf25a01fdc07 to config the dpl_pool of /etc/cinder/cinder.conf." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:69(para) -msgid "Other management command can reference by command help ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:73(para) -msgid "Make the following changes on the volume node /etc/cinder/cinder.conf file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:104(para) -msgid "Save the changes to the /etc/cinder/cinder.conf file and restart the cinder-volume service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:110(para) -msgid "The ProphetStor Fibre Channel or iSCSI drivers are now enabled on your OpenStack system. If you experience problems, review the Block Storage service log files for errors." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/prophetstor-dpl-driver.xml:114(para) -msgid "The following table contains the options supported by the ProphetStor storage driver." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:4(title) -msgid "HPE 3PAR Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:5(para) -msgid "The HPE3PARFCDriver and HPE3PARISCSIDriver drivers, which are based on the Block Storage service (Cinder) plug-in architecture, run volume operations by communicating with the HPE 3PAR storage system over HTTP, HTTPS, and SSH connections. The HTTP and HTTPS communications use python-3parclient, which is part of the Python standard library." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:13(para) -msgid "For information about how to manage HPE 3PAR storage systems, see the HPE 3PAR user documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:17(para) -msgid "To use the HPE 3PAR drivers, install the following software and components on the HPE 3PAR storage system:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:22(para) -msgid "HPE 3PAR Operating System software version 3.1.3 MU1 or higher." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:26(para) -msgid "Deduplication provisioning requires SSD disks and HPE 3PAR Operating System software version 3.2.1 MU1 or higher." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:31(para) -msgid "Enabling Flash Cache Policy requires the following:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:35(para) -msgid "Array must contain SSD disks." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:39(para) -msgid "HPE 3PAR Operating System software version 3.2.1 MU2 or higher." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:44(para) -msgid "python-3parclient version 4.0.0 or newer." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:48(para) -msgid "Array must have the Adaptive Flash Cache license installed." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:53(para) -msgid "Flash Cache must be enabled on the array with the CLI command , where size must be in 16 GB increments. For example, will create 128 GB of Flash Cache for each node pair in the array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:65(para) -msgid "The Dynamic Optimization license is required to support any feature that results in a volume changing provisioning type or CPG. This may apply to the volume , , and commands." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:74(para) -msgid "The Virtual Copy License is required to support any feature that involves volume snapshots. This applies to the volume commands." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:83(para) -msgid "HPE 3PAR Web Services API Server must be enabled and running" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:87(para) -msgid "One Common Provisioning Group (CPG)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:90(para) -msgid "Additionally, you must install the python-3parclient version 4.0.0 or newer from the Python standard library on the system with the enabled Block Storage service volume drivers." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:132(para) -msgid "Volume type support for both HPE 3PAR drivers includes the ability to set the following capabilities in the OpenStack Block Storage API cinder.api.contrib.types_extra_specs volume type extra specs extension module:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:139(literal) -msgid "hpe3par:cpg" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:140(para) -msgid "This setting is ignored as of Kilo. Instead, use the hpe3par_cpg setting in cinder.conf to list the valid CPGs for a backend. CPGs should now be controlled by configuring separate backends with pools." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:147(literal) -msgid "hpe3par:snap_cpg" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:150(literal) -msgid "hpe3par:provisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:153(literal) -msgid "hpe3par:persona" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:156(literal) -msgid "hpe3par:vvs" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:159(literal) -msgid "hpe3par:flash_cache" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:162(para) -msgid "To work with the default filter scheduler, the key values are case sensitive and scoped with hpe3par:. For information about how to set the key-value pairs and associate them with a volume type, run the following command: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:171(para) -msgid "Volumes that are cloned only support extra specs keys cpg, snap_cpg, provisioning and vvs. The others are ignored. In addition the comments section of the cloned volume in the HPE 3PAR StoreServ storage array is not populated." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:182(para) -msgid "hpe3par:cpg - Defaults to the hpe3par_cpg setting in the cinder.conf file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:187(para) -msgid "hpe3par:snap_cpg - Defaults to the hpe3par_snap setting in the cinder.conf file. If hpe3par_snap is not set, it defaults to the hpe3par_cpg setting." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:195(para) -msgid "hpe3par:provisioning - Defaults to thin provisioning, the valid values are thin, full, and dedup." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:201(para) -msgid "hpe3par:persona - Defaults to the 2 - Generic-ALUA persona. The valid values are, 1 - Generic, 2 - Generic-ALUA, 3 - Generic-legacy, 4 - HPUX-legacy, 5 - AIX-legacy, 6 - EGENERA, 7 - ONTAP-legacy, 8 - VMware, 9 - OpenVMS, 10 - HPUX, and 11 - WindowsServer." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:215(para) -msgid "hpe3par:flash_cache - Defaults to false, the valid values are true and false." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:221(para) -msgid "QoS support for both HPE 3PAR drivers includes the ability to set the following capabilities in the OpenStack Block Storage API cinder.api.contrib.qos_specs_manage qos specs extension module:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:228(literal) -msgid "minBWS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:231(literal) -msgid "maxBWS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:234(literal) -msgid "minIOPS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:237(literal) -msgid "maxIOPS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:240(literal) ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:198(replaceable) -msgid "latency" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:243(literal) -msgid "priority" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:246(para) -msgid "The qos keys above no longer require to be scoped but must be created and associated to a volume type. For information about how to set the key-value pairs and associate them with a volume type, run the following commands: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:256(para) -msgid "The following keys require that the HPE 3PAR StoreServ storage array has a Priority Optimization license installed." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:261(para) -msgid "hpe3par:vvs - The virtual volume set name that has been predefined by the Administrator with Quality of Service (QoS) rules associated to it. If you specify extra_specs hpe3par:vvs, the qos_specs minIOPS, maxIOPS, minBWS, and maxBWS settings are ignored." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:272(para) -msgid "minBWS - The QoS I/O issue bandwidth minimum goal in MBs. If not set, the I/O issue bandwidth rate has no minimum goal." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:277(para) -msgid "maxBWS - The QoS I/O issue bandwidth rate limit in MBs. If not set, the I/O issue bandwidth rate has no limit." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:282(para) -msgid "minIOPS - The QoS I/O issue count minimum goal. If not set, the I/O issue count has no minimum goal." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:287(para) -msgid "maxIOPS - The QoS I/O issue count rate limit. If not set, the I/O issue count rate has no limit." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:292(para) -msgid "latency - The latency goal in milliseconds." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:296(para) -msgid "priority - The priority of the QoS rule over other rules. If not set, the priority is normal, valid values are low, normal and high." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:303(para) -msgid "Since the Icehouse release, minIOPS and maxIOPS must be used together to set I/O limits. Similarly, minBWS and maxBWS must be used together. If only one is set the other will be set to the same value." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:309(para) -msgid "The following keys require that the HPE 3PAR StoreServ storage array has an Adaptive Flash Cache license installed." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:314(para) -msgid "hpe3par:flash_cache - The flash-cache policy, which can be turned on and off by setting the value to true or false." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:323(title) -msgid "Enable the HPE 3PAR Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:325(para) -msgid "The HPE3PARFCDriver and HPE3PARISCSIDriver are installed with the OpenStack software." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:330(para) -msgid "Install the python-3parclient Python package on the OpenStack Block Storage system. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:337(para) -msgid "Verify that the HPE 3PAR Web Services API server is enabled and running on the HPE 3PAR storage system." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:342(para) -msgid "Log onto the HPE 3PAR storage system with administrator access." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:345(replaceable) -msgid "<HPE 3PAR IP Address>" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:348(para) -msgid "View the current state of the Web Services API Server." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:355(para) -msgid "If the Web Services API Server is disabled, start it." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:364(para) -msgid "If the HTTP or HTTPS state is disabled, enable one of them. or " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:371(para) -msgid "To stop the Web Services API Server, use the stopwsapi command. For other options run the command." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:378(para) -msgid "If you are not using an existing CPG, create a CPG on the HPE 3PAR storage system to be used as the default location for creating volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:445(para) -msgid "You can enable only one driver on each cinder instance unless you enable multiple back-end support. See the Cinder multiple back-end support instructions to enable this feature." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:452(para) -msgid "You can configure one or more iSCSI addresses by using the option. When you configure multiple addresses, the driver selects the iSCSI port with the fewest active volumes at attach time. The IP address might include an IP port by using a colon (:) to separate the address from port. If you do not define an IP port, the default port 3260 is used. Separate IP addresses with a comma (,). The / options might be used as an alternative to for single port iSCSI configuration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:477(para) -msgid "The HPE 3PAR Fibre Channel and iSCSI drivers are now enabled on your OpenStack system. If you experience problems, review the Block Storage service log files for errors." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hpe-3par-driver.xml:481(para) -msgid "The following table contains all the configuration options supported by the HPE 3PAR Fibre Channel and iSCSI drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:10(title) -msgid "HP MSA Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:12(para) -msgid "The HPMSAFCDriver and HPMSAISCSIDriver Cinder drivers allow HP MSA 2040 or 1040 arrays to be used for block storage in OpenStack deployments." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:19(para) -msgid "To use the HP MSA drivers, the following are required:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:23(para) -msgid "HP MSA 2040 or 1040 array with:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:31(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:31(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:31(para) -msgid "G22x firmware or later" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:37(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:42(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:37(para) -msgid "Network connectivity between the OpenStack host and the array management interfaces" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:42(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:47(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:42(para) -msgid "HTTPS or HTTP must be enabled on the array" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:94(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:99(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:94(title) -msgid "Configuring the array" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:98(para) -msgid "Verify that the array can be managed via an HTTPS connection. HTTP can also be used if hpmsa_api_protocol=http is placed into the appropriate sections of the cinder.conf file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:103(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:108(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:103(para) -msgid "Confirm that virtual pools A and B are present if you plan to use virtual pools for OpenStack storage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:106(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:111(para) -msgid "If you plan to use vdisks instead of virtual pools, create or identify one or more vdisks to be used for OpenStack storage; typically this will mean creating or setting aside one disk group for each of the A and B controllers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:113(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:118(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:108(para) -msgid "Edit the cinder.conf file to define an storage backend entry for each storage pool on the array that will be managed by OpenStack. Each entry consists of a unique section name, surrounded by square brackets, followed by options specified in key=value format." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:121(para) -msgid "The hpmsa_backend_name value specifies the name of the storage pool or vdisk on the array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:127(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:132(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:122(para) -msgid "The volume_backend_name option value can be a unique value, if you wish to be able to assign volumes to a specific storage pool on the array, or a name that's shared among multiple storage pools to let the volume scheduler choose where new volumes are allocated." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:135(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:140(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:130(para) -msgid "The rest of the options will be repeated for each storage pool in a given array: the appropriate Cinder driver name; IP address or hostname of the array management interface; the username and password of an array user account with manage privileges; and the iSCSI IP addresses for the array if using the iSCSI transport protocol." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:144(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:149(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:139(para) -msgid "In the examples below, two backends are defined, one for pool A and one for pool B, and a common volume_backend_name is used so that a single volume type definition can be used to allocate volumes from both pools." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:151(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:156(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:146(title) -msgid "iSCSI example backend entries" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:173(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:178(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:168(title) -msgid "Fibre Channel example backend entries" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:194(para) -msgid "If any volume_backend_name value refers to a vdisk rather than a virtual pool, add an additional statement hpmsa_backend_type = linear to that backend entry." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:201(para) -msgid "If HTTPS is not enabled in the array, include hpmsa_api_protocol = http in each of the backend definitions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:207(para) -msgid "If HTTPS is enabled, you can enable certificate verification with the option hpmsa_verify_certificate=True. You may also use the hpmsa_verify_certificate_path parameter to specify the path to a CA_BUNDLE file containing CAs other than those in the default list." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:216(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:221(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:203(para) -msgid "Modify the [DEFAULT] section of the cinder.conf file to add an enabled_backends parameter specifying the backend entries you added, and a default_volume_type parameter specifying the name of a volume type that you will create in the next step." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:224(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:229(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:211(title) -msgid "[DEFAULT] section changes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:242(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:247(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:229(title) -msgid "Creating a volume type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:244(para) ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:246(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:249(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:251(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:231(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:233(para) -msgid "$ " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:235(para) -msgid "Create a new volume type for each distinct volume_backend_name value that you added to cinder.conf. The example below assumes that the same volume_backend_name=hpmsa-array option was specified in all of the entries, and specifies that the volume type hpmsa can be used to allocate volumes from any of them. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:252(para) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:257(para) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:239(para) -msgid "After modifying cinder.conf, restart the cinder-volume service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:259(title) ./doc/config-reference/block-storage/drivers/dothill-driver.xml:264(title) ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:246(title) -msgid "Driver-specific options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hp-msa-driver.xml:261(para) -msgid "The following table contains the configuration options that are specific to the HP MSA drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:4(title) -msgid "NFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:5(para) -msgid "The Network File System (NFS) is a distributed file system protocol originally developed by Sun Microsystems in 1984. An NFS server exports one or more of its file systems, known as shares. An NFS client can mount these exported shares on its own file system. You can perform file actions on this mounted remote file system as if the file system were local." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:13(title) -msgid "How the NFS driver works" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:14(para) -msgid "The NFS driver, and other drivers based on it, work quite differently than a traditional block storage driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:17(para) -msgid "The NFS driver does not actually allow an instance to access a storage device at the block level. Instead, files are created on an NFS share and mapped to instances, which emulates a block device. This works in a similar way to QEMU, which stores instances in the /var/lib/nova/instances directory." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:26(title) -msgid "Enable the NFS driver and related options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:27(para) -msgid "To use Cinder with the NFS driver, first set the volume_driver in cinder.conf:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:31(para) -msgid "The following table contains the options supported by the NFS driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:36(para) -msgid "As of the Icehouse release, the NFS driver (and other drivers based off it) will attempt to mount shares using version 4.1 of the NFS protocol (including pNFS). If the mount attempt is unsuccessful due to a lack of client or server support, a subsequent mount attempt that requests the default behavior of the command will be performed. On most distributions, the default behavior is to attempt mounting first with NFS v4.0, then silently fall back to NFS v3.0 if necessary. If the configuration option contains a request for a specific version of NFS to be used, or if specific options are specified in the shares configuration file specified by the configuration option, the mount will be attempted as requested with no subsequent attempts." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:55(title) -msgid "How to use the NFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:58(para) -msgid "Access to one or more NFS servers. Creating an NFS server is outside the scope of this document. This example assumes access to the following NFS servers and mount points:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:64(literal) -msgid "192.168.1.200:/storage" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:67(literal) -msgid "192.168.1.201:/storage" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:70(literal) -msgid "192.168.1.202:/storage" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:73(para) -msgid "This example demonstrates the use of with this driver with multiple NFS servers. Multiple servers are not required. One is usually enough." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:78(para) -msgid "Add your list of NFS servers to the file you specified with the nfs_shares_config option. For example, if the value of this option was set to /etc/cinder/shares.txt, then:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:88(para) -msgid "Comments are allowed in this file. They begin with a #." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:92(para) -msgid "Configure the nfs_mount_point_base option. This is a directory where cinder-volume mounts all NFS shares stored in shares.txt. For this example, /var/lib/cinder/nfs is used. You can, of course, use the default value of $state_path/mnt." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:103(para) -msgid "Start the cinder-volume service. /var/lib/cinder/nfs should now contain a directory for each NFS share specified in shares.txt. The name of each directory is a hashed name:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:115(para) -msgid "You can now create volumes as you normally would:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:120(para) -msgid "This volume can also be attached and deleted just like other volumes. However, snapshotting is not supported." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:127(title) -msgid "NFS driver notes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:130(para) -msgid "cinder-volume manages the mounting of the NFS shares as well as volume creation on the shares. Keep this in mind when planning your OpenStack architecture. If you have one master NFS server, it might make sense to only have one cinder-volume service to handle all requests to that NFS server. However, if that single server is unable to handle all requests, more than one cinder-volume service is needed as well as potentially more than one NFS server." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:146(para) -msgid "Because data is stored in a file and not actually on a block storage device, you might not see the same IO performance as you would with a traditional block storage driver. Please test accordingly." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:153(para) -msgid "Despite possible IO performance loss, having volume data stored in a file might be beneficial. For example, backing up volumes can be as easy as copying the volume files." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nfs-volume-driver.xml:158(para) -msgid "Regular IO flushing and syncing still stands." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:10(title) -msgid "Dot Hill AssuredSAN Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:12(para) -msgid "The DotHillFCDriver and DotHillISCSIDriver Cinder drivers allow Dot Hill arrays to be used for block storage in OpenStack deployments." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:19(para) -msgid "To use the Dot Hill drivers, the following are required:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:23(para) -msgid "Dot Hill AssuredSAN array with:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:35(para) -msgid "Appropriate licenses for the snapshot and copy volume features" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:103(para) -msgid "Verify that the array can be managed via an HTTPS connection. HTTP can also be used if dothill_api_protocol=http is placed into the appropriate sections of the cinder.conf file." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:126(para) -msgid "The dothill_backend_name value specifies the name of the storage pool or vdisk on the array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:199(para) -msgid "If any volume_backend_name value refers to a vdisk rather than a virtual pool, add an additional statement dothill_backend_type = linear to that backend entry." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:206(para) -msgid "If HTTPS is not enabled in the array, include dothill_api_protocol = http in each of the backend definitions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:212(para) -msgid "If HTTPS is enabled, you can enable certificate verification with the option dothill_verify_certificate=True. You may also use the dothill_verify_certificate_path parameter to specify the path to a CA_BUNDLE file containing CAs other than those in the default list." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:240(para) -msgid "Create a new volume type for each distinct volume_backend_name value that you added to cinder.conf. The example below assumes that the same volume_backend_name=dothill-array option was specified in all of the entries, and specifies that the volume type dothill can be used to allocate volumes from any of them. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/dothill-driver.xml:266(para) -msgid "The following table contains the configuration options that are specific to the Dot Hill drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/smbfs-volume-driver.xml:6(title) -msgid "SambaFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/smbfs-volume-driver.xml:7(para) -msgid "There is a volume back-end for Samba filesystems. Set the following in your cinder.conf, and use the following options to configure it." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/smbfs-volume-driver.xml:11(para) -msgid "The SambaFS driver requires version 1.7 or higher on Linux nodes, and version 1.6 or higher on Windows nodes." -msgstr "" - -#. When image changes, this message will be marked fuzzy or untranslated for you. -#. It doesn't matter what you translate it to: it's not used at all. -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:34(None) -msgid "@@image: 'figures/blockbridge/bb-cinder-fig1.png'; md5=b6cb55e827fd9fafaf6859f06568bd9f" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:7(title) -msgid "Blockbridge EPS" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:12(para) -msgid "Blockbridge is software that transforms commodity infrastructure into secure multi-tenant storage that operates as a programmable service. It provides automatic encryption, secure deletion, quality of service, replication, and programmable security capabilities on your choice of hardware. Blockbridge uses micro-segmentation to provide isolation that allows you to concurrently operate OpenStack, Docker, and bare-metal workflows on shared resources. When used with OpenStack, isolated management domains are dynamically created on a per-project basis. All volumes and clones, within and between projects, are automatically cryptographically isolated and implement secure deletion." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:26(title) -msgid "Architecture reference" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:29(title) -msgid "Blockbridge architecture" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:40(title) -msgid "Control paths" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:41(para) -msgid "The Blockbridge driver is packaged with the core distribution of OpenStack. Operationally, it executes in the context of the Block Storage service. The driver communicates with an OpenStack-specific API provided by the Blockbridge EPS platform. Blockbridge optionally communicates with OpenStack Identity, Compute, and Block Storage services." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:50(title) -msgid "Block storage API" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:51(para) -msgid "Blockbridge is API driven software-defined storage. The system implements a native HTTP API that is tailored to the specific needs of OpenStack. Each Block Storage service operation maps to a single back-end API request that provides ACID semantics. The API is specifically designed to reduce, if not eliminate, the possibility of inconsistencies between the Block Storage service and external storage infrastructure in the event of hardware, software or data center failure." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:62(title) -msgid "Extended management" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:63(para) -msgid "OpenStack users may utilize Blockbridge interfaces to manage replication, auditing, statistics, and performance information on a per-project and per-volume basis. In addition, they can manage low-level data security functions including verification of data authenticity and encryption key delegation. Native integration with the Identity Service allows tenants to use a single set of credentials. Integration with Block storage and Compute services provides dynamic metadata mapping when using Blockbridge management APIs and tools." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:75(title) -msgid "Attribute-based provisioning" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:76(para) -msgid "Blockbridge organizes resources using descriptive identifiers called attributes. Attributes are assigned by administrators of the infrastructure. They are used to describe the characteristics of storage in an application-friendly way. Applications construct queries that describe storage provisioning constraints and the Blockbridge storage stack assembles the resources as described." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:83(para) -msgid "Any given instance of a Blockbridge volume driver specifies a query for resources. For example, a query could specify '+ssd +10.0.0.0 +6nines -production iops.reserve=1000 capacity.reserve=30%'. This query is satisfied by selecting SSD resources, accessible on the 10.0.0.0 network, with high resiliency, for non-production workloads, with guaranteed IOPS of 1000 and a storage reservation for 30% of the volume capacity specified at create time. 
Queries and parameters are completely administrator defined: they reflect the layout, resource, and organizational goals of a specific deployment." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:125(title) -msgid "Supported protocols" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:126(para) -msgid "Blockbridge provides iSCSI access to storage. A unique iSCSI data fabric is programmatically assembled when a volume is attached to an instance. A fabric is disassembled when a volume is detached from an instance. Each volume is an isolated SCSI device that supports persistent reservations." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:134(title) -msgid "Configuration steps" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:136(title) -msgid "Create an authentication token" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:137(para) -msgid "Whenever possible, avoid using password-based authentication. Even if you have created a role-restricted administrative user via Blockbridge, token-based authentication is preferred. You can generate persistent authentication tokens using the Blockbridge command-line tool as follows:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:167(title) -msgid "Create volume type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:168(para) -msgid "Before configuring and enabling the Blockbridge volume driver, register an OpenStack volume type and associate it with a volume_backend_name. In this example, a volume type, 'Production', is associated with the volume_backend_name 'blockbridge_prod': " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:178(title) -msgid "Specify volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:179(para) -msgid "Configure the Blockbridge volume driver in /etc/cinder/cinder.conf. Your volume_backend_name must match the value specified in the command in the previous step. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:189(title) -msgid "Specify API endpoint and authentication" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:190(para) -msgid "Configure the API endpoint and authentication. The following example uses an authentication token. You must create your own as described above. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:199(title) -msgid "Specify resource query" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:200(para) -msgid "By default, a single pool is configured (implied) with a default resource query of '+openstack'. Within Blockbridge, datastore resources that advertise the 'openstack' attribute will be selected to fulfill OpenStack provisioning requests. If you prefer a more specific query, define a custom pool configuration. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:207(para) -msgid "Pools support storage systems that offer multiple classes of service. You may wish to configure multiple pools to implement more sophisticated scheduling capabilities." 
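Editor's note: as a sketch of the Blockbridge steps described above (token authentication, a 'Production' volume type tied to volume_backend_name blockbridge_prod, and a custom pool query), using the blockbridge_* option names that appear later in this change. The host, token value, query string, and the driver module path are illustrative assumptions::

    # create the volume type and associate it with the backend name
    cinder type-create Production
    cinder type-key Production set volume_backend_name=blockbridge_prod

    # /etc/cinder/cinder.conf
    [DEFAULT]
    enabled_backends = blockbridge_prod

    [blockbridge_prod]
    volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver   # path assumed
    volume_backend_name = blockbridge_prod
    blockbridge_api_host = <Blockbridge API hostname or IP>
    blockbridge_auth_scheme = token
    blockbridge_auth_token = <token generated with the Blockbridge command-line tool>
    # optional: replace the implied '+openstack' query with a custom pool query
    blockbridge_pools = Production: +ssd +production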
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:241(title) -msgid "Multiple volume types" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:242(para) -msgid "Volume types are exposed to tenants, pools are not. To offer multiple classes of storage to OpenStack tenants, you should define multiple volume types. Simply repeat the process above for each desired type. Be sure to specify a unique volume_backend_name and pool configuration for each type. The cinder.conf example included with this documentation illustrates configuration of multiple types." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:254(title) -msgid "Testing resources" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/blockbridge-eps-driver.xml:255(para) -msgid "Blockbridge is freely available for testing purposes and deploys in seconds as a Docker container. This is the same container used to run continuous integration for OpenStack. For more information visit www.blockbridge.io." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:6(title) -msgid "Nimble Storage volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:8(para) -msgid "Nimble Storage fully integrates with the OpenStack platform through the Nimble Cinder driver, allowing a host to configure and manage Nimble Storage array features through Block Storage interfaces." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:11(para) -msgid "Support for the Liberty release is available from Nimble OS 2.3.8 or later." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:41(para) -msgid "Enable encryption and default performance policy for a volume-type using extra-specs" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:46(para) -msgid "The Nimble Storage implementation uses iSCSI only. Fibre Channel is not supported." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:51(title) -msgid "Nimble Storage driver configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:52(para) -msgid "Update the file /etc/cinder/cinder.conf with the given configuration." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:54(para) -msgid "In case of a basic (single back-end) configuration, add the parameters within the [default] section as follows." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:59(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:73(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:93(term) -msgid "NIMBLE_MGMT_IP" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:60(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:74(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:99(term) -msgid "NIMBLE_USER" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:61(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:75(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:106(term) -msgid "NIMBLE_PASSWORD" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:64(para) -msgid "In case of multi back-end configuration, for example, configuration which supports multiple Nimble Storage arrays or a single Nimble Storage array with arrays from other vendors, use the following parameters." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:77(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:88(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:112(term) -msgid "NIMBLE_BACKEND_NAME" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:79(para) -msgid "In case of multi back-end configuration, Nimble Storage volume-type is created and associated with a back-end name as follows." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:82(para) -msgid "Single back-end configuration users do not need to create the volume-type." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:85(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:87(replaceable) ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:120(term) -msgid "NIMBLE_VOLUME_TYPE" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:90(para) -msgid "This section explains the variables used above:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:95(para) -msgid "Management IP address of Nimble Storage array/group." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:101(para) -msgid "Nimble Storage account login with minimum \"power user\"(admin) privilege if RBAC is used." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:108(para) -msgid "Password of the admin account for nimble array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:114(para) -msgid "A volume back-end name which is specified in cinder.conf. This is also used while assigning a back- end name to the Nimble volume-type." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:122(para) -msgid "The Nimble volume-type which is created from the CLI and associated with NIMBLE_BACKEND_NAME." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:129(para) -msgid "Restart the cinder-api, cinder-scheduler, and cinder-volume services after updating the cinder.conf." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:134(title) -msgid "Nimble driver extra spec options" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:135(para) -msgid "The Nimble volume driver also supports the following extra spec options:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:140(term) -msgid "'nimble:encryption'='yes'" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:142(para) -msgid "Used to enable encryption for a volume-type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:146(term) -msgid "'nimble:perfpol-name'=PERF_POL_NAME" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:149(para) -msgid "PERF_POL_NAME is the name of a performance policy which exists on the Nimble array and should be enabled for every volume in a volume-type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:155(term) -msgid "'nimble:multi-initiator'='true'" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:157(para) -msgid "Used to enable multi-initiator access for a volume-type" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:161(para) -msgid "These extra-specs can be enabled by using the following command:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:163(replaceable) -msgid "\\ VOLUME_TYPE" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:164(replaceable) -msgid "KEY" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:164(replaceable) -msgid "\\ VALUE" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:166(para) -msgid "VOLUME_TYPE is the Nimble volume-type and KEY and VALUE are the options mentioned above." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/nimble-volume-driver.xml:171(para) -msgid "The Nimble storage driver supports these configuration options:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:6(title) -msgid "Scality SOFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:7(para) -msgid "The Scality SOFS volume driver interacts with configured sfused mounts." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:9(para) -msgid "The Scality SOFS driver manages volumes as sparse files stored on a Scality Ring through sfused. Ring connection settings and sfused options are defined in the cinder.conf file and the configuration file pointed to by the option, typically /etc/sfused.conf." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:17(para) -msgid "The Scality SOFS volume driver provides the following Block Storage volume operations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:53(title) -msgid "Sample Block Storage Configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:51(para) -msgid "Use the following instructions to update the cinder.conf configuration file: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:68(title) -msgid "Sample Compute Configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/scality-sofs-driver.xml:66(para) -msgid "Use the following instructions to update the nova.conf configuration file: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:9(title) -msgid "Hitachi storage volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:10(para) -msgid "Hitachi storage volume driver provides iSCSI and Fibre Channel support for Hitachi storages." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:14(para) -msgid "Supported storages:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:17(para) -msgid "Hitachi Virtual Storage Platform G1000 (VSP G1000)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:20(para) -msgid "Hitachi Virtual Storage Platform (VSP)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:23(para) -msgid "Hitachi Unified Storage VM (HUS VM)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:26(para) -msgid "Hitachi Unified Storage 100 Family (HUS 100 Family)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:29(para) -msgid "Required software:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:32(para) -msgid "RAID Manager Ver 01-32-03/01 or later for VSP G1000/VSP/HUS VM" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:35(para) -msgid "Hitachi Storage Navigator Modular 2 (HSNM2) Ver 27.50 or later for HUS 100 Family" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:39(para) -msgid "HSNM2 needs to be installed under /usr/stonavm." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:41(para) -msgid "Required licenses:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:44(para) -msgid "Hitachi In-System Replication Software for VSP G1000/VSP/HUS VM" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:47(para) -msgid "(Mandatory) ShadowImage in-system replication for HUS 100 Family" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:50(para) -msgid "(Optional) Copy-on-Write Snapshot for HUS 100 Family" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:53(para) -msgid "Additionally, the pexpect package is required." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:59(para) -msgid "Create, delete, attach and detach volumes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:62(para) -msgid "Create, list and delete volume snapshots." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:87(title) -msgid "Set up Hitachi storage" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:88(para) -msgid "You need to specify settings as described below. For details about each step, see the user's guide of the storage device. Use a storage administrative software such as Storage Navigator to set up the storage device so that LDEVs and host groups can be created and deleted, and LDEVs can be connected to the server and can be asynchronously copied." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:91(para) -msgid "Create a Dynamic Provisioning pool." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:94(para) -msgid "Connect the ports at the storage to the Controller node and Compute nodes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:97(para) -msgid "For VSP G1000/VSP/HUS VM, set \"port security\" to \"enable\" for the ports at the storage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:100(para) -msgid "For HUS 100 Family, set \"Host Group security\"/\"iSCSI target security\" to \"ON\" for the ports at the storage." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:103(para) -msgid "For the ports at the storage, create host groups (iSCSI targets) whose names begin with HBSD- for the Controller node and each Compute node. Then register a WWN (initiator IQN) for each of the Controller node and Compute nodes." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:106(para) -msgid "For VSP G1000/VSP/HUS VM, perform the following:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:109(para) -msgid "Create a storage device account belonging to the Administrator User Group. (To use multiple storage devices, create the same account name for all the target storage devices, and specify the same resource group and permissions.)" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:113(para) -msgid "Create a command device (In-Band), and set user authentication to ON." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:116(para) -msgid "Register the created command device to the host group for the Controller node." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:119(para) -msgid "To use the Thin Image function, create a pool for Thin Image." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:124(para) -msgid "For HUS 100 Family, perform the following:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:127(para) -msgid "Use the command to register the unit name and controller of the storage device to HSNM2." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:130(para) -msgid "When connecting via iSCSI, if you are using CHAP certification, specify the same user and password as that used for the storage port." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:137(title) -msgid "Set up Hitachi Gigabit Fibre Channel adaptor" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:138(para) -msgid "Change a parameter of the hfcldd driver and update the initram file if Hitachi Gigabit Fibre Channel adaptor is used." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:142(replaceable) ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:142(replaceable) -msgid "KERNEL_VERSION" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:146(title) -msgid "Set up Hitachi storage volume driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:149(para) -msgid "Create directory." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:154(para) -msgid "Create \"volume type\" and \"volume key\"." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:155(para) -msgid "This example shows that HUS100_SAMPLE is created as \"volume type\" and hus100_backend is registered as \"volume key\"." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:161(para) -msgid "Please specify any identical \"volume type\" name and \"volume key\"." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:163(para) -msgid "To confirm the created \"volume type\", please execute the following command:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:168(para) -msgid "Edit /etc/cinder/cinder.conf as follows." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:169(para) -msgid "If you use Fibre Channel:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:171(para) -msgid "If you use iSCSI:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:173(para) -msgid "Also, set created by " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:175(para) -msgid "This table shows configuration options for Hitachi storage volume driver." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:179(para) -msgid "Restart Block Storage service." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/hitachi-storage-volume-driver.xml:181(para) -msgid "When the startup is done, \"MSGID0003-I: The storage backend can be used.\" is output into /var/log/cinder/volume.log as follows." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:10(title) -msgid "Lenovo Fibre Channel and iSCSI drivers" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:12(para) -msgid "The LenovoFCDriver and LenovoISCSIDriver Cinder drivers allow Lenovo S3200 or S2200 arrays to be used for block storage in OpenStack deployments." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:19(para) -msgid "To use the Lenovo drivers, the following are required:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:23(para) -msgid "Lenovo S3200 or S2200 array with:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:98(para) -msgid "Verify that the array can be managed via an HTTPS connection. HTTP can also be used if lenovo_api_protocol=http is placed into the appropriate sections of the cinder.conf file." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:116(para) -msgid "The lenovo_backend_name value specifies the name of the storage pool on the array." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:188(para) -msgid "If HTTPS is not enabled in the array, include lenovo_api_protocol = http in each of the backend definitions." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:194(para) -msgid "If HTTPS is enabled, you can enable certificate verification with the option lenovo_verify_certificate=True. You may also use the lenovo_verify_certificate_path parameter to specify the path to a CA_BUNDLE file containing CAs other than those in the default list." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:222(para) -msgid "Create a new volume type for each distinct volume_backend_name value that you added to cinder.conf. The example below assumes that the same volume_backend_name=lenovo-array option was specified in all of the entries, and specifies that the volume type lenovo can be used to allocate volumes from any of them. " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/lenovo-driver.xml:248(para) -msgid "The following table contains the configuration options that are specific to the Lenovo drivers." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:5(title) -msgid "Oracle ZFS Storage Appliance NFS driver" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:6(para) -msgid "The Oracle ZFS Storage Appliance (ZFSSA) NFS driver enables the ZFSSA to be used seamlessly as a block storage resource. The driver enables you to to create volumes on a ZFS share that is NFS mounted." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:12(para) -msgid "Oracle ZFS Storage Appliance Software version 2013.1.2.0 or later" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:18(para) -msgid "Create, extend, delete volumes" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:24(para) -msgid "Create, delete snapshots" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:47(title) -msgid "Appliance configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:48(para) -msgid "Appliance configuration using the command line interface (CLI) is described below. To access the CLI, ensure SSH remote access is enabled, which is the default. You can also perform configuration using the browser user interface (BUI) or the RESTful API. Please refer to the Oracle ZFS Storage Appliance documentation for details on how to configure the Oracle ZFS Storage Appliance using the BUI, CLI and RESTful API." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:58(para) -msgid "Log in to the Oracle ZFS Storage Appliance CLI and enable the REST service. REST service needs to stay online for this driver to function." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:63(para) -msgid "Create a new storage pool on the appliance if you do not want to use an existing one. This storage pool is named 'mypool' for the sake of this documentation." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:67(para) -msgid "Create a new project and share in the storage pool (mypool) if you do not want to use existing ones. This driver will create a project and share by the names specified in cinder.conf, if the a project or share by that name does not already exist in the storage pool (mypool). The project and share are named 'NFSProject' and 'nfs_share' in the sample cinder.conf entries below." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:73(para) -msgid "To perform driver operations, create a role with the following authorizations:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:77(code) -msgid "scope=svc - allow_administer=true, allow_restart=true, allow_configure=true" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:80(code) -msgid "scope=nas - pool=pool_name, project=project_name, share=share_name, allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:83(para) -msgid "The following examples show how to create a role with authorizations." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:96(para) -msgid "The following properties need to be set when the scope of this role needs to be limited to a pool (mypool), a project (NFSProject) and a share (nfs_share) created in the steps above. This will prevent the user assigned to this role from being used to modify other pools, projects and shares." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:102(para) -msgid "The following properties only need to be set when a share or a project has not been created following the steps above and wish to allow the driver to create them for you." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:114(para) -msgid "Create a new user or modify an existing one and assign the new role to the user." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:116(para) -msgid "The following example shows how to create a new user and assign the new role to the user." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:126(para) -msgid "Ensure that NFS and HTTP services on the appliance are online. Note the HTTPS port number for later entry in the cinder service configuration file (cinder.conf). This driver uses WebDAV over HTTPS to create snapshots and clones of volumes, and therefore needs to have the HTTP service online." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:129(para) -msgid "The following example illustrates enabling the services and showing their properties." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:147(para) -msgid "Create a network interface to be used exclusively for data. An existing network interface may also be used. The following example illustrates how to make a network interface for data traffic flow only." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:150(para) -msgid "For better performance and reliability, it is recommended to configure a separate subnet exclusively for data traffic in your cloud environment." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:158(para) -msgid "For clustered controller systems, the following verification is required in addition to the above steps. Skip this step if a standalone system is used." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:161(para) -msgid "Verify that both the newly created pool and the network interface are of type \"singleton\" and are not locked to the current controller. This approach ensures that the pool and the interface used for data always belong to the active controller, regardless of the current state of the cluster. Verify that both the network interface used for management and data, and the storage pool belong to the same head." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:167(para) -msgid "There will be a short service interruption during failback/takeover, but once the process is complete, the driver should be able to access the ZFSSA for data as well as for management." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:174(title) -msgid "Cinder service configuration" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:177(para) -msgid "Define the following required properties in the cinder.conf configuration file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:183(replaceable) -msgid "mydata" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:186(para) -msgid "Management interface san_ip can be used instead of zfssa_data_ip, but it is not recommended." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:192(para) -msgid "You can also define the following additional properties in the cinder.conf configuration file:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:194(replaceable) -msgid "NFSProject" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:195(replaceable) -msgid "nfs_share" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:197(replaceable) -msgid "off" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:203(para) -msgid "The driver does not use the file specified in the nfs_shares_config option." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:209(para) -msgid "The Oracle ZFS Storage Appliance NFS driver supports these options:" -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:211(para) -msgid "This driver shares additional NFS configuration options with the generic NFS driver. For a description of these, see ." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:228(para) -msgid "REST and NFS services need to be turned on." 
-msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:238(para) -msgid "zfssa_cache_directory: The directory name inside zfssa_nfs_share where cache volumes are stored." -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:231(para) -msgid "On an OpenStack controller, cinder.conf needs to contain necessary properties used to configure and set up the ZFSSA NFS driver, including the following new properties: " -msgstr "" - -#: ./doc/config-reference/block-storage/drivers/zfssa-nfs-driver.xml:246(para) -msgid "Every cache volume has two additional properties stored as WebDAV properties. It is important that they are not altered outside of Block Storage when the driver is in use:" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:6(title) -msgid "Telemetry sample configuration files" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:7(para) -msgid "All the files in this section can be found in the /etc/ceilometer/ directory." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:10(title) -msgid "ceilometer.conf" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:11(para) -msgid "The configuration for the Telemetry services and agents is found in the ceilometer.conf file." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:20(title) -msgid "event_definitions.yaml" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:21(para) -msgid "The event_definitions.yaml file defines how events received from other OpenStack components should be translated to Telemetry events." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:24(para) -msgid "This file provides a standard set of events and corresponding traits that may be of interest. This file can be modified to add and drop traits that operators may find useful." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:30(title) -msgid "pipeline.yaml" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:31(para) -msgid "Pipelines describe a coupling between sources of samples and the corresponding sinks for transformation and publication of the data. They are defined in the pipeline.yaml file." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:35(para) -msgid "This file can be modified to adjust polling intervals and the samples generated by the Telemetry module" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:40(title) -msgid "event_pipeline.yaml" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:41(para) -msgid "Event pipelines describe a coupling between notification event_types and the corresponding sinks for publication of the event data. They are defined in the event_pipeline.yaml file." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:44(para) -msgid "This file can be modified to adjust which notifications to capture and the and where to publish the events." 
-msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml:50(para) -msgid "The policy.json file defines additional access controls that apply to the Telemetry service." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-alarming-service-config-opts.xml:6(title) -msgid "Telemetry Alarming service configuration options" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-alarming-service-config-opts.xml:7(para) -msgid "The following tables provide a comprehensive list of the Telemetry Alarming service configuration options." -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-service-config-opts.xml:6(title) -msgid "Telemetry configuration options" -msgstr "" - -#: ./doc/config-reference/telemetry/section_telemetry-service-config-opts.xml:7(para) -msgid "The following tables provide a comprehensive list of the Telemetry configuration options." -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/cinder.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for OpenStack Block Storage" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:6(caption) ./doc/config-reference/conf-changes/manila.xml:6(caption) ./doc/config-reference/conf-changes/keystone.xml:6(caption) ./doc/config-reference/conf-changes/ironic.xml:6(caption) ./doc/config-reference/conf-changes/sahara.xml:6(caption) ./doc/config-reference/conf-changes/nova.xml:6(caption) ./doc/config-reference/conf-changes/neutron.xml:6(caption) ./doc/config-reference/conf-changes/ceilometer.xml:6(caption) ./doc/config-reference/conf-changes/trove.xml:6(caption) ./doc/config-reference/conf-changes/glance.xml:6(caption) ./doc/config-reference/conf-changes/heat.xml:6(caption) -msgid "New options" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:11(td) ./doc/config-reference/conf-changes/manila.xml:11(td) ./doc/config-reference/conf-changes/keystone.xml:11(td) ./doc/config-reference/conf-changes/ironic.xml:11(td) ./doc/config-reference/conf-changes/sahara.xml:11(td) ./doc/config-reference/conf-changes/nova.xml:11(td) ./doc/config-reference/conf-changes/neutron.xml:11(td) ./doc/config-reference/conf-changes/ceilometer.xml:11(td) ./doc/config-reference/conf-changes/trove.xml:11(td) ./doc/config-reference/conf-changes/glance.xml:11(td) ./doc/config-reference/conf-changes/heat.xml:11(td) -msgid "Option = default value" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:12(td) ./doc/config-reference/conf-changes/manila.xml:12(td) ./doc/config-reference/conf-changes/keystone.xml:12(td) ./doc/config-reference/conf-changes/ironic.xml:12(td) ./doc/config-reference/conf-changes/sahara.xml:12(td) ./doc/config-reference/conf-changes/nova.xml:12(td) ./doc/config-reference/conf-changes/neutron.xml:12(td) ./doc/config-reference/conf-changes/ceilometer.xml:12(td) ./doc/config-reference/conf-changes/trove.xml:12(td) ./doc/config-reference/conf-changes/glance.xml:12(td) ./doc/config-reference/conf-changes/heat.xml:12(td) -msgid "(Type) Help string" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:16(td) -msgid "[DEFAULT] allow_availability_zone_fallback = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:17(td) -msgid "(BoolOpt) If the requested Cinder availability zone is unavailable, fall back to the value of 
default_availability_zone, then storage_availability_zone, instead of failing." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:20(td) -msgid "[DEFAULT] backup_posix_path = $state_path/backup" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:21(td) -msgid "(StrOpt) Path specifying where to store backups." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:24(td) -msgid "[DEFAULT] backup_service_inithost_offload = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:25(td) -msgid "(BoolOpt) Offload pending backup delete during backup service startup." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:28(td) -msgid "[DEFAULT] backup_swift_ca_cert_file = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:29(td) -msgid "(StrOpt) Location of the CA certificate file to use for swift client requests." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:32(td) -msgid "[DEFAULT] blockbridge_api_host = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:33(td) -msgid "(StrOpt) IP address/hostname of Blockbridge API." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:36(td) -msgid "[DEFAULT] blockbridge_api_port = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:37(td) -msgid "(IntOpt) Override HTTPS port to connect to Blockbridge API server." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:40(td) -msgid "[DEFAULT] blockbridge_auth_password = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:41(td) -msgid "(StrOpt) Blockbridge API password (for auth scheme 'password')" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:44(td) -msgid "[DEFAULT] blockbridge_auth_scheme = token" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:45(td) -msgid "(StrOpt) Blockbridge API authentication scheme (token or password)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:48(td) -msgid "[DEFAULT] blockbridge_auth_token = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:49(td) -msgid "(StrOpt) Blockbridge API token (for auth scheme 'token')" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:52(td) -msgid "[DEFAULT] blockbridge_auth_user = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:53(td) -msgid "(StrOpt) Blockbridge API user (for auth scheme 'password')" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:56(td) -msgid "[DEFAULT] blockbridge_default_pool = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:57(td) -msgid "(StrOpt) Default pool name if unspecified." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:60(td) -msgid "[DEFAULT] blockbridge_pools = {'OpenStack': '+openstack'}" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:61(td) -msgid "(DictOpt) Defines the set of exposed pools and their associated backend query strings" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:64(td) -msgid "[DEFAULT] cb_auth_group = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:65(td) -msgid "(StrOpt) This corresponds to the discovery authentication group in CloudByte storage. Chap users are added to this group. Driver uses the first user found for this group. Default value is None." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:68(td) -msgid "[DEFAULT] cb_confirm_volume_delete_retries = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:69(td) -msgid "(IntOpt) Will confirm a successful volume deletion in CloudByte storage by making this many number of attempts." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:72(td) -msgid "[DEFAULT] cb_confirm_volume_delete_retry_interval = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:73(td) -msgid "(IntOpt) A retry value in seconds. Will be used by the driver to check if volume deletion was successful in CloudByte storage." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:76(td) -msgid "[DEFAULT] cinder_internal_tenant_project_id = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:77(td) -msgid "(StrOpt) ID of the project which will be used as the Cinder internal tenant." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:80(td) -msgid "[DEFAULT] cinder_internal_tenant_user_id = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:81(td) -msgid "(StrOpt) ID of the user to be used in volume operations as the Cinder internal tenant." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:84(td) -msgid "[DEFAULT] dell_sc_verify_cert = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:85(td) -msgid "(BoolOpt) Enable HTTPS SC certificate verification." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:88(td) -msgid "[DEFAULT] dothill_api_protocol = https" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:89(td) -msgid "(StrOpt) DotHill API interface protocol." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:92(td) -msgid "[DEFAULT] dothill_backend_name = A" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:93(td) ./doc/config-reference/conf-changes/cinder.xml:173(td) ./doc/config-reference/conf-changes/cinder.xml:333(td) -msgid "(StrOpt) Pool or Vdisk name to use for volume creation." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:96(td) -msgid "[DEFAULT] dothill_backend_type = virtual" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:97(td) ./doc/config-reference/conf-changes/cinder.xml:177(td) -msgid "(StrOpt) linear (for Vdisk) or virtual (for Pool)." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:100(td) -msgid "[DEFAULT] dothill_iscsi_ips =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:101(td) ./doc/config-reference/conf-changes/cinder.xml:181(td) ./doc/config-reference/conf-changes/cinder.xml:341(td) -msgid "(ListOpt) List of comma-separated target iSCSI IP addresses." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:104(td) -msgid "[DEFAULT] dothill_verify_certificate = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:105(td) -msgid "(BoolOpt) Whether to verify DotHill array SSL certificate." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:108(td) -msgid "[DEFAULT] dothill_verify_certificate_path = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:109(td) -msgid "(StrOpt) DotHill array SSL certificate path." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:112(td) -msgid "[DEFAULT] drbdmanage_redundancy = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:113(td) -msgid "(StrOpt) Number of nodes that should replicate the data." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:116(td) -msgid "[DEFAULT] driver_ssl_cert_verify = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:117(td) -msgid "(BoolOpt) If set to True the http client will validate the SSL certificate of the backend endpoint." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:120(td) -msgid "[DEFAULT] enable_force_upload = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:121(td) -msgid "(BoolOpt) Enables the Force option on upload_to_image. This enables running upload_volume on in-use volumes for backends that support it." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:124(td) ./doc/config-reference/conf-changes/manila.xml:36(td) ./doc/config-reference/conf-changes/keystone.xml:16(td) ./doc/config-reference/conf-changes/ironic.xml:16(td) ./doc/config-reference/conf-changes/sahara.xml:24(td) ./doc/config-reference/conf-changes/nova.xml:20(td) ./doc/config-reference/conf-changes/neutron.xml:36(td) ./doc/config-reference/conf-changes/ceilometer.xml:28(td) ./doc/config-reference/conf-changes/trove.xml:16(td) ./doc/config-reference/conf-changes/glance.xml:20(td) ./doc/config-reference/conf-changes/heat.xml:20(td) -msgid "[DEFAULT] executor_thread_pool_size = 64" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:125(td) ./doc/config-reference/conf-changes/manila.xml:37(td) ./doc/config-reference/conf-changes/keystone.xml:17(td) ./doc/config-reference/conf-changes/ironic.xml:17(td) ./doc/config-reference/conf-changes/sahara.xml:25(td) ./doc/config-reference/conf-changes/nova.xml:21(td) ./doc/config-reference/conf-changes/neutron.xml:37(td) ./doc/config-reference/conf-changes/ceilometer.xml:29(td) ./doc/config-reference/conf-changes/trove.xml:17(td) ./doc/config-reference/conf-changes/glance.xml:21(td) ./doc/config-reference/conf-changes/heat.xml:21(td) -msgid "(IntOpt) Size of executor thread pool." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:128(td) -msgid "[DEFAULT] flashsystem_iscsi_portid = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:129(td) -msgid "(IntOpt) Default iSCSI Port ID of FlashSystem. (Default port is 0.)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:132(td) -msgid "[DEFAULT] glusterfs_backup_mount_point = $state_path/backup_mount" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:133(td) -msgid "(StrOpt) Base dir containing mount point for gluster share." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:136(td) -msgid "[DEFAULT] glusterfs_backup_share = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:137(td) -msgid "(StrOpt) GlusterFS share in <hostname|ipv4addr|ipv6addr>:<gluster_vol_name> format. 
Eg: 1.2.3.4:backup_vol" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:140(td) -msgid "[DEFAULT] hgst_net = Net 1 (IPv4)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:141(td) -msgid "(StrOpt) Space network name to use for data transfer" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:144(td) -msgid "[DEFAULT] hgst_redundancy = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:145(td) -msgid "(StrOpt) Should spaces be redundantly stored (1/0)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:148(td) -msgid "[DEFAULT] hgst_space_group = disk" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:149(td) -msgid "(StrOpt) Group to own created spaces" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:152(td) -msgid "[DEFAULT] hgst_space_mode = 0600" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:153(td) -msgid "(StrOpt) UNIX mode for created spaces" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:156(td) -msgid "[DEFAULT] hgst_space_user = root" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:157(td) -msgid "(StrOpt) User to own created spaces" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:160(td) -msgid "[DEFAULT] hgst_storage_servers = os:gbd0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:161(td) -msgid "(StrOpt) Comma separated list of Space storage servers:devices. ex: os1_stor:gbd0,os2_stor:gbd0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:164(td) -msgid "[DEFAULT] hitachi_horcm_resource_lock_timeout = 600" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:165(td) -msgid "(IntOpt) Timeout until a resource lock is released, in seconds. The value must be between 0 and 7200." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:168(td) -msgid "[DEFAULT] hpmsa_api_protocol = https" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:169(td) -msgid "(StrOpt) HPMSA API interface protocol." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:172(td) -msgid "[DEFAULT] hpmsa_backend_name = A" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:176(td) -msgid "[DEFAULT] hpmsa_backend_type = virtual" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:180(td) -msgid "[DEFAULT] hpmsa_iscsi_ips =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:184(td) -msgid "[DEFAULT] hpmsa_verify_certificate = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:185(td) -msgid "(BoolOpt) Whether to verify HPMSA array SSL certificate." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:188(td) -msgid "[DEFAULT] hpmsa_verify_certificate_path = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:189(td) -msgid "(StrOpt) HPMSA array SSL certificate path." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:192(td) -msgid "[DEFAULT] hpxp_async_copy_check_interval = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:193(td) -msgid "(IntOpt) Interval to check copy asynchronously" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:196(td) -msgid "[DEFAULT] hpxp_compute_target_ports = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:197(td) -msgid "(ListOpt) Target port names of compute node for host group or iSCSI target" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:200(td) -msgid "[DEFAULT] hpxp_copy_check_interval = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:201(td) -msgid "(IntOpt) Interval to check copy" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:204(td) -msgid "[DEFAULT] hpxp_copy_speed = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:205(td) -msgid "(IntOpt) Copy speed of storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:208(td) -msgid "[DEFAULT] hpxp_default_copy_method = FULL" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:209(td) -msgid "(StrOpt) Default copy method of storage system. There are two valid values: \"FULL\" specifies that a full copy; \"THIN\" specifies that a thin copy. Default value is \"FULL\"" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:212(td) -msgid "[DEFAULT] hpxp_group_request = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:213(td) -msgid "(BoolOpt) Request for creating host group or iSCSI target" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:216(td) -msgid "[DEFAULT] hpxp_horcm_add_conf = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:217(td) -msgid "(BoolOpt) Add to HORCM configuration" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:220(td) -msgid "[DEFAULT] hpxp_horcm_name_only_discovery = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:221(td) -msgid "(BoolOpt) Only discover a specific name of host group or iSCSI target" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:224(td) -msgid "[DEFAULT] hpxp_horcm_numbers = 200, 201" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:225(td) -msgid "(ListOpt) Instance numbers for HORCM" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:228(td) -msgid "[DEFAULT] hpxp_horcm_resource_name = meta_resource" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:229(td) -msgid "(StrOpt) Resource group name of storage system for HORCM" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:232(td) -msgid "[DEFAULT] hpxp_horcm_user = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:233(td) -msgid "(StrOpt) Username of storage system for HORCM" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:236(td) -msgid "[DEFAULT] hpxp_ldev_range = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:237(td) -msgid "(StrOpt) Logical device range of storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:240(td) -msgid "[DEFAULT] hpxp_pool = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:241(td) -msgid "(StrOpt) Pool of storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:244(td) -msgid "[DEFAULT] hpxp_storage_cli = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:245(td) 
-msgid "(StrOpt) Type of storage command line interface" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:248(td) -msgid "[DEFAULT] hpxp_storage_id = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:249(td) -msgid "(StrOpt) ID of storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:252(td) -msgid "[DEFAULT] hpxp_target_ports = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:253(td) -msgid "(ListOpt) Target port names for host group or iSCSI target" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:256(td) -msgid "[DEFAULT] hpxp_thin_pool = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:257(td) -msgid "(StrOpt) Thin pool of storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:260(td) -msgid "[DEFAULT] hpxp_zoning_request = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:261(td) -msgid "(BoolOpt) Request for FC Zone creating host group" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:264(td) -msgid "[DEFAULT] ignore_pool_full_threshold = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:265(td) -msgid "(BoolOpt) Force LUN creation even if the full threshold of pool is reached." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:268(td) -msgid "[DEFAULT] image_upload_use_cinder_backend = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:269(td) -msgid "(BoolOpt) If set to True, upload-to-image in raw format will create a cloned volume and register its location to the image service, instead of uploading the volume content. The cinder backend and locations support must be enabled in the image service, and glance_api_version must be set to 2." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:272(td) -msgid "[DEFAULT] image_upload_use_internal_tenant = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:273(td) -msgid "(BoolOpt) If set to True, the image volume created by upload-to-image will be placed in the internal tenant. Otherwise, the image volume is created in the current context's tenant." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:276(td) -msgid "[DEFAULT] image_volume_cache_enabled = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:277(td) -msgid "(BoolOpt) Enable the image volume cache for this backend." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:280(td) -msgid "[DEFAULT] image_volume_cache_max_count = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:281(td) -msgid "(IntOpt) Max number of entries allowed in the image volume cache. 0 => unlimited." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:284(td) -msgid "[DEFAULT] image_volume_cache_max_size_gb = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:285(td) -msgid "(IntOpt) Max size of the image volume cache for this backend in GB. 0 => unlimited." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:288(td) -msgid "[DEFAULT] infortrend_cli_max_retries = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:289(td) -msgid "(IntOpt) Maximum retry time for cli. Default is 5." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:292(td) -msgid "[DEFAULT] infortrend_cli_path = /opt/bin/Infortrend/raidcmd_ESDS10.jar" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:293(td) -msgid "(StrOpt) The Infortrend CLI absolute path. By default, it is at /opt/bin/Infortrend/raidcmd_ESDS10.jar" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:296(td) -msgid "[DEFAULT] infortrend_cli_timeout = 30" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:297(td) -msgid "(IntOpt) Default timeout for CLI copy operations in minutes. Support: migrate volume, create cloned volume and create volume from snapshot. By Default, it is 30 minutes." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:300(td) -msgid "[DEFAULT] infortrend_pools_name =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:301(td) -msgid "(StrOpt) Infortrend raid pool name list. It is separated with comma." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:304(td) -msgid "[DEFAULT] infortrend_provisioning = full" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:305(td) -msgid "(StrOpt) Let the volume use specific provisioning. By default, it is the full provisioning. The supported options are full or thin." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:308(td) -msgid "[DEFAULT] infortrend_slots_a_channels_id = 0,1,2,3,4,5,6,7" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:309(td) -msgid "(StrOpt) Infortrend raid channel ID list on Slot A for OpenStack usage. It is separated with comma. By default, it is the channel 0~7." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:312(td) -msgid "[DEFAULT] infortrend_slots_b_channels_id = 0,1,2,3,4,5,6,7" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:313(td) -msgid "(StrOpt) Infortrend raid channel ID list on Slot B for OpenStack usage. It is separated with comma. By default, it is the channel 0~7." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:316(td) -msgid "[DEFAULT] infortrend_tiering = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:317(td) -msgid "(StrOpt) Let the volume use specific tiering level. By default, it is the level 0. The supported levels are 0,2,3,4." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:320(td) -msgid "[DEFAULT] io_port_list = *" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:321(td) -msgid "(StrOpt) Comma separated iSCSI or FC ports to be used in Nova or Cinder." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:324(td) -msgid "[DEFAULT] iscsi_target_flags =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:325(td) -msgid "(StrOpt) Sets the target-specific flags for the iSCSI target. Only used for tgtadm to specify backing device flags using bsoflags option. The specified string is passed as is to the underlying tool." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:328(td) -msgid "[DEFAULT] lenovo_api_protocol = https" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:329(td) -msgid "(StrOpt) Lenovo api interface protocol." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:332(td) -msgid "[DEFAULT] lenovo_backend_name = A" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:336(td) -msgid "[DEFAULT] lenovo_backend_type = virtual" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:337(td) -msgid "(StrOpt) linear (for VDisk) or virtual (for Pool)." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:340(td) -msgid "[DEFAULT] lenovo_iscsi_ips =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:344(td) -msgid "[DEFAULT] lenovo_verify_certificate = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:345(td) -msgid "(BoolOpt) Whether to verify Lenovo array SSL certificate." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:348(td) -msgid "[DEFAULT] lenovo_verify_certificate_path = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:349(td) -msgid "(StrOpt) Lenovo array SSL certificate path." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:352(td) -msgid "[DEFAULT] managed_replication_target = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:353(td) -msgid "(BoolOpt) There are two types of target configurations managed (replicate to another configured backend) or unmanaged (replicate to a device not managed by Cinder)." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:356(td) -msgid "[DEFAULT] management_ips =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:357(td) -msgid "(StrOpt) List of Management IP addresses (separated by commas)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:360(td) -msgid "[DEFAULT] nas_volume_prov_type = thin" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:361(td) -msgid "(StrOpt) Provisioning type that will be used when creating volumes." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:364(td) -msgid "[DEFAULT] netapp_enable_multiattach = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:365(td) -msgid "(BoolOpt) This option specifies whether the driver should allow operations that require multiple attachments to a volume. An example would be live migration of servers that have volumes attached. When enabled, this backend is limited to 256 total volumes in order to guarantee volumes can be accessed by more than one host." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:368(td) -msgid "[DEFAULT] netapp_host_type = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:369(td) -msgid "(StrOpt) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:372(td) -msgid "[DEFAULT] netapp_lun_ostype = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:373(td) -msgid "(StrOpt) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:376(td) -msgid "[DEFAULT] netapp_lun_space_reservation = enabled" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:377(td) -msgid "(StrOpt) This option determines if storage space is reserved for LUN allocation. If enabled, LUNs are thick provisioned. 
If space reservation is disabled, storage space is allocated on demand." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:380(td) -msgid "[DEFAULT] netapp_pool_name_search_pattern = (.+)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:381(td) -msgid "(StrOpt) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:384(td) -msgid "[DEFAULT] os_privileged_user_auth_url = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:385(td) -msgid "(StrOpt) Auth URL associated with the OpenStack privileged account." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:388(td) ./doc/config-reference/conf-changes/manila.xml:132(td) ./doc/config-reference/conf-changes/keystone.xml:24(td) ./doc/config-reference/conf-changes/ironic.xml:20(td) ./doc/config-reference/conf-changes/sahara.xml:36(td) ./doc/config-reference/conf-changes/nova.xml:28(td) ./doc/config-reference/conf-changes/neutron.xml:48(td) ./doc/config-reference/conf-changes/ceilometer.xml:36(td) ./doc/config-reference/conf-changes/trove.xml:28(td) ./doc/config-reference/conf-changes/glance.xml:28(td) ./doc/config-reference/conf-changes/heat.xml:28(td) -msgid "[DEFAULT] password =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:389(td) ./doc/config-reference/conf-changes/manila.xml:133(td) ./doc/config-reference/conf-changes/keystone.xml:25(td) ./doc/config-reference/conf-changes/ironic.xml:21(td) ./doc/config-reference/conf-changes/sahara.xml:37(td) ./doc/config-reference/conf-changes/nova.xml:29(td) ./doc/config-reference/conf-changes/neutron.xml:49(td) ./doc/config-reference/conf-changes/ceilometer.xml:37(td) ./doc/config-reference/conf-changes/trove.xml:29(td) ./doc/config-reference/conf-changes/glance.xml:29(td) ./doc/config-reference/conf-changes/heat.xml:29(td) -msgid "(StrOpt) Password for Redis server (optional)." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:392(td) -msgid "[DEFAULT] per_volume_size_limit = -1" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:393(td) -msgid "(IntOpt) Max size allowed per volume, in gigabytes" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:396(td) ./doc/config-reference/conf-changes/manila.xml:144(td) ./doc/config-reference/conf-changes/keystone.xml:28(td) ./doc/config-reference/conf-changes/ironic.xml:24(td) ./doc/config-reference/conf-changes/sahara.xml:40(td) ./doc/config-reference/conf-changes/nova.xml:32(td) ./doc/config-reference/conf-changes/neutron.xml:60(td) ./doc/config-reference/conf-changes/ceilometer.xml:44(td) ./doc/config-reference/conf-changes/trove.xml:32(td) ./doc/config-reference/conf-changes/glance.xml:32(td) ./doc/config-reference/conf-changes/heat.xml:32(td) -msgid "[DEFAULT] port = 6379" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:397(td) ./doc/config-reference/conf-changes/manila.xml:145(td) ./doc/config-reference/conf-changes/keystone.xml:29(td) ./doc/config-reference/conf-changes/ironic.xml:25(td) ./doc/config-reference/conf-changes/sahara.xml:41(td) ./doc/config-reference/conf-changes/nova.xml:33(td) ./doc/config-reference/conf-changes/neutron.xml:61(td) ./doc/config-reference/conf-changes/ceilometer.xml:45(td) ./doc/config-reference/conf-changes/trove.xml:33(td) ./doc/config-reference/conf-changes/glance.xml:33(td) ./doc/config-reference/conf-changes/heat.xml:33(td) -msgid "(IntOpt) Use this port to connect to redis host." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:400(td) -msgid "[DEFAULT] query_volume_filters = name, status, metadata, availability_zone" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:401(td) -msgid "(ListOpt) Volume filter options which non-admin user could use to query volumes. Default values are: ['name', 'status', 'metadata', 'availability_zone']" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:404(td) -msgid "[DEFAULT] rados_connection_interval = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:405(td) -msgid "(IntOpt) Interval value (in seconds) between connection retries to ceph cluster." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:408(td) -msgid "[DEFAULT] rados_connection_retries = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:409(td) -msgid "(IntOpt) Number of retries if connection to ceph cluster failed." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:412(td) -msgid "[DEFAULT] rbd_cluster_name = ceph" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:413(td) -msgid "(StrOpt) The name of ceph cluster" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:416(td) -msgid "[DEFAULT] replication_devices = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:417(td) -msgid "(ListOpt) List of k/v pairs representing a replication target for this backend device. For unmanaged the format is: {'key-1'='val1' 'key-2'='val2'...},{...} and for managed devices its simply a list of valid configured backend_names that the driver supports replicating to: backend-a,bakcend-b..." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:420(td) ./doc/config-reference/conf-changes/manila.xml:148(td) ./doc/config-reference/conf-changes/keystone.xml:32(td) ./doc/config-reference/conf-changes/ironic.xml:28(td) ./doc/config-reference/conf-changes/sahara.xml:44(td) ./doc/config-reference/conf-changes/nova.xml:36(td) ./doc/config-reference/conf-changes/neutron.xml:68(td) ./doc/config-reference/conf-changes/ceilometer.xml:56(td) ./doc/config-reference/conf-changes/trove.xml:36(td) ./doc/config-reference/conf-changes/glance.xml:36(td) ./doc/config-reference/conf-changes/heat.xml:36(td) -msgid "[DEFAULT] rpc_conn_pool_size = 30" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:421(td) ./doc/config-reference/conf-changes/manila.xml:149(td) ./doc/config-reference/conf-changes/keystone.xml:33(td) ./doc/config-reference/conf-changes/ironic.xml:29(td) ./doc/config-reference/conf-changes/sahara.xml:45(td) ./doc/config-reference/conf-changes/nova.xml:37(td) ./doc/config-reference/conf-changes/neutron.xml:69(td) ./doc/config-reference/conf-changes/ceilometer.xml:57(td) ./doc/config-reference/conf-changes/trove.xml:37(td) ./doc/config-reference/conf-changes/glance.xml:37(td) ./doc/config-reference/conf-changes/heat.xml:37(td) -msgid "(IntOpt) Size of RPC connection pool." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:424(td) ./doc/config-reference/conf-changes/manila.xml:152(td) ./doc/config-reference/conf-changes/keystone.xml:36(td) ./doc/config-reference/conf-changes/ironic.xml:32(td) ./doc/config-reference/conf-changes/sahara.xml:48(td) ./doc/config-reference/conf-changes/nova.xml:40(td) ./doc/config-reference/conf-changes/neutron.xml:72(td) ./doc/config-reference/conf-changes/ceilometer.xml:60(td) ./doc/config-reference/conf-changes/trove.xml:40(td) ./doc/config-reference/conf-changes/glance.xml:40(td) ./doc/config-reference/conf-changes/heat.xml:40(td) -msgid "[DEFAULT] rpc_poll_timeout = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:425(td) ./doc/config-reference/conf-changes/manila.xml:153(td) ./doc/config-reference/conf-changes/keystone.xml:37(td) ./doc/config-reference/conf-changes/ironic.xml:33(td) ./doc/config-reference/conf-changes/sahara.xml:49(td) ./doc/config-reference/conf-changes/nova.xml:41(td) ./doc/config-reference/conf-changes/neutron.xml:73(td) ./doc/config-reference/conf-changes/ceilometer.xml:61(td) ./doc/config-reference/conf-changes/trove.xml:41(td) ./doc/config-reference/conf-changes/glance.xml:41(td) ./doc/config-reference/conf-changes/heat.xml:41(td) -msgid "(IntOpt) The default number of seconds that poll should wait. Poll raises timeout exception when timeout expired." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:428(td) ./doc/config-reference/conf-changes/manila.xml:156(td) ./doc/config-reference/conf-changes/keystone.xml:40(td) ./doc/config-reference/conf-changes/ironic.xml:36(td) ./doc/config-reference/conf-changes/sahara.xml:52(td) ./doc/config-reference/conf-changes/nova.xml:44(td) ./doc/config-reference/conf-changes/neutron.xml:76(td) ./doc/config-reference/conf-changes/ceilometer.xml:64(td) ./doc/config-reference/conf-changes/trove.xml:44(td) ./doc/config-reference/conf-changes/glance.xml:44(td) ./doc/config-reference/conf-changes/heat.xml:44(td) -msgid "[DEFAULT] rpc_zmq_all_req_rep = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:429(td) ./doc/config-reference/conf-changes/manila.xml:157(td) ./doc/config-reference/conf-changes/keystone.xml:41(td) ./doc/config-reference/conf-changes/ironic.xml:37(td) ./doc/config-reference/conf-changes/sahara.xml:53(td) ./doc/config-reference/conf-changes/nova.xml:45(td) ./doc/config-reference/conf-changes/neutron.xml:77(td) ./doc/config-reference/conf-changes/ceilometer.xml:65(td) ./doc/config-reference/conf-changes/trove.xml:45(td) ./doc/config-reference/conf-changes/glance.xml:45(td) ./doc/config-reference/conf-changes/heat.xml:45(td) -msgid "(BoolOpt) Use REQ/REP pattern for all methods CALL/CAST/FANOUT." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:432(td) ./doc/config-reference/conf-changes/manila.xml:160(td) ./doc/config-reference/conf-changes/keystone.xml:44(td) ./doc/config-reference/conf-changes/ironic.xml:40(td) ./doc/config-reference/conf-changes/sahara.xml:56(td) ./doc/config-reference/conf-changes/nova.xml:48(td) ./doc/config-reference/conf-changes/neutron.xml:80(td) ./doc/config-reference/conf-changes/ceilometer.xml:68(td) ./doc/config-reference/conf-changes/trove.xml:48(td) ./doc/config-reference/conf-changes/glance.xml:48(td) ./doc/config-reference/conf-changes/heat.xml:48(td) -msgid "[DEFAULT] rpc_zmq_concurrency = eventlet" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:433(td) ./doc/config-reference/conf-changes/manila.xml:161(td) ./doc/config-reference/conf-changes/keystone.xml:45(td) ./doc/config-reference/conf-changes/ironic.xml:41(td) ./doc/config-reference/conf-changes/sahara.xml:57(td) ./doc/config-reference/conf-changes/nova.xml:49(td) ./doc/config-reference/conf-changes/neutron.xml:81(td) ./doc/config-reference/conf-changes/ceilometer.xml:69(td) ./doc/config-reference/conf-changes/trove.xml:49(td) ./doc/config-reference/conf-changes/glance.xml:49(td) ./doc/config-reference/conf-changes/heat.xml:49(td) -msgid "(StrOpt) Type of concurrency used. Either \"native\" or \"eventlet\"" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:436(td) -msgid "[DEFAULT] sf_enable_volume_mapping = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:437(td) -msgid "(BoolOpt) Create an internal mapping of volume IDs and account. Optimizes lookups and performance at the expense of memory, very large deployments may want to consider setting to False." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:440(td) -msgid "[DEFAULT] sheepdog_store_address = 127.0.0.1" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:441(td) -msgid "(StrOpt) IP address of sheep daemon." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:444(td) -msgid "[DEFAULT] sheepdog_store_port = 7000" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:445(td) -msgid "(IntOpt) Port of sheep daemon." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:448(td) -msgid "[DEFAULT] sio_force_delete = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:449(td) -msgid "(BoolOpt) Whether to allow force delete." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:452(td) -msgid "[DEFAULT] sio_protection_domain_id = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:453(td) -msgid "(StrOpt) Protection domain id." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:456(td) -msgid "[DEFAULT] sio_protection_domain_name = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:457(td) -msgid "(StrOpt) Protection domain name." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:460(td) -msgid "[DEFAULT] sio_rest_server_port = 443" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:461(td) -msgid "(StrOpt) REST server port." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:464(td) -msgid "[DEFAULT] sio_round_volume_capacity = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:465(td) -msgid "(BoolOpt) Whether to round volume capacity." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:468(td) -msgid "[DEFAULT] sio_server_certificate_path = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:469(td) -msgid "(StrOpt) Server certificate path." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:472(td) -msgid "[DEFAULT] sio_storage_pool_id = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:473(td) -msgid "(StrOpt) Storage pool id." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:476(td) -msgid "[DEFAULT] sio_storage_pool_name = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:477(td) ./doc/config-reference/conf-changes/cinder.xml:613(td) -msgid "(StrOpt) Storage pool name." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:480(td) -msgid "[DEFAULT] sio_storage_pools = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:481(td) -msgid "(StrOpt) Storage pools." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:484(td) -msgid "[DEFAULT] sio_unmap_volume_before_deletion = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:485(td) -msgid "(BoolOpt) Whether to unmap volume before deletion." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:488(td) -msgid "[DEFAULT] sio_verify_server_certificate = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:489(td) -msgid "(BoolOpt) Whether to verify server certificate." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:492(td) -msgid "[DEFAULT] storage_vnx_pool_names = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:493(td) -msgid "(StrOpt) Comma-separated list of storage pool names to be used." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:496(td) -msgid "[DEFAULT] tintri_api_version = v310" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:497(td) -msgid "(StrOpt) API version for the storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:500(td) -msgid "[DEFAULT] tintri_server_hostname = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:501(td) -msgid "(StrOpt) The hostname (or IP address) for the storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:504(td) -msgid "[DEFAULT] tintri_server_password = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:505(td) -msgid "(StrOpt) Password for the storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:508(td) -msgid "[DEFAULT] tintri_server_username = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:509(td) -msgid "(StrOpt) User name for the storage system" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:512(td) -msgid "[DEFAULT] trace_flags = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:513(td) -msgid "(ListOpt) List of options that control which trace info is written to the DEBUG log level to assist developers. Valid values are method and api." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:516(td) -msgid "[DEFAULT] violin_request_timeout = 300" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:517(td) -msgid "(IntOpt) Global backend request timeout, in seconds." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:520(td) -msgid "[DEFAULT] vmware_ca_file = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:521(td) ./doc/config-reference/conf-changes/ceilometer.xml:245(td) -msgid "(StrOpt) CA bundle file to use in verifying the vCenter server certificate." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:524(td) -msgid "[DEFAULT] vmware_cluster_name = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:525(td) -msgid "(MultiStrOpt) Name of a vCenter compute cluster where volumes should be created." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:528(td) -msgid "[DEFAULT] vmware_insecure = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:529(td) -msgid "(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if \"vmware_ca_file\" is set." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:532(td) -msgid "[DEFAULT] vzstorage_mount_options = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:533(td) -msgid "(ListOpt) Mount options passed to the vzstorage client. See section of the pstorage-mount man page for details." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:536(td) -msgid "[DEFAULT] vzstorage_mount_point_base = $state_path/mnt" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:537(td) -msgid "(StrOpt) Base dir containing mount points for vzstorage shares." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:540(td) -msgid "[DEFAULT] vzstorage_shares_config = /etc/cinder/vzstorage_shares" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:541(td) -msgid "(StrOpt) File with the list of available vzstorage shares." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:544(td) -msgid "[DEFAULT] vzstorage_sparsed_volumes = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:545(td) -msgid "(BoolOpt) Create volumes as sparsed files which take no space rather than regular files when using raw format, in which case volume creation takes lot of time." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:548(td) -msgid "[DEFAULT] vzstorage_used_ratio = 0.95" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:549(td) -msgid "(FloatOpt) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:552(td) ./doc/config-reference/conf-changes/manila.xml:172(td) ./doc/config-reference/conf-changes/keystone.xml:48(td) ./doc/config-reference/conf-changes/ironic.xml:44(td) ./doc/config-reference/conf-changes/sahara.xml:60(td) ./doc/config-reference/conf-changes/nova.xml:64(td) ./doc/config-reference/conf-changes/neutron.xml:88(td) ./doc/config-reference/conf-changes/ceilometer.xml:72(td) ./doc/config-reference/conf-changes/trove.xml:56(td) ./doc/config-reference/conf-changes/glance.xml:56(td) ./doc/config-reference/conf-changes/heat.xml:52(td) -msgid "[DEFAULT] watch_log_file = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:553(td) ./doc/config-reference/conf-changes/manila.xml:173(td) ./doc/config-reference/conf-changes/keystone.xml:49(td) ./doc/config-reference/conf-changes/ironic.xml:45(td) ./doc/config-reference/conf-changes/sahara.xml:61(td) ./doc/config-reference/conf-changes/nova.xml:65(td) ./doc/config-reference/conf-changes/neutron.xml:89(td) ./doc/config-reference/conf-changes/ceilometer.xml:73(td) ./doc/config-reference/conf-changes/trove.xml:57(td) ./doc/config-reference/conf-changes/glance.xml:57(td) ./doc/config-reference/conf-changes/heat.xml:53(td) -msgid "(BoolOpt) (Optional) Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log-file option is specified and Linux platform is used. This option is ignored if log_config_append is set." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:556(td) -msgid "[DEFAULT] xtremio_array_busy_retry_count = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:557(td) -msgid "(IntOpt) Number of retries in case array is busy" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:560(td) -msgid "[DEFAULT] xtremio_array_busy_retry_interval = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:561(td) -msgid "(IntOpt) Interval between retries in case array is busy" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:564(td) -msgid "[DEFAULT] zfssa_cache_directory = os-cinder-cache" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:565(td) -msgid "(StrOpt) Name of directory inside zfssa_nfs_share where cache volumes are stored." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:568(td) -msgid "[DEFAULT] zfssa_cache_project = os-cinder-cache" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:569(td) -msgid "(StrOpt) Name of ZFSSA project where cache volumes are stored." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:572(td) -msgid "[DEFAULT] zfssa_enable_local_cache = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:573(td) -msgid "(BoolOpt) Flag to enable local caching: True, False." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:576(td) -msgid "[DEFAULT] zfssa_initiator =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:577(td) -msgid "(StrOpt) iSCSI initiator IQNs. (comma separated)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:580(td) -msgid "[DEFAULT] zfssa_initiator_config =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:581(td) -msgid "(StrOpt) iSCSI initiators configuration." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:584(td) -msgid "[DEFAULT] zfssa_initiator_group =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:585(td) -msgid "(StrOpt) iSCSI initiator group." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:588(td) -msgid "[DEFAULT] zfssa_initiator_password =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:589(td) -msgid "(StrOpt) Secret of the iSCSI initiator CHAP user." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:592(td) -msgid "[DEFAULT] zfssa_initiator_user =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:593(td) -msgid "(StrOpt) iSCSI initiator CHAP user (name)." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:596(td) -msgid "[DEFAULT] zfssa_lun_compression = off" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:597(td) -msgid "(StrOpt) Data compression." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:600(td) -msgid "[DEFAULT] zfssa_lun_logbias = latency" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:601(td) -msgid "(StrOpt) Synchronous write bias." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:604(td) -msgid "[DEFAULT] zfssa_lun_sparse = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:605(td) -msgid "(BoolOpt) Flag to enable sparse (thin-provisioned): True, False." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:608(td) -msgid "[DEFAULT] zfssa_lun_volblocksize = 8k" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:609(td) -msgid "(StrOpt) Block size." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:612(td) -msgid "[DEFAULT] zfssa_pool = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:616(td) -msgid "[DEFAULT] zfssa_project = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:617(td) -msgid "(StrOpt) Project name." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:620(td) -msgid "[DEFAULT] zfssa_replication_ip =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:621(td) -msgid "(StrOpt) IP address used for replication data. (maybe the same as data ip)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:624(td) -msgid "[DEFAULT] zfssa_target_group = tgt-grp" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:625(td) -msgid "(StrOpt) iSCSI target group name." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:628(td) -msgid "[DEFAULT] zfssa_target_interfaces = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:629(td) -msgid "(StrOpt) Network interfaces of iSCSI targets. 
(comma separated)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:632(td) -msgid "[DEFAULT] zfssa_target_password =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:633(td) -msgid "(StrOpt) Secret of the iSCSI target CHAP user." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:636(td) -msgid "[DEFAULT] zfssa_target_portal = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:637(td) -msgid "(StrOpt) iSCSI target portal (Data-IP:Port, w.x.y.z:3260)." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:640(td) -msgid "[DEFAULT] zfssa_target_user =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:641(td) -msgid "(StrOpt) iSCSI target CHAP user (name)." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:644(td) ./doc/config-reference/conf-changes/manila.xml:208(td) ./doc/config-reference/conf-changes/keystone.xml:52(td) ./doc/config-reference/conf-changes/ironic.xml:48(td) ./doc/config-reference/conf-changes/sahara.xml:64(td) ./doc/config-reference/conf-changes/nova.xml:68(td) ./doc/config-reference/conf-changes/neutron.xml:92(td) ./doc/config-reference/conf-changes/ceilometer.xml:76(td) ./doc/config-reference/conf-changes/trove.xml:60(td) ./doc/config-reference/conf-changes/glance.xml:60(td) ./doc/config-reference/conf-changes/heat.xml:56(td) -msgid "[DEFAULT] zmq_use_broker = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:645(td) ./doc/config-reference/conf-changes/manila.xml:209(td) ./doc/config-reference/conf-changes/keystone.xml:53(td) ./doc/config-reference/conf-changes/ironic.xml:49(td) ./doc/config-reference/conf-changes/sahara.xml:65(td) ./doc/config-reference/conf-changes/nova.xml:69(td) ./doc/config-reference/conf-changes/neutron.xml:93(td) ./doc/config-reference/conf-changes/ceilometer.xml:77(td) ./doc/config-reference/conf-changes/trove.xml:61(td) ./doc/config-reference/conf-changes/glance.xml:61(td) ./doc/config-reference/conf-changes/heat.xml:57(td) -msgid "(BoolOpt) Shows whether zmq-messaging uses broker or not." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:648(td) ./doc/config-reference/conf-changes/manila.xml:212(td) ./doc/config-reference/conf-changes/keystone.xml:56(td) ./doc/config-reference/conf-changes/ironic.xml:92(td) ./doc/config-reference/conf-changes/sahara.xml:72(td) ./doc/config-reference/conf-changes/nova.xml:72(td) ./doc/config-reference/conf-changes/neutron.xml:152(td) ./doc/config-reference/conf-changes/ceilometer.xml:112(td) ./doc/config-reference/conf-changes/glance.xml:64(td) ./doc/config-reference/conf-changes/heat.xml:124(td) -msgid "[cors] allow_credentials = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:649(td) ./doc/config-reference/conf-changes/cinder.xml:673(td) ./doc/config-reference/conf-changes/manila.xml:213(td) ./doc/config-reference/conf-changes/manila.xml:237(td) ./doc/config-reference/conf-changes/keystone.xml:57(td) ./doc/config-reference/conf-changes/keystone.xml:81(td) ./doc/config-reference/conf-changes/ironic.xml:93(td) ./doc/config-reference/conf-changes/ironic.xml:117(td) ./doc/config-reference/conf-changes/sahara.xml:73(td) ./doc/config-reference/conf-changes/sahara.xml:97(td) ./doc/config-reference/conf-changes/nova.xml:73(td) ./doc/config-reference/conf-changes/nova.xml:97(td) ./doc/config-reference/conf-changes/neutron.xml:153(td) ./doc/config-reference/conf-changes/neutron.xml:177(td) ./doc/config-reference/conf-changes/ceilometer.xml:113(td) ./doc/config-reference/conf-changes/ceilometer.xml:137(td) ./doc/config-reference/conf-changes/glance.xml:65(td) ./doc/config-reference/conf-changes/glance.xml:89(td) ./doc/config-reference/conf-changes/heat.xml:125(td) ./doc/config-reference/conf-changes/heat.xml:149(td) -msgid "(BoolOpt) Indicate that the actual request can include user credentials" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:652(td) ./doc/config-reference/conf-changes/manila.xml:216(td) ./doc/config-reference/conf-changes/keystone.xml:60(td) ./doc/config-reference/conf-changes/ironic.xml:96(td) ./doc/config-reference/conf-changes/sahara.xml:76(td) ./doc/config-reference/conf-changes/nova.xml:76(td) ./doc/config-reference/conf-changes/neutron.xml:156(td) ./doc/config-reference/conf-changes/ceilometer.xml:116(td) ./doc/config-reference/conf-changes/glance.xml:68(td) ./doc/config-reference/conf-changes/heat.xml:128(td) -msgid "[cors] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:653(td) ./doc/config-reference/conf-changes/cinder.xml:677(td) ./doc/config-reference/conf-changes/manila.xml:217(td) ./doc/config-reference/conf-changes/manila.xml:241(td) ./doc/config-reference/conf-changes/keystone.xml:61(td) ./doc/config-reference/conf-changes/keystone.xml:85(td) ./doc/config-reference/conf-changes/ironic.xml:97(td) ./doc/config-reference/conf-changes/ironic.xml:121(td) ./doc/config-reference/conf-changes/sahara.xml:77(td) ./doc/config-reference/conf-changes/sahara.xml:101(td) ./doc/config-reference/conf-changes/nova.xml:77(td) ./doc/config-reference/conf-changes/nova.xml:101(td) ./doc/config-reference/conf-changes/neutron.xml:157(td) ./doc/config-reference/conf-changes/neutron.xml:181(td) ./doc/config-reference/conf-changes/ceilometer.xml:117(td) ./doc/config-reference/conf-changes/ceilometer.xml:141(td) ./doc/config-reference/conf-changes/glance.xml:69(td) ./doc/config-reference/conf-changes/glance.xml:93(td) ./doc/config-reference/conf-changes/heat.xml:129(td) 
./doc/config-reference/conf-changes/heat.xml:153(td) -msgid "(ListOpt) Indicate which header field names may be used during the actual request." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:656(td) ./doc/config-reference/conf-changes/manila.xml:220(td) ./doc/config-reference/conf-changes/keystone.xml:64(td) ./doc/config-reference/conf-changes/ironic.xml:100(td) ./doc/config-reference/conf-changes/sahara.xml:80(td) ./doc/config-reference/conf-changes/nova.xml:80(td) ./doc/config-reference/conf-changes/neutron.xml:160(td) ./doc/config-reference/conf-changes/ceilometer.xml:120(td) ./doc/config-reference/conf-changes/glance.xml:72(td) ./doc/config-reference/conf-changes/heat.xml:132(td) -msgid "[cors] allow_methods = GET, POST, PUT, DELETE, OPTIONS" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:657(td) ./doc/config-reference/conf-changes/cinder.xml:681(td) ./doc/config-reference/conf-changes/manila.xml:221(td) ./doc/config-reference/conf-changes/manila.xml:245(td) ./doc/config-reference/conf-changes/keystone.xml:65(td) ./doc/config-reference/conf-changes/keystone.xml:89(td) ./doc/config-reference/conf-changes/ironic.xml:101(td) ./doc/config-reference/conf-changes/ironic.xml:125(td) ./doc/config-reference/conf-changes/sahara.xml:81(td) ./doc/config-reference/conf-changes/sahara.xml:105(td) ./doc/config-reference/conf-changes/nova.xml:81(td) ./doc/config-reference/conf-changes/nova.xml:105(td) ./doc/config-reference/conf-changes/neutron.xml:161(td) ./doc/config-reference/conf-changes/neutron.xml:185(td) ./doc/config-reference/conf-changes/ceilometer.xml:121(td) ./doc/config-reference/conf-changes/ceilometer.xml:145(td) ./doc/config-reference/conf-changes/glance.xml:73(td) ./doc/config-reference/conf-changes/glance.xml:97(td) ./doc/config-reference/conf-changes/heat.xml:133(td) ./doc/config-reference/conf-changes/heat.xml:157(td) -msgid "(ListOpt) Indicate which methods can be used during the actual request." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:660(td) ./doc/config-reference/conf-changes/manila.xml:224(td) ./doc/config-reference/conf-changes/keystone.xml:68(td) ./doc/config-reference/conf-changes/ironic.xml:104(td) ./doc/config-reference/conf-changes/sahara.xml:84(td) ./doc/config-reference/conf-changes/nova.xml:84(td) ./doc/config-reference/conf-changes/neutron.xml:164(td) ./doc/config-reference/conf-changes/ceilometer.xml:124(td) ./doc/config-reference/conf-changes/glance.xml:76(td) ./doc/config-reference/conf-changes/heat.xml:136(td) -msgid "[cors] allowed_origin = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:661(td) ./doc/config-reference/conf-changes/cinder.xml:685(td) ./doc/config-reference/conf-changes/manila.xml:225(td) ./doc/config-reference/conf-changes/manila.xml:249(td) ./doc/config-reference/conf-changes/keystone.xml:69(td) ./doc/config-reference/conf-changes/keystone.xml:93(td) ./doc/config-reference/conf-changes/ironic.xml:105(td) ./doc/config-reference/conf-changes/ironic.xml:129(td) ./doc/config-reference/conf-changes/sahara.xml:85(td) ./doc/config-reference/conf-changes/sahara.xml:109(td) ./doc/config-reference/conf-changes/nova.xml:85(td) ./doc/config-reference/conf-changes/nova.xml:109(td) ./doc/config-reference/conf-changes/neutron.xml:165(td) ./doc/config-reference/conf-changes/neutron.xml:189(td) ./doc/config-reference/conf-changes/ceilometer.xml:125(td) ./doc/config-reference/conf-changes/ceilometer.xml:149(td) ./doc/config-reference/conf-changes/glance.xml:77(td) ./doc/config-reference/conf-changes/glance.xml:101(td) ./doc/config-reference/conf-changes/heat.xml:137(td) ./doc/config-reference/conf-changes/heat.xml:161(td) -msgid "(StrOpt) Indicate whether this resource may be shared with the domain received in the requests \"origin\" header." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:664(td) ./doc/config-reference/conf-changes/manila.xml:228(td) ./doc/config-reference/conf-changes/keystone.xml:72(td) ./doc/config-reference/conf-changes/ironic.xml:108(td) ./doc/config-reference/conf-changes/sahara.xml:88(td) ./doc/config-reference/conf-changes/nova.xml:88(td) ./doc/config-reference/conf-changes/neutron.xml:168(td) ./doc/config-reference/conf-changes/ceilometer.xml:128(td) ./doc/config-reference/conf-changes/glance.xml:80(td) ./doc/config-reference/conf-changes/heat.xml:140(td) -msgid "[cors] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:665(td) ./doc/config-reference/conf-changes/cinder.xml:689(td) ./doc/config-reference/conf-changes/manila.xml:229(td) ./doc/config-reference/conf-changes/manila.xml:253(td) ./doc/config-reference/conf-changes/keystone.xml:73(td) ./doc/config-reference/conf-changes/keystone.xml:97(td) ./doc/config-reference/conf-changes/ironic.xml:109(td) ./doc/config-reference/conf-changes/ironic.xml:133(td) ./doc/config-reference/conf-changes/sahara.xml:89(td) ./doc/config-reference/conf-changes/sahara.xml:113(td) ./doc/config-reference/conf-changes/nova.xml:89(td) ./doc/config-reference/conf-changes/nova.xml:113(td) ./doc/config-reference/conf-changes/neutron.xml:169(td) ./doc/config-reference/conf-changes/neutron.xml:193(td) ./doc/config-reference/conf-changes/ceilometer.xml:129(td) ./doc/config-reference/conf-changes/ceilometer.xml:153(td) ./doc/config-reference/conf-changes/glance.xml:81(td) ./doc/config-reference/conf-changes/glance.xml:105(td) ./doc/config-reference/conf-changes/heat.xml:141(td) ./doc/config-reference/conf-changes/heat.xml:165(td) -msgid "(ListOpt) Indicate which headers are safe to expose to the API. Defaults to HTTP Simple Headers." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:668(td) ./doc/config-reference/conf-changes/manila.xml:232(td) ./doc/config-reference/conf-changes/keystone.xml:76(td) ./doc/config-reference/conf-changes/ironic.xml:112(td) ./doc/config-reference/conf-changes/sahara.xml:92(td) ./doc/config-reference/conf-changes/nova.xml:92(td) ./doc/config-reference/conf-changes/neutron.xml:172(td) ./doc/config-reference/conf-changes/ceilometer.xml:132(td) ./doc/config-reference/conf-changes/glance.xml:84(td) ./doc/config-reference/conf-changes/heat.xml:144(td) -msgid "[cors] max_age = 3600" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:669(td) ./doc/config-reference/conf-changes/cinder.xml:693(td) ./doc/config-reference/conf-changes/manila.xml:233(td) ./doc/config-reference/conf-changes/manila.xml:257(td) ./doc/config-reference/conf-changes/keystone.xml:77(td) ./doc/config-reference/conf-changes/keystone.xml:101(td) ./doc/config-reference/conf-changes/ironic.xml:113(td) ./doc/config-reference/conf-changes/ironic.xml:137(td) ./doc/config-reference/conf-changes/sahara.xml:93(td) ./doc/config-reference/conf-changes/sahara.xml:117(td) ./doc/config-reference/conf-changes/nova.xml:93(td) ./doc/config-reference/conf-changes/nova.xml:117(td) ./doc/config-reference/conf-changes/neutron.xml:173(td) ./doc/config-reference/conf-changes/neutron.xml:197(td) ./doc/config-reference/conf-changes/ceilometer.xml:133(td) ./doc/config-reference/conf-changes/ceilometer.xml:157(td) ./doc/config-reference/conf-changes/glance.xml:85(td) ./doc/config-reference/conf-changes/glance.xml:109(td) ./doc/config-reference/conf-changes/heat.xml:145(td) ./doc/config-reference/conf-changes/heat.xml:169(td) -msgid "(IntOpt) Maximum cache age of CORS preflight requests." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:672(td) ./doc/config-reference/conf-changes/manila.xml:236(td) ./doc/config-reference/conf-changes/keystone.xml:80(td) ./doc/config-reference/conf-changes/ironic.xml:116(td) ./doc/config-reference/conf-changes/sahara.xml:96(td) ./doc/config-reference/conf-changes/nova.xml:96(td) ./doc/config-reference/conf-changes/neutron.xml:176(td) ./doc/config-reference/conf-changes/ceilometer.xml:136(td) ./doc/config-reference/conf-changes/glance.xml:88(td) ./doc/config-reference/conf-changes/heat.xml:148(td) -msgid "[cors.subdomain] allow_credentials = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:676(td) ./doc/config-reference/conf-changes/manila.xml:240(td) ./doc/config-reference/conf-changes/keystone.xml:84(td) ./doc/config-reference/conf-changes/ironic.xml:120(td) ./doc/config-reference/conf-changes/sahara.xml:100(td) ./doc/config-reference/conf-changes/nova.xml:100(td) ./doc/config-reference/conf-changes/neutron.xml:180(td) ./doc/config-reference/conf-changes/ceilometer.xml:140(td) ./doc/config-reference/conf-changes/glance.xml:92(td) ./doc/config-reference/conf-changes/heat.xml:152(td) -msgid "[cors.subdomain] allow_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:680(td) ./doc/config-reference/conf-changes/manila.xml:244(td) ./doc/config-reference/conf-changes/keystone.xml:88(td) ./doc/config-reference/conf-changes/ironic.xml:124(td) ./doc/config-reference/conf-changes/sahara.xml:104(td) ./doc/config-reference/conf-changes/nova.xml:104(td) ./doc/config-reference/conf-changes/neutron.xml:184(td) ./doc/config-reference/conf-changes/ceilometer.xml:144(td) ./doc/config-reference/conf-changes/glance.xml:96(td) ./doc/config-reference/conf-changes/heat.xml:156(td) -msgid "[cors.subdomain] allow_methods = GET, POST, PUT, DELETE, OPTIONS" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:684(td) ./doc/config-reference/conf-changes/manila.xml:248(td) ./doc/config-reference/conf-changes/keystone.xml:92(td) ./doc/config-reference/conf-changes/ironic.xml:128(td) ./doc/config-reference/conf-changes/sahara.xml:108(td) ./doc/config-reference/conf-changes/nova.xml:108(td) ./doc/config-reference/conf-changes/neutron.xml:188(td) ./doc/config-reference/conf-changes/ceilometer.xml:148(td) ./doc/config-reference/conf-changes/glance.xml:100(td) ./doc/config-reference/conf-changes/heat.xml:160(td) -msgid "[cors.subdomain] allowed_origin = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:688(td) ./doc/config-reference/conf-changes/manila.xml:252(td) ./doc/config-reference/conf-changes/keystone.xml:96(td) ./doc/config-reference/conf-changes/ironic.xml:132(td) ./doc/config-reference/conf-changes/sahara.xml:112(td) ./doc/config-reference/conf-changes/nova.xml:112(td) ./doc/config-reference/conf-changes/neutron.xml:192(td) ./doc/config-reference/conf-changes/ceilometer.xml:152(td) ./doc/config-reference/conf-changes/glance.xml:104(td) ./doc/config-reference/conf-changes/heat.xml:164(td) -msgid "[cors.subdomain] expose_headers = Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:692(td) ./doc/config-reference/conf-changes/manila.xml:256(td) ./doc/config-reference/conf-changes/keystone.xml:100(td) ./doc/config-reference/conf-changes/ironic.xml:136(td) ./doc/config-reference/conf-changes/sahara.xml:116(td) 
./doc/config-reference/conf-changes/nova.xml:116(td) ./doc/config-reference/conf-changes/neutron.xml:196(td) ./doc/config-reference/conf-changes/ceilometer.xml:156(td) ./doc/config-reference/conf-changes/glance.xml:108(td) ./doc/config-reference/conf-changes/heat.xml:168(td) -msgid "[cors.subdomain] max_age = 3600" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:696(td) ./doc/config-reference/conf-changes/manila.xml:260(td) ./doc/config-reference/conf-changes/keystone.xml:108(td) ./doc/config-reference/conf-changes/ironic.xml:216(td) ./doc/config-reference/conf-changes/sahara.xml:128(td) ./doc/config-reference/conf-changes/nova.xml:128(td) ./doc/config-reference/conf-changes/neutron.xml:200(td) ./doc/config-reference/conf-changes/ceilometer.xml:192(td) ./doc/config-reference/conf-changes/trove.xml:80(td) ./doc/config-reference/conf-changes/glance.xml:136(td) ./doc/config-reference/conf-changes/heat.xml:184(td) -msgid "[keystone_authtoken] region_name = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:697(td) ./doc/config-reference/conf-changes/manila.xml:261(td) ./doc/config-reference/conf-changes/keystone.xml:109(td) ./doc/config-reference/conf-changes/ironic.xml:217(td) ./doc/config-reference/conf-changes/sahara.xml:129(td) ./doc/config-reference/conf-changes/nova.xml:129(td) ./doc/config-reference/conf-changes/neutron.xml:201(td) ./doc/config-reference/conf-changes/ceilometer.xml:193(td) ./doc/config-reference/conf-changes/trove.xml:81(td) ./doc/config-reference/conf-changes/glance.xml:137(td) ./doc/config-reference/conf-changes/heat.xml:185(td) -msgid "(StrOpt) The region in which the identity server can be found." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:700(td) ./doc/config-reference/conf-changes/manila.xml:264(td) ./doc/config-reference/conf-changes/keystone.xml:112(td) ./doc/config-reference/conf-changes/ironic.xml:220(td) ./doc/config-reference/conf-changes/sahara.xml:160(td) ./doc/config-reference/conf-changes/nova.xml:176(td) ./doc/config-reference/conf-changes/neutron.xml:216(td) ./doc/config-reference/conf-changes/ceilometer.xml:208(td) ./doc/config-reference/conf-changes/trove.xml:168(td) ./doc/config-reference/conf-changes/glance.xml:140(td) ./doc/config-reference/conf-changes/heat.xml:188(td) -msgid "[oslo_messaging_amqp] password =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:701(td) ./doc/config-reference/conf-changes/manila.xml:265(td) ./doc/config-reference/conf-changes/keystone.xml:113(td) ./doc/config-reference/conf-changes/ironic.xml:221(td) ./doc/config-reference/conf-changes/sahara.xml:161(td) ./doc/config-reference/conf-changes/nova.xml:177(td) ./doc/config-reference/conf-changes/neutron.xml:217(td) ./doc/config-reference/conf-changes/ceilometer.xml:209(td) ./doc/config-reference/conf-changes/trove.xml:169(td) ./doc/config-reference/conf-changes/glance.xml:141(td) ./doc/config-reference/conf-changes/heat.xml:189(td) -msgid "(StrOpt) Password for message broker authentication" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:704(td) ./doc/config-reference/conf-changes/manila.xml:268(td) ./doc/config-reference/conf-changes/keystone.xml:116(td) ./doc/config-reference/conf-changes/ironic.xml:224(td) ./doc/config-reference/conf-changes/sahara.xml:164(td) ./doc/config-reference/conf-changes/nova.xml:180(td) ./doc/config-reference/conf-changes/neutron.xml:220(td) ./doc/config-reference/conf-changes/ceilometer.xml:212(td) ./doc/config-reference/conf-changes/trove.xml:172(td) 
./doc/config-reference/conf-changes/glance.xml:144(td) ./doc/config-reference/conf-changes/heat.xml:192(td) -msgid "[oslo_messaging_amqp] sasl_config_dir =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:705(td) ./doc/config-reference/conf-changes/manila.xml:269(td) ./doc/config-reference/conf-changes/keystone.xml:117(td) ./doc/config-reference/conf-changes/ironic.xml:225(td) ./doc/config-reference/conf-changes/sahara.xml:165(td) ./doc/config-reference/conf-changes/nova.xml:181(td) ./doc/config-reference/conf-changes/neutron.xml:221(td) ./doc/config-reference/conf-changes/ceilometer.xml:213(td) ./doc/config-reference/conf-changes/trove.xml:173(td) ./doc/config-reference/conf-changes/glance.xml:145(td) ./doc/config-reference/conf-changes/heat.xml:193(td) -msgid "(StrOpt) Path to directory that contains the SASL configuration" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:708(td) ./doc/config-reference/conf-changes/manila.xml:272(td) ./doc/config-reference/conf-changes/keystone.xml:120(td) ./doc/config-reference/conf-changes/ironic.xml:228(td) ./doc/config-reference/conf-changes/sahara.xml:168(td) ./doc/config-reference/conf-changes/nova.xml:184(td) ./doc/config-reference/conf-changes/neutron.xml:224(td) ./doc/config-reference/conf-changes/ceilometer.xml:216(td) ./doc/config-reference/conf-changes/trove.xml:176(td) ./doc/config-reference/conf-changes/glance.xml:148(td) ./doc/config-reference/conf-changes/heat.xml:196(td) -msgid "[oslo_messaging_amqp] sasl_config_name =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:709(td) ./doc/config-reference/conf-changes/manila.xml:273(td) ./doc/config-reference/conf-changes/keystone.xml:121(td) ./doc/config-reference/conf-changes/ironic.xml:229(td) ./doc/config-reference/conf-changes/sahara.xml:169(td) ./doc/config-reference/conf-changes/nova.xml:185(td) ./doc/config-reference/conf-changes/neutron.xml:225(td) ./doc/config-reference/conf-changes/ceilometer.xml:217(td) ./doc/config-reference/conf-changes/trove.xml:177(td) ./doc/config-reference/conf-changes/glance.xml:149(td) ./doc/config-reference/conf-changes/heat.xml:197(td) -msgid "(StrOpt) Name of configuration file (without .conf suffix)" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:712(td) ./doc/config-reference/conf-changes/manila.xml:276(td) ./doc/config-reference/conf-changes/keystone.xml:124(td) ./doc/config-reference/conf-changes/ironic.xml:232(td) ./doc/config-reference/conf-changes/sahara.xml:172(td) ./doc/config-reference/conf-changes/nova.xml:188(td) ./doc/config-reference/conf-changes/neutron.xml:228(td) ./doc/config-reference/conf-changes/ceilometer.xml:220(td) ./doc/config-reference/conf-changes/trove.xml:180(td) ./doc/config-reference/conf-changes/glance.xml:152(td) ./doc/config-reference/conf-changes/heat.xml:200(td) -msgid "[oslo_messaging_amqp] sasl_mechanisms =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:713(td) ./doc/config-reference/conf-changes/manila.xml:277(td) ./doc/config-reference/conf-changes/keystone.xml:125(td) ./doc/config-reference/conf-changes/ironic.xml:233(td) ./doc/config-reference/conf-changes/sahara.xml:173(td) ./doc/config-reference/conf-changes/nova.xml:189(td) ./doc/config-reference/conf-changes/neutron.xml:229(td) ./doc/config-reference/conf-changes/ceilometer.xml:221(td) ./doc/config-reference/conf-changes/trove.xml:181(td) ./doc/config-reference/conf-changes/glance.xml:153(td) ./doc/config-reference/conf-changes/heat.xml:201(td) -msgid "(StrOpt) Space separated list 
of acceptable SASL mechanisms" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:716(td) ./doc/config-reference/conf-changes/manila.xml:280(td) ./doc/config-reference/conf-changes/keystone.xml:128(td) ./doc/config-reference/conf-changes/ironic.xml:236(td) ./doc/config-reference/conf-changes/sahara.xml:176(td) ./doc/config-reference/conf-changes/nova.xml:192(td) ./doc/config-reference/conf-changes/neutron.xml:232(td) ./doc/config-reference/conf-changes/ceilometer.xml:224(td) ./doc/config-reference/conf-changes/trove.xml:184(td) ./doc/config-reference/conf-changes/glance.xml:156(td) ./doc/config-reference/conf-changes/heat.xml:204(td) -msgid "[oslo_messaging_amqp] username =" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:717(td) ./doc/config-reference/conf-changes/manila.xml:281(td) ./doc/config-reference/conf-changes/keystone.xml:129(td) ./doc/config-reference/conf-changes/ironic.xml:237(td) ./doc/config-reference/conf-changes/sahara.xml:177(td) ./doc/config-reference/conf-changes/nova.xml:193(td) ./doc/config-reference/conf-changes/neutron.xml:233(td) ./doc/config-reference/conf-changes/ceilometer.xml:225(td) ./doc/config-reference/conf-changes/trove.xml:185(td) ./doc/config-reference/conf-changes/glance.xml:157(td) ./doc/config-reference/conf-changes/heat.xml:205(td) -msgid "(StrOpt) User name for message broker authentication" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:720(td) ./doc/config-reference/conf-changes/manila.xml:284(td) ./doc/config-reference/conf-changes/keystone.xml:132(td) ./doc/config-reference/conf-changes/ironic.xml:240(td) ./doc/config-reference/conf-changes/sahara.xml:180(td) ./doc/config-reference/conf-changes/nova.xml:196(td) ./doc/config-reference/conf-changes/neutron.xml:236(td) ./doc/config-reference/conf-changes/ceilometer.xml:228(td) ./doc/config-reference/conf-changes/trove.xml:188(td) ./doc/config-reference/conf-changes/glance.xml:160(td) ./doc/config-reference/conf-changes/heat.xml:208(td) -msgid "[oslo_messaging_qpid] send_single_reply = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:721(td) ./doc/config-reference/conf-changes/cinder.xml:729(td) ./doc/config-reference/conf-changes/manila.xml:285(td) ./doc/config-reference/conf-changes/manila.xml:293(td) ./doc/config-reference/conf-changes/keystone.xml:133(td) ./doc/config-reference/conf-changes/keystone.xml:141(td) ./doc/config-reference/conf-changes/ironic.xml:241(td) ./doc/config-reference/conf-changes/ironic.xml:249(td) ./doc/config-reference/conf-changes/sahara.xml:181(td) ./doc/config-reference/conf-changes/sahara.xml:189(td) ./doc/config-reference/conf-changes/nova.xml:197(td) ./doc/config-reference/conf-changes/nova.xml:205(td) ./doc/config-reference/conf-changes/neutron.xml:237(td) ./doc/config-reference/conf-changes/neutron.xml:245(td) ./doc/config-reference/conf-changes/ceilometer.xml:229(td) ./doc/config-reference/conf-changes/ceilometer.xml:237(td) ./doc/config-reference/conf-changes/trove.xml:189(td) ./doc/config-reference/conf-changes/trove.xml:197(td) ./doc/config-reference/conf-changes/glance.xml:161(td) ./doc/config-reference/conf-changes/glance.xml:169(td) ./doc/config-reference/conf-changes/heat.xml:209(td) ./doc/config-reference/conf-changes/heat.xml:217(td) -msgid "(BoolOpt) Send a single AMQP reply to call message. The current behaviour since oslo-incubator is to send two AMQP replies - first one with the payload, a second one to ensure the other have finish to send the payload. 
We are going to remove it in the N release, but we must keep backward compatible at the same time. This option provides such compatibility - it defaults to False in Liberty and can be turned on for early adopters with a new installations or for testing. Please note, that this option will be removed in the Mitaka release." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:724(td) ./doc/config-reference/conf-changes/manila.xml:288(td) ./doc/config-reference/conf-changes/keystone.xml:136(td) ./doc/config-reference/conf-changes/ironic.xml:244(td) ./doc/config-reference/conf-changes/sahara.xml:184(td) ./doc/config-reference/conf-changes/nova.xml:200(td) ./doc/config-reference/conf-changes/neutron.xml:240(td) ./doc/config-reference/conf-changes/ceilometer.xml:232(td) ./doc/config-reference/conf-changes/trove.xml:192(td) ./doc/config-reference/conf-changes/glance.xml:164(td) ./doc/config-reference/conf-changes/heat.xml:212(td) -msgid "[oslo_messaging_rabbit] kombu_reconnect_timeout = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:725(td) ./doc/config-reference/conf-changes/manila.xml:289(td) ./doc/config-reference/conf-changes/keystone.xml:137(td) ./doc/config-reference/conf-changes/ironic.xml:245(td) ./doc/config-reference/conf-changes/sahara.xml:185(td) ./doc/config-reference/conf-changes/nova.xml:201(td) ./doc/config-reference/conf-changes/neutron.xml:241(td) ./doc/config-reference/conf-changes/ceilometer.xml:233(td) ./doc/config-reference/conf-changes/trove.xml:193(td) ./doc/config-reference/conf-changes/glance.xml:165(td) ./doc/config-reference/conf-changes/heat.xml:213(td) -msgid "(IntOpt) How long to wait before considering a reconnect attempt to have failed. This value should not be longer than rpc_response_timeout." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:728(td) ./doc/config-reference/conf-changes/manila.xml:292(td) ./doc/config-reference/conf-changes/keystone.xml:140(td) ./doc/config-reference/conf-changes/ironic.xml:248(td) ./doc/config-reference/conf-changes/sahara.xml:188(td) ./doc/config-reference/conf-changes/nova.xml:204(td) ./doc/config-reference/conf-changes/neutron.xml:244(td) ./doc/config-reference/conf-changes/ceilometer.xml:236(td) ./doc/config-reference/conf-changes/trove.xml:196(td) ./doc/config-reference/conf-changes/glance.xml:168(td) ./doc/config-reference/conf-changes/heat.xml:216(td) -msgid "[oslo_messaging_rabbit] send_single_reply = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:732(td) ./doc/config-reference/conf-changes/manila.xml:300(td) ./doc/config-reference/conf-changes/keystone.xml:144(td) ./doc/config-reference/conf-changes/ironic.xml:256(td) ./doc/config-reference/conf-changes/sahara.xml:192(td) ./doc/config-reference/conf-changes/nova.xml:208(td) ./doc/config-reference/conf-changes/neutron.xml:248(td) ./doc/config-reference/conf-changes/ceilometer.xml:240(td) ./doc/config-reference/conf-changes/glance.xml:176(td) ./doc/config-reference/conf-changes/heat.xml:220(td) -msgid "[oslo_middleware] secure_proxy_ssl_header = X-Forwarded-Proto" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:733(td) ./doc/config-reference/conf-changes/manila.xml:301(td) ./doc/config-reference/conf-changes/keystone.xml:145(td) ./doc/config-reference/conf-changes/ironic.xml:257(td) ./doc/config-reference/conf-changes/sahara.xml:193(td) ./doc/config-reference/conf-changes/nova.xml:209(td) ./doc/config-reference/conf-changes/neutron.xml:249(td) ./doc/config-reference/conf-changes/ceilometer.xml:241(td) ./doc/config-reference/conf-changes/glance.xml:177(td) ./doc/config-reference/conf-changes/heat.xml:221(td) -msgid "(StrOpt) The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by an SSL termination proxy." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:736(td) ./doc/config-reference/conf-changes/manila.xml:304(td) ./doc/config-reference/conf-changes/neutron.xml:252(td) ./doc/config-reference/conf-changes/heat.xml:224(td) -msgid "[oslo_policy] policy_default_rule = default" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:737(td) ./doc/config-reference/conf-changes/manila.xml:305(td) ./doc/config-reference/conf-changes/neutron.xml:253(td) ./doc/config-reference/conf-changes/heat.xml:225(td) -msgid "(StrOpt) Default rule. Enforced when a requested rule is not found." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:740(td) ./doc/config-reference/conf-changes/manila.xml:308(td) ./doc/config-reference/conf-changes/neutron.xml:256(td) ./doc/config-reference/conf-changes/heat.xml:228(td) -msgid "[oslo_policy] policy_dirs = ['policy.d']" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:741(td) ./doc/config-reference/conf-changes/manila.xml:309(td) ./doc/config-reference/conf-changes/neutron.xml:257(td) ./doc/config-reference/conf-changes/heat.xml:229(td) -msgid "(MultiStrOpt) Directories where policy configuration files are stored. They can be relative to any directory in the search path defined by the config_dir option, or absolute paths. The file defined by policy_file must exist for these directories to be searched. Missing or empty directories are ignored." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:744(td) ./doc/config-reference/conf-changes/manila.xml:312(td) ./doc/config-reference/conf-changes/neutron.xml:260(td) ./doc/config-reference/conf-changes/heat.xml:232(td) -msgid "[oslo_policy] policy_file = policy.json" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:745(td) ./doc/config-reference/conf-changes/manila.xml:313(td) ./doc/config-reference/conf-changes/neutron.xml:261(td) ./doc/config-reference/conf-changes/heat.xml:233(td) -msgid "(StrOpt) The JSON file that defines policies." -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:748(td) ./doc/config-reference/conf-changes/ironic.xml:260(td) ./doc/config-reference/conf-changes/nova.xml:212(td) ./doc/config-reference/conf-changes/neutron.xml:264(td) ./doc/config-reference/conf-changes/heat.xml:236(td) -msgid "[oslo_versionedobjects] fatal_exception_format_errors = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:749(td) ./doc/config-reference/conf-changes/ironic.xml:261(td) ./doc/config-reference/conf-changes/nova.xml:213(td) ./doc/config-reference/conf-changes/neutron.xml:265(td) ./doc/config-reference/conf-changes/heat.xml:237(td) -msgid "(BoolOpt) Make exception message format errors fatal" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:753(caption) ./doc/config-reference/conf-changes/manila.xml:317(caption) ./doc/config-reference/conf-changes/keystone.xml:161(caption) ./doc/config-reference/conf-changes/ironic.xml:265(caption) ./doc/config-reference/conf-changes/sahara.xml:209(caption) ./doc/config-reference/conf-changes/nova.xml:265(caption) ./doc/config-reference/conf-changes/neutron.xml:273(caption) ./doc/config-reference/conf-changes/ceilometer.xml:253(caption) ./doc/config-reference/conf-changes/trove.xml:321(caption) ./doc/config-reference/conf-changes/glance.xml:181(caption) ./doc/config-reference/conf-changes/heat.xml:249(caption) -msgid "New default values" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:760(td) ./doc/config-reference/conf-changes/manila.xml:324(td) ./doc/config-reference/conf-changes/keystone.xml:168(td) ./doc/config-reference/conf-changes/ironic.xml:272(td) ./doc/config-reference/conf-changes/sahara.xml:216(td) ./doc/config-reference/conf-changes/nova.xml:272(td) ./doc/config-reference/conf-changes/neutron.xml:280(td) ./doc/config-reference/conf-changes/ceilometer.xml:260(td) ./doc/config-reference/conf-changes/trove.xml:328(td) ./doc/config-reference/conf-changes/glance.xml:188(td) ./doc/config-reference/conf-changes/heat.xml:256(td) -msgid "Previous default value" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:761(td) ./doc/config-reference/conf-changes/manila.xml:325(td) ./doc/config-reference/conf-changes/keystone.xml:169(td) ./doc/config-reference/conf-changes/ironic.xml:273(td) ./doc/config-reference/conf-changes/sahara.xml:217(td) ./doc/config-reference/conf-changes/nova.xml:273(td) ./doc/config-reference/conf-changes/neutron.xml:281(td) ./doc/config-reference/conf-changes/ceilometer.xml:261(td) ./doc/config-reference/conf-changes/trove.xml:329(td) ./doc/config-reference/conf-changes/glance.xml:189(td) ./doc/config-reference/conf-changes/heat.xml:257(td) -msgid "New default value" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:765(td) -msgid "[DEFAULT] auth_strategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:766(td) -msgid "noauth" -msgstr "" - -#: 
./doc/config-reference/conf-changes/cinder.xml:770(td) ./doc/config-reference/conf-changes/manila.xml:329(td) ./doc/config-reference/conf-changes/keystone.xml:178(td) ./doc/config-reference/conf-changes/ironic.xml:277(td) ./doc/config-reference/conf-changes/sahara.xml:221(td) ./doc/config-reference/conf-changes/nova.xml:287(td) ./doc/config-reference/conf-changes/neutron.xml:290(td) ./doc/config-reference/conf-changes/ceilometer.xml:265(td) ./doc/config-reference/conf-changes/trove.xml:338(td) ./doc/config-reference/conf-changes/glance.xml:203(td) ./doc/config-reference/conf-changes/heat.xml:261(td) -msgid "[DEFAULT] default_log_levels" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:771(td) ./doc/config-reference/conf-changes/keystone.xml:179(td) ./doc/config-reference/conf-changes/nova.xml:288(td) ./doc/config-reference/conf-changes/neutron.xml:291(td) ./doc/config-reference/conf-changes/glance.xml:204(td) ./doc/config-reference/conf-changes/heat.xml:262(td) -msgid "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:772(td) ./doc/config-reference/conf-changes/manila.xml:331(td) ./doc/config-reference/conf-changes/keystone.xml:180(td) ./doc/config-reference/conf-changes/ironic.xml:279(td) ./doc/config-reference/conf-changes/nova.xml:289(td) ./doc/config-reference/conf-changes/neutron.xml:292(td) ./doc/config-reference/conf-changes/ceilometer.xml:267(td) ./doc/config-reference/conf-changes/trove.xml:340(td) ./doc/config-reference/conf-changes/glance.xml:205(td) ./doc/config-reference/conf-changes/heat.xml:263(td) -msgid "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:775(td) ./doc/config-reference/conf-changes/manila.xml:344(td) ./doc/config-reference/conf-changes/keystone.xml:183(td) ./doc/config-reference/conf-changes/ironic.xml:282(td) ./doc/config-reference/conf-changes/sahara.xml:231(td) ./doc/config-reference/conf-changes/nova.xml:292(td) ./doc/config-reference/conf-changes/neutron.xml:305(td) ./doc/config-reference/conf-changes/ceilometer.xml:270(td) ./doc/config-reference/conf-changes/trove.xml:348(td) ./doc/config-reference/conf-changes/glance.xml:218(td) ./doc/config-reference/conf-changes/heat.xml:276(td) -msgid "[DEFAULT] logging_exception_prefix" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:776(td) ./doc/config-reference/conf-changes/manila.xml:345(td) ./doc/config-reference/conf-changes/keystone.xml:184(td) ./doc/config-reference/conf-changes/ironic.xml:283(td) ./doc/config-reference/conf-changes/sahara.xml:232(td) ./doc/config-reference/conf-changes/nova.xml:293(td) ./doc/config-reference/conf-changes/neutron.xml:306(td) ./doc/config-reference/conf-changes/ceilometer.xml:271(td) ./doc/config-reference/conf-changes/trove.xml:349(td) ./doc/config-reference/conf-changes/glance.xml:219(td) ./doc/config-reference/conf-changes/heat.xml:277(td) 
-msgid "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:777(td) ./doc/config-reference/conf-changes/manila.xml:346(td) ./doc/config-reference/conf-changes/keystone.xml:185(td) ./doc/config-reference/conf-changes/ironic.xml:284(td) ./doc/config-reference/conf-changes/sahara.xml:233(td) ./doc/config-reference/conf-changes/nova.xml:294(td) ./doc/config-reference/conf-changes/neutron.xml:307(td) ./doc/config-reference/conf-changes/ceilometer.xml:272(td) ./doc/config-reference/conf-changes/trove.xml:350(td) ./doc/config-reference/conf-changes/glance.xml:220(td) ./doc/config-reference/conf-changes/heat.xml:278(td) -msgid "%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:780(td) ./doc/config-reference/conf-changes/manila.xml:354(td) ./doc/config-reference/conf-changes/keystone.xml:188(td) ./doc/config-reference/conf-changes/ironic.xml:287(td) ./doc/config-reference/conf-changes/sahara.xml:236(td) ./doc/config-reference/conf-changes/nova.xml:307(td) ./doc/config-reference/conf-changes/neutron.xml:330(td) ./doc/config-reference/conf-changes/ceilometer.xml:275(td) ./doc/config-reference/conf-changes/trove.xml:363(td) ./doc/config-reference/conf-changes/glance.xml:223(td) ./doc/config-reference/conf-changes/heat.xml:281(td) -msgid "[DEFAULT] rpc_zmq_matchmaker" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:781(td) ./doc/config-reference/conf-changes/manila.xml:355(td) ./doc/config-reference/conf-changes/keystone.xml:189(td) ./doc/config-reference/conf-changes/ironic.xml:288(td) ./doc/config-reference/conf-changes/sahara.xml:237(td) ./doc/config-reference/conf-changes/nova.xml:308(td) ./doc/config-reference/conf-changes/neutron.xml:331(td) ./doc/config-reference/conf-changes/ceilometer.xml:276(td) ./doc/config-reference/conf-changes/trove.xml:364(td) ./doc/config-reference/conf-changes/glance.xml:224(td) ./doc/config-reference/conf-changes/heat.xml:282(td) -msgid "local" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:782(td) ./doc/config-reference/conf-changes/manila.xml:356(td) ./doc/config-reference/conf-changes/keystone.xml:190(td) ./doc/config-reference/conf-changes/ironic.xml:289(td) ./doc/config-reference/conf-changes/sahara.xml:238(td) ./doc/config-reference/conf-changes/nova.xml:309(td) ./doc/config-reference/conf-changes/neutron.xml:332(td) ./doc/config-reference/conf-changes/ceilometer.xml:277(td) ./doc/config-reference/conf-changes/trove.xml:365(td) ./doc/config-reference/conf-changes/glance.xml:225(td) ./doc/config-reference/conf-changes/heat.xml:283(td) -msgid "redis" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:785(td) -msgid "[DEFAULT] storwize_svc_npiv_compatibility_mode" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:790(td) ./doc/config-reference/conf-changes/manila.xml:364(td) ./doc/config-reference/conf-changes/keystone.xml:193(td) ./doc/config-reference/conf-changes/ironic.xml:297(td) ./doc/config-reference/conf-changes/sahara.xml:241(td) ./doc/config-reference/conf-changes/nova.xml:317(td) ./doc/config-reference/conf-changes/neutron.xml:335(td) ./doc/config-reference/conf-changes/ceilometer.xml:280(td) ./doc/config-reference/conf-changes/trove.xml:373(td) ./doc/config-reference/conf-changes/glance.xml:228(td) ./doc/config-reference/conf-changes/heat.xml:286(td) -msgid "[DEFAULT] use_syslog_rfc_format" -msgstr "" - -#: 
./doc/config-reference/conf-changes/cinder.xml:795(td) ./doc/config-reference/conf-changes/manila.xml:369(td) ./doc/config-reference/conf-changes/keystone.xml:198(td) ./doc/config-reference/conf-changes/ironic.xml:302(td) ./doc/config-reference/conf-changes/sahara.xml:246(td) ./doc/config-reference/conf-changes/nova.xml:322(td) ./doc/config-reference/conf-changes/neutron.xml:340(td) ./doc/config-reference/conf-changes/ceilometer.xml:285(td) ./doc/config-reference/conf-changes/trove.xml:378(td) ./doc/config-reference/conf-changes/glance.xml:233(td) ./doc/config-reference/conf-changes/heat.xml:291(td) -msgid "[DEFAULT] verbose" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:800(td) -msgid "[DEFAULT] vmware_volume_folder" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:801(td) -msgid "cinder-volumes" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:802(td) -msgid "Volumes" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:805(td) -msgid "[DEFAULT] volume_driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:806(td) -msgid "cinder.volume.drivers.lvm.LVMISCSIDriver" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:807(td) -msgid "cinder.volume.drivers.lvm.LVMVolumeDriver" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:810(td) ./doc/config-reference/conf-changes/manila.xml:374(td) ./doc/config-reference/conf-changes/keystone.xml:273(td) ./doc/config-reference/conf-changes/ironic.xml:307(td) ./doc/config-reference/conf-changes/sahara.xml:251(td) ./doc/config-reference/conf-changes/nova.xml:337(td) ./doc/config-reference/conf-changes/neutron.xml:355(td) ./doc/config-reference/conf-changes/ceilometer.xml:290(td) ./doc/config-reference/conf-changes/trove.xml:383(td) ./doc/config-reference/conf-changes/glance.xml:238(td) ./doc/config-reference/conf-changes/heat.xml:301(td) -msgid "[matchmaker_redis] password" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:811(td) ./doc/config-reference/conf-changes/cinder.xml:832(td) ./doc/config-reference/conf-changes/cinder.xml:844(td) ./doc/config-reference/conf-changes/cinder.xml:864(td) ./doc/config-reference/conf-changes/cinder.xml:888(td) ./doc/config-reference/conf-changes/manila.xml:375(td) ./doc/config-reference/conf-changes/manila.xml:408(td) ./doc/config-reference/conf-changes/manila.xml:432(td) ./doc/config-reference/conf-changes/keystone.xml:205(td) ./doc/config-reference/conf-changes/keystone.xml:210(td) ./doc/config-reference/conf-changes/keystone.xml:215(td) ./doc/config-reference/conf-changes/keystone.xml:220(td) ./doc/config-reference/conf-changes/keystone.xml:274(td) ./doc/config-reference/conf-changes/keystone.xml:325(td) ./doc/config-reference/conf-changes/keystone.xml:329(td) ./doc/config-reference/conf-changes/ironic.xml:293(td) ./doc/config-reference/conf-changes/ironic.xml:308(td) ./doc/config-reference/conf-changes/ironic.xml:329(td) ./doc/config-reference/conf-changes/ironic.xml:341(td) ./doc/config-reference/conf-changes/ironic.xml:345(td) ./doc/config-reference/conf-changes/ironic.xml:349(td) ./doc/config-reference/conf-changes/sahara.xml:252(td) ./doc/config-reference/conf-changes/sahara.xml:277(td) ./doc/config-reference/conf-changes/sahara.xml:281(td) ./doc/config-reference/conf-changes/nova.xml:279(td) ./doc/config-reference/conf-changes/nova.xml:338(td) ./doc/config-reference/conf-changes/nova.xml:359(td) ./doc/config-reference/conf-changes/nova.xml:367(td) 
./doc/config-reference/conf-changes/nova.xml:371(td) ./doc/config-reference/conf-changes/nova.xml:379(td) ./doc/config-reference/conf-changes/nova.xml:387(td) ./doc/config-reference/conf-changes/nova.xml:391(td) ./doc/config-reference/conf-changes/nova.xml:399(td) ./doc/config-reference/conf-changes/nova.xml:403(td) ./doc/config-reference/conf-changes/nova.xml:411(td) ./doc/config-reference/conf-changes/nova.xml:415(td) ./doc/config-reference/conf-changes/nova.xml:419(td) ./doc/config-reference/conf-changes/nova.xml:427(td) ./doc/config-reference/conf-changes/nova.xml:435(td) ./doc/config-reference/conf-changes/nova.xml:439(td) ./doc/config-reference/conf-changes/nova.xml:443(td) ./doc/config-reference/conf-changes/nova.xml:447(td) ./doc/config-reference/conf-changes/nova.xml:459(td) ./doc/config-reference/conf-changes/nova.xml:463(td) ./doc/config-reference/conf-changes/nova.xml:471(td) ./doc/config-reference/conf-changes/nova.xml:475(td) ./doc/config-reference/conf-changes/neutron.xml:287(td) ./doc/config-reference/conf-changes/neutron.xml:356(td) ./doc/config-reference/conf-changes/neutron.xml:387(td) ./doc/config-reference/conf-changes/neutron.xml:395(td) ./doc/config-reference/conf-changes/neutron.xml:399(td) ./doc/config-reference/conf-changes/ceilometer.xml:291(td) ./doc/config-reference/conf-changes/ceilometer.xml:325(td) ./doc/config-reference/conf-changes/ceilometer.xml:337(td) ./doc/config-reference/conf-changes/ceilometer.xml:341(td) ./doc/config-reference/conf-changes/trove.xml:384(td) ./doc/config-reference/conf-changes/trove.xml:389(td) ./doc/config-reference/conf-changes/trove.xml:394(td) ./doc/config-reference/conf-changes/trove.xml:399(td) ./doc/config-reference/conf-changes/trove.xml:409(td) ./doc/config-reference/conf-changes/trove.xml:414(td) ./doc/config-reference/conf-changes/trove.xml:419(td) ./doc/config-reference/conf-changes/trove.xml:424(td) ./doc/config-reference/conf-changes/trove.xml:450(td) ./doc/config-reference/conf-changes/trove.xml:458(td) ./doc/config-reference/conf-changes/glance.xml:239(td) ./doc/config-reference/conf-changes/glance.xml:260(td) ./doc/config-reference/conf-changes/glance.xml:264(td) ./doc/config-reference/conf-changes/glance.xml:268(td) ./doc/config-reference/conf-changes/glance.xml:276(td) ./doc/config-reference/conf-changes/heat.xml:273(td) ./doc/config-reference/conf-changes/heat.xml:302(td) ./doc/config-reference/conf-changes/heat.xml:327(td) ./doc/config-reference/conf-changes/heat.xml:331(td) -msgid "None" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:815(td) ./doc/config-reference/conf-changes/manila.xml:379(td) ./doc/config-reference/conf-changes/keystone.xml:283(td) ./doc/config-reference/conf-changes/ironic.xml:312(td) ./doc/config-reference/conf-changes/sahara.xml:256(td) ./doc/config-reference/conf-changes/nova.xml:342(td) ./doc/config-reference/conf-changes/neutron.xml:370(td) ./doc/config-reference/conf-changes/ceilometer.xml:300(td) ./doc/config-reference/conf-changes/trove.xml:403(td) ./doc/config-reference/conf-changes/glance.xml:243(td) ./doc/config-reference/conf-changes/heat.xml:306(td) -msgid "[oslo_messaging_rabbit] heartbeat_timeout_threshold" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:817(td) ./doc/config-reference/conf-changes/manila.xml:381(td) ./doc/config-reference/conf-changes/keystone.xml:285(td) ./doc/config-reference/conf-changes/ironic.xml:314(td) ./doc/config-reference/conf-changes/sahara.xml:258(td) ./doc/config-reference/conf-changes/nova.xml:344(td) 
./doc/config-reference/conf-changes/neutron.xml:372(td) ./doc/config-reference/conf-changes/ceilometer.xml:302(td) ./doc/config-reference/conf-changes/trove.xml:405(td) ./doc/config-reference/conf-changes/glance.xml:245(td) ./doc/config-reference/conf-changes/heat.xml:308(td) -msgid "60" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:821(caption) ./doc/config-reference/conf-changes/manila.xml:385(caption) ./doc/config-reference/conf-changes/keystone.xml:314(caption) ./doc/config-reference/conf-changes/ironic.xml:318(caption) ./doc/config-reference/conf-changes/sahara.xml:262(caption) ./doc/config-reference/conf-changes/nova.xml:348(caption) ./doc/config-reference/conf-changes/neutron.xml:376(caption) ./doc/config-reference/conf-changes/ceilometer.xml:306(caption) ./doc/config-reference/conf-changes/trove.xml:439(caption) ./doc/config-reference/conf-changes/glance.xml:249(caption) ./doc/config-reference/conf-changes/heat.xml:312(caption) -msgid "Deprecated options" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:826(td) ./doc/config-reference/conf-changes/manila.xml:390(td) ./doc/config-reference/conf-changes/keystone.xml:319(td) ./doc/config-reference/conf-changes/ironic.xml:323(td) ./doc/config-reference/conf-changes/sahara.xml:267(td) ./doc/config-reference/conf-changes/nova.xml:353(td) ./doc/config-reference/conf-changes/neutron.xml:381(td) ./doc/config-reference/conf-changes/ceilometer.xml:311(td) ./doc/config-reference/conf-changes/trove.xml:444(td) ./doc/config-reference/conf-changes/glance.xml:254(td) ./doc/config-reference/conf-changes/heat.xml:317(td) -msgid "Deprecated option" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:827(td) ./doc/config-reference/conf-changes/manila.xml:391(td) ./doc/config-reference/conf-changes/keystone.xml:320(td) ./doc/config-reference/conf-changes/ironic.xml:324(td) ./doc/config-reference/conf-changes/sahara.xml:268(td) ./doc/config-reference/conf-changes/nova.xml:354(td) ./doc/config-reference/conf-changes/neutron.xml:382(td) ./doc/config-reference/conf-changes/ceilometer.xml:312(td) ./doc/config-reference/conf-changes/trove.xml:445(td) ./doc/config-reference/conf-changes/glance.xml:255(td) ./doc/config-reference/conf-changes/heat.xml:318(td) -msgid "New Option" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:831(td) ./doc/config-reference/conf-changes/manila.xml:407(td) ./doc/config-reference/conf-changes/keystone.xml:324(td) ./doc/config-reference/conf-changes/ironic.xml:348(td) ./doc/config-reference/conf-changes/sahara.xml:280(td) ./doc/config-reference/conf-changes/nova.xml:378(td) ./doc/config-reference/conf-changes/neutron.xml:386(td) ./doc/config-reference/conf-changes/ceilometer.xml:340(td) ./doc/config-reference/conf-changes/trove.xml:449(td) ./doc/config-reference/conf-changes/glance.xml:267(td) ./doc/config-reference/conf-changes/heat.xml:330(td) -msgid "[DEFAULT] use_syslog" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:835(td) ./doc/config-reference/conf-changes/manila.xml:411(td) -msgid "[DEFAULT] osapi_max_request_body_size" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:836(td) ./doc/config-reference/conf-changes/manila.xml:412(td) -msgid "[oslo_middleware] max_request_body_size" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:839(td) -msgid "[DEFAULT] eqlx_chap_password" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:840(td) -msgid "[DEFAULT] chap_password" -msgstr "" - -#: 
./doc/config-reference/conf-changes/cinder.xml:843(td) -msgid "[DEFAULT] datera_api_token" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:847(td) -msgid "[DEFAULT] glusterfs_sparsed_volumes" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:848(td) ./doc/config-reference/conf-changes/cinder.xml:852(td) -msgid "[DEFAULT] nas_volume_prov_type" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:851(td) -msgid "[DEFAULT] glusterfs_qcow2_volumes" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:855(td) -msgid "[DEFAULT] eqlx_use_chap" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:856(td) -msgid "[DEFAULT] use_chap_auth" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:859(td) ./doc/config-reference/conf-changes/manila.xml:423(td) ./doc/config-reference/conf-changes/keystone.xml:332(td) ./doc/config-reference/conf-changes/ironic.xml:336(td) ./doc/config-reference/conf-changes/sahara.xml:272(td) ./doc/config-reference/conf-changes/nova.xml:450(td) ./doc/config-reference/conf-changes/neutron.xml:390(td) ./doc/config-reference/conf-changes/ceilometer.xml:332(td) ./doc/config-reference/conf-changes/trove.xml:453(td) ./doc/config-reference/conf-changes/glance.xml:271(td) ./doc/config-reference/conf-changes/heat.xml:322(td) -msgid "[DEFAULT] rpc_thread_pool_size" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:860(td) ./doc/config-reference/conf-changes/manila.xml:424(td) ./doc/config-reference/conf-changes/keystone.xml:333(td) ./doc/config-reference/conf-changes/ironic.xml:337(td) ./doc/config-reference/conf-changes/sahara.xml:273(td) ./doc/config-reference/conf-changes/nova.xml:451(td) ./doc/config-reference/conf-changes/neutron.xml:391(td) ./doc/config-reference/conf-changes/ceilometer.xml:333(td) ./doc/config-reference/conf-changes/trove.xml:454(td) ./doc/config-reference/conf-changes/glance.xml:272(td) ./doc/config-reference/conf-changes/heat.xml:323(td) -msgid "[DEFAULT] executor_thread_pool_size" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:863(td) ./doc/config-reference/conf-changes/manila.xml:334(td) -msgid "[DEFAULT] enable_v1_api" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:867(td) -msgid "[DEFAULT] netapp_volume_list" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:868(td) ./doc/config-reference/conf-changes/cinder.xml:872(td) -msgid "[DEFAULT] netapp_pool_name_search_pattern" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:871(td) -msgid "[DEFAULT] netapp_storage_pools" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:875(td) ./doc/config-reference/conf-changes/glance.xml:213(td) -msgid "[DEFAULT] host" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:876(td) -msgid "[DEFAULT] backend_host" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:879(td) -msgid "[DEFAULT] netapp_eseries_host_type" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:880(td) -msgid "[DEFAULT] netapp_host_type" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:883(td) -msgid "[DEFAULT] eqlx_chap_login" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:884(td) -msgid "[DEFAULT] chap_username" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:887(td) ./doc/config-reference/conf-changes/manila.xml:431(td) ./doc/config-reference/conf-changes/keystone.xml:328(td) ./doc/config-reference/conf-changes/ironic.xml:340(td) 
./doc/config-reference/conf-changes/sahara.xml:276(td) ./doc/config-reference/conf-changes/nova.xml:426(td) ./doc/config-reference/conf-changes/neutron.xml:398(td) ./doc/config-reference/conf-changes/ceilometer.xml:336(td) ./doc/config-reference/conf-changes/trove.xml:457(td) ./doc/config-reference/conf-changes/glance.xml:263(td) ./doc/config-reference/conf-changes/heat.xml:326(td) -msgid "[DEFAULT] log_format" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:891(td) -msgid "[DEFAULT] storage_vnx_pool_name" -msgstr "" - -#: ./doc/config-reference/conf-changes/cinder.xml:892(td) -msgid "[DEFAULT] storage_vnx_pool_names" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/manila.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for Shared File Systems service" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:16(td) -msgid "[DEFAULT] client_socket_timeout = 900" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:17(td) -msgid "(IntOpt) Timeout for client connections socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:20(td) -msgid "[DEFAULT] emc_nas_root_dir = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:21(td) -msgid "(StrOpt) The root directory where shares will be located." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:24(td) -msgid "[DEFAULT] enable_periodic_hooks = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:25(td) -msgid "(BoolOpt) Whether to enable periodic hooks or not." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:28(td) -msgid "[DEFAULT] enable_post_hooks = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:29(td) -msgid "(BoolOpt) Whether to enable post hooks or not." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:32(td) -msgid "[DEFAULT] enable_pre_hooks = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:33(td) -msgid "(BoolOpt) Whether to enable pre hooks or not." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:40(td) -msgid "[DEFAULT] glusterfs_share_layout = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:41(td) -msgid "(StrOpt) Specifies GlusterFS share layout, that is, the method of associating backing GlusterFS resources to shares." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:44(td) -msgid "[DEFAULT] hds_hnas_cluster_admin_ip0 = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:45(td) -msgid "(StrOpt) The IP of the clusters admin node. Only set in HNAS multinode clusters." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:48(td) -msgid "[DEFAULT] hds_hnas_evs_id = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:49(td) -msgid "(StrOpt) Specify which EVS this backend is assigned to." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:52(td) -msgid "[DEFAULT] hds_hnas_evs_ip = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:53(td) -msgid "(StrOpt) Specify IP for mounting shares." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:56(td) -msgid "[DEFAULT] hds_hnas_file_system_name = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:57(td) -msgid "(StrOpt) Specify file-system name for creating shares." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:60(td) -msgid "[DEFAULT] hds_hnas_ip = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:61(td) -msgid "(StrOpt) HNAS management interface IP for communication between Manila controller and HNAS." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:64(td) -msgid "[DEFAULT] hds_hnas_password = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:65(td) -msgid "(StrOpt) HNAS user password. Required only if private key is not provided." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:68(td) -msgid "[DEFAULT] hds_hnas_ssh_private_key = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:69(td) -msgid "(StrOpt) RSA/DSA private key value used to connect into HNAS. Required only if password is not provided." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:72(td) -msgid "[DEFAULT] hds_hnas_stalled_job_timeout = 30" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:73(td) -msgid "(IntOpt) The time (in seconds) to wait for stalled HNAS jobs before aborting." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:76(td) -msgid "[DEFAULT] hds_hnas_user = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:77(td) -msgid "(StrOpt) HNAS username Base64 String in order to perform tasks such as create file-systems and network interfaces." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:80(td) -msgid "[DEFAULT] hook_drivers =" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:81(td) -msgid "(ListOpt) Driver(s) to perform some additional actions before and after share driver actions and on a periodic basis. Default is []." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:84(td) -msgid "[DEFAULT] max_over_subscription_ratio = 20.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:85(td) -msgid "(FloatOpt) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is invalid." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:88(td) -msgid "[DEFAULT] max_time_to_extend_volume = 180" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:89(td) -msgid "(IntOpt) Maximum time to wait for extending cinder volume." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:92(td) -msgid "[DEFAULT] migration_create_delete_share_timeout = 300" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:93(td) -msgid "(IntOpt) Timeout for creating and deleting share instances when performing share migration (seconds)." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:96(td) -msgid "[DEFAULT] migration_data_copy_node_ip = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:97(td) -msgid "(StrOpt) The IP of the node responsible for copying data during migration, such as the data copy service node, reachable by the backend." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:100(td) -msgid "[DEFAULT] migration_ignore_files = lost+found" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:101(td) -msgid "(ListOpt) List of files and folders to be ignored when migrating shares. Items should be names (not including any path)." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:104(td) -msgid "[DEFAULT] migration_mounting_backend_ip = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:105(td) -msgid "(StrOpt) Backend IP in admin network to use for mounting shares during migration." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:108(td) -msgid "[DEFAULT] migration_protocol_mount_command = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:109(td) -msgid "(StrOpt) The command for mounting shares for this backend. Must specifythe executable and all necessary parameters for the protocol supported. It is advisable to separate protocols per backend." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:112(td) -msgid "[DEFAULT] migration_readonly_support = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:113(td) -msgid "(BoolOpt) Specify whether read only access mode is supported in thisbackend." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:116(td) -msgid "[DEFAULT] migration_tmp_location = /tmp/" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:117(td) -msgid "(StrOpt) Temporary path to create and mount shares during migration." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:120(td) -msgid "[DEFAULT] migration_wait_access_rules_timeout = 90" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:121(td) -msgid "(IntOpt) Time to wait for access rules to be allowed/denied on backends when migrating shares using generic approach (seconds)." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:124(td) -msgid "[DEFAULT] nova_api_microversion = 2.10" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:125(td) -msgid "(StrOpt) Version of Nova API to be used." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:128(td) -msgid "[DEFAULT] osapi_share_workers = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:129(td) -msgid "(IntOpt) Number of workers for OpenStack Share API service." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:136(td) -msgid "[DEFAULT] periodic_hooks_interval = 300.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:137(td) -msgid "(FloatOpt) Interval in seconds between execution of periodic hooks. Used when option 'enable_periodic_hooks' is set to True. Default is 300." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:140(td) -msgid "[DEFAULT] pool_weight_multiplier = 1.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:141(td) -msgid "(FloatOpt) Multiplier used for weighing pools which have existing share servers. Negative numbers mean to spread vs stack." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:164(td) -msgid "[DEFAULT] suppress_post_hooks_errors = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:165(td) -msgid "(BoolOpt) Whether to suppress post hook errors (allow driver's results to pass through) or not." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:168(td) -msgid "[DEFAULT] suppress_pre_hooks_errors = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:169(td) -msgid "(BoolOpt) Whether to suppress pre hook errors (allow driver perform actions) or not." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:176(td) -msgid "[DEFAULT] winrm_cert_key_pem_path = ~/.ssl/key.pem" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:177(td) -msgid "(StrOpt) Path to the x509 certificate key." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:180(td) -msgid "[DEFAULT] winrm_cert_pem_path = ~/.ssl/cert.pem" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:181(td) -msgid "(StrOpt) Path to the x509 certificate used for accessing the serviceinstance." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:184(td) -msgid "[DEFAULT] winrm_conn_timeout = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:185(td) -msgid "(IntOpt) WinRM connection timeout." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:188(td) -msgid "[DEFAULT] winrm_operation_timeout = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:189(td) -msgid "(IntOpt) WinRM operation timeout." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:192(td) -msgid "[DEFAULT] winrm_retry_count = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:193(td) -msgid "(IntOpt) WinRM retry count." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:196(td) -msgid "[DEFAULT] winrm_retry_interval = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:197(td) -msgid "(IntOpt) WinRM retry interval in seconds" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:200(td) -msgid "[DEFAULT] winrm_use_cert_based_auth = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:201(td) -msgid "(BoolOpt) Use x509 certificates in order to authenticate to theservice instance." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:204(td) -msgid "[DEFAULT] wsgi_keep_alive = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:205(td) -msgid "(BoolOpt) If False, closes the client socket connection explicitly. Setting it to True to maintain backward compatibility. Recommended setting is set it to False." -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:296(td) ./doc/config-reference/conf-changes/ironic.xml:252(td) ./doc/config-reference/conf-changes/glance.xml:172(td) -msgid "[oslo_middleware] max_request_body_size = 114688" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:297(td) ./doc/config-reference/conf-changes/ironic.xml:253(td) ./doc/config-reference/conf-changes/glance.xml:173(td) -msgid "(IntOpt) The maximum body size for each request, in bytes." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:330(td) ./doc/config-reference/conf-changes/ironic.xml:278(td) ./doc/config-reference/conf-changes/ceilometer.xml:266(td) ./doc/config-reference/conf-changes/trove.xml:339(td) -msgid "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:339(td) -msgid "[DEFAULT] enable_v2_api" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:349(td) -msgid "[DEFAULT] osapi_share_listen" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:350(td) -msgid "0.0.0.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:351(td) -msgid "::" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:359(td) ./doc/config-reference/conf-changes/nova.xml:312(td) -msgid "[DEFAULT] scheduler_default_filters" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:360(td) -msgid "AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:361(td) -msgid "AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter, ConsistencyGroupFilter" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:395(td) -msgid "[DEFAULT] glusterfs_native_server_password" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:396(td) -msgid "[DEFAULT] glusterfs_server_password" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:399(td) -msgid "[DEFAULT] sql_max_retries" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:400(td) -msgid "[database] max_retries" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:403(td) -msgid "[DEFAULT] sql_retry_interval" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:404(td) -msgid "[database] retry_interval" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:415(td) -msgid "[DEFAULT] glusterfs_native_path_to_private_key" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:416(td) -msgid "[DEFAULT] glusterfs_path_to_private_key" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:419(td) -msgid "[DEFAULT] sql_idle_timeout" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:420(td) -msgid "[database] idle_timeout" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:427(td) -msgid "[DEFAULT] db_backend" -msgstr "" - -#: ./doc/config-reference/conf-changes/manila.xml:428(td) -msgid "[database] backend" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/keystone.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for OpenStack Identity" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:20(td) ./doc/config-reference/conf-changes/sahara.xml:32(td) -msgid "[DEFAULT] host = 127.0.0.1" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:21(td) ./doc/config-reference/conf-changes/sahara.xml:33(td) -msgid "(StrOpt) Host to locate redis." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:104(td) -msgid "[endpoint_policy] enabled = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:105(td) -msgid "(BoolOpt) Enable endpoint_policy functionality." -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:148(td) -msgid "[tokenless_auth] issuer_attribute = SSL_CLIENT_I_DN" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:149(td) -msgid "(StrOpt) The issuer attribute that is served as an IdP ID for the X.509 tokenless authorization along with the protocol to look up its corresponding mapping. It is the environment variable in the WSGI environment that references to the issuer of the client certificate." -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:152(td) -msgid "[tokenless_auth] protocol = x509" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:153(td) -msgid "(StrOpt) The protocol name for the X.509 tokenless authorization along with the option issuer_attribute below can look up its corresponding mapping." -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:156(td) -msgid "[tokenless_auth] trusted_issuer = []" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:157(td) -msgid "(MultiStrOpt) The list of trusted issuers to further filter the certificates that are allowed to participate in the X.509 tokenless authorization. If the option is absent then no certificates will be allowed. The naming format for the attributes of a Distinguished Name(DN) must be separated by a comma and contain no spaces. This configuration option may be repeated for multiple values. For example: trusted_issuer=CN=john,OU=keystone,O=openstack trusted_issuer=CN=mary,OU=eng,O=abc" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:173(td) -msgid "[DEFAULT] crypt_strength" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:174(td) -msgid "40000" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:175(td) -msgid "10000" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:203(td) -msgid "[auth] external" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:204(td) -msgid "keystone.auth.plugins.external.DefaultDomain" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:208(td) -msgid "[auth] oauth1" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:209(td) -msgid "keystone.auth.plugins.oauth1.OAuth" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:213(td) -msgid "[auth] password" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:214(td) -msgid "keystone.auth.plugins.password.Password" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:218(td) -msgid "[auth] token" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:219(td) -msgid "keystone.auth.plugins.token.Token" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:223(td) -msgid "[catalog] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:224(td) -msgid "keystone.catalog.backends.sql.Catalog" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:225(td) ./doc/config-reference/conf-changes/keystone.xml:230(td) ./doc/config-reference/conf-changes/keystone.xml:235(td) ./doc/config-reference/conf-changes/keystone.xml:240(td) ./doc/config-reference/conf-changes/keystone.xml:245(td) ./doc/config-reference/conf-changes/keystone.xml:250(td) 
./doc/config-reference/conf-changes/keystone.xml:255(td) ./doc/config-reference/conf-changes/keystone.xml:260(td) ./doc/config-reference/conf-changes/keystone.xml:280(td) ./doc/config-reference/conf-changes/keystone.xml:290(td) ./doc/config-reference/conf-changes/keystone.xml:295(td) ./doc/config-reference/conf-changes/keystone.xml:300(td) ./doc/config-reference/conf-changes/keystone.xml:310(td) -msgid "sql" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:228(td) -msgid "[credential] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:229(td) -msgid "keystone.credential.backends.sql.Credential" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:233(td) -msgid "[domain_config] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:234(td) -msgid "keystone.resource.config_backends.sql.DomainConfig" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:238(td) -msgid "[endpoint_filter] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:239(td) -msgid "keystone.contrib.endpoint_filter.backends.sql.EndpointFilter" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:243(td) -msgid "[endpoint_policy] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:244(td) -msgid "keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:248(td) -msgid "[federation] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:249(td) -msgid "keystone.contrib.federation.backends.sql.Federation" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:253(td) -msgid "[identity] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:254(td) -msgid "keystone.identity.backends.sql.Identity" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:258(td) -msgid "[identity_mapping] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:259(td) -msgid "keystone.identity.mapping_backends.sql.Mapping" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:263(td) -msgid "[identity_mapping] generator" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:264(td) -msgid "keystone.identity.id_generators.sha256.Generator" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:265(td) ./doc/config-reference/conf-changes/glance.xml:210(td) -msgid "sha256" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:268(td) -msgid "[ldap] user_attribute_ignore" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:269(td) -msgid "default_project_id, tenants" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:270(td) -msgid "default_project_id" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:278(td) -msgid "[oauth1] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:279(td) -msgid "keystone.contrib.oauth1.backends.sql.OAuth1" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:288(td) -msgid "[policy] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:289(td) -msgid "keystone.policy.backends.sql.Policy" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:293(td) -msgid "[revoke] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:294(td) -msgid "keystone.contrib.revoke.backends.sql.Revoke" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:298(td) -msgid 
"[token] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:299(td) -msgid "keystone.token.persistence.backends.sql.Token" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:303(td) -msgid "[token] provider" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:304(td) -msgid "keystone.token.providers.uuid.Provider" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:305(td) -msgid "uuid" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:308(td) -msgid "[trust] driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/keystone.xml:309(td) -msgid "keystone.trust.backends.sql.Trust" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/ironic.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for Bare metal service" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:52(td) -msgid "[agent] manage_agent_boot = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:53(td) -msgid "(BoolOpt) Whether Ironic will manage booting of the agent ramdisk. If set to False, you will need to configure your mechanism to allow booting the agent ramdisk." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:56(td) -msgid "[agent] memory_consumed_by_agent = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:57(td) -msgid "(IntOpt) The memory size in MiB consumed by agent when it is booted on a bare metal node. This is used for checking if the image can be downloaded and deployed on the bare metal node after booting agent ramdisk. This may be set according to the memory consumed by the agent ramdisk image." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:60(td) -msgid "[agent] post_deploy_get_power_state_retries = 6" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:61(td) -msgid "(IntOpt) Number of times to retry getting power state to check if bare metal node has been powered off after a soft power off." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:64(td) -msgid "[agent] post_deploy_get_power_state_retry_interval = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:65(td) -msgid "(IntOpt) Amount of time (in seconds) to wait between polling power state after trigger soft poweroff." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:68(td) -msgid "[api] public_endpoint = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:69(td) -msgid "(StrOpt) Public URL to use when building the links to the API resources (for example, \"https://ironic.rocks:6384\"). If None the links will be built using the request's host URL. If the API is operating behind a proxy, you will want to change this to represent the proxy's URL. Defaults to None." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:72(td) -msgid "[cimc] action_interval = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:73(td) ./doc/config-reference/conf-changes/ironic.xml:81(td) -msgid "(IntOpt) Amount of time in seconds to wait in between power operations" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:76(td) -msgid "[cimc] max_retry = 6" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:77(td) ./doc/config-reference/conf-changes/ironic.xml:85(td) -msgid "(IntOpt) Number of times a power operation needs to be retried" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:80(td) -msgid "[cisco_ucs] action_interval = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:84(td) -msgid "[cisco_ucs] max_retry = 6" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:88(td) -msgid "[conductor] clean_callback_timeout = 1800" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:89(td) -msgid "(IntOpt) Timeout (seconds) to wait for a callback from the ramdisk doing the cleaning. If the timeout is reached the node will be put in the \"clean failed\" provision state. Set to 0 to disable timeout." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:140(td) -msgid "[deploy] erase_devices_iterations = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:141(td) -msgid "(IntOpt) Number of iterations to be run for erasing devices." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:144(td) -msgid "[deploy] erase_devices_priority = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:145(td) -msgid "(IntOpt) Priority to run in-band erase devices via the Ironic Python Agent ramdisk. If unset, will use the priority set in the ramdisk (defaults to 10 for the GenericHardwareManager). If set to 0, will not run during cleaning." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:148(td) -msgid "[deploy] http_root = /httpboot" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:149(td) -msgid "(StrOpt) ironic-conductor node's HTTP root path." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:152(td) -msgid "[deploy] http_url = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:153(td) -msgid "(StrOpt) ironic-conductor node's HTTP server URL. Example: http://192.1.2.3:8080" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:156(td) -msgid "[drac] client_retry_count = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:157(td) -msgid "(IntOpt) In case there is a communication failure, the DRAC client is going to resend the request as many times as defined in this setting." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:160(td) -msgid "[drac] client_retry_delay = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:161(td) -msgid "(IntOpt) In case there is a communication failure, the DRAC client is going to wait for as many seconds as defined in this setting before resending the request." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:164(td) -msgid "[iboot] max_retry = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:165(td) -msgid "(IntOpt) Maximum retries for iBoot operations" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:168(td) -msgid "[iboot] retry_interval = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:169(td) -msgid "(IntOpt) Time between retry attempts for iBoot operations" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:172(td) -msgid "[ilo] use_web_server_for_images = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:173(td) -msgid "(BoolOpt) Set this to True to use http web server to host floppy images and generated boot ISO. This requires http_root and http_url to be configured in the [deploy] section of the config file. If this is set to False, then Ironic will use Swift to host the floppy images and generated boot_iso." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:176(td) -msgid "[inspector] enabled = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:177(td) -msgid "(BoolOpt) whether to enable inspection using ironic-inspector" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:180(td) -msgid "[inspector] service_url = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:181(td) -msgid "(StrOpt) ironic-inspector HTTP endpoint. If this is not set, the ironic-inspector client default (http://127.0.0.1:5050) will be used." -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:184(td) -msgid "[inspector] status_check_period = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:185(td) -msgid "(IntOpt) period (in seconds) to check status of nodes on inspection" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:188(td) -msgid "[irmc] remote_image_server = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:189(td) -msgid "(StrOpt) IP of remote image server" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:192(td) -msgid "[irmc] remote_image_share_name = share" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:193(td) -msgid "(StrOpt) share name of remote_image_server" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:196(td) -msgid "[irmc] remote_image_share_root = /remote_image_share_root" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:197(td) -msgid "(StrOpt) Ironic conductor node's \"NFS\" or \"CIFS\" root path" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:200(td) -msgid "[irmc] remote_image_share_type = CIFS" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:201(td) -msgid "(StrOpt) Share type of virtual media, either \"NFS\" or \"CIFS\"" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:204(td) -msgid "[irmc] remote_image_user_domain =" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:205(td) -msgid "(StrOpt) Domain name of remote_image_user_name" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:208(td) -msgid "[irmc] remote_image_user_name = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:209(td) -msgid "(StrOpt) User name of remote_image_server" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:212(td) -msgid "[irmc] remote_image_user_password = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:213(td) -msgid "(StrOpt) Password of 
remote_image_user_name" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:292(td) -msgid "[DEFAULT] tempdir" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:294(td) -msgid "/tmp" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:328(td) -msgid "[agent] agent_pxe_append_params" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:332(td) -msgid "[agent] agent_erase_devices_priority" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:333(td) -msgid "[deploy] erase_devices_priority" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:344(td) -msgid "[agent] agent_pxe_config_template" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:352(td) -msgid "[agent] manage_tftp" -msgstr "" - -#: ./doc/config-reference/conf-changes/ironic.xml:353(td) -msgid "[agent] manage_agent_boot" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/sahara.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for Data Processing service" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:16(td) -msgid "[DEFAULT] cluster_operation_trust_expiration_hours = 24" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:17(td) -msgid "(IntOpt) Defines the period of time (in hours) after which trusts created to allow sahara to create or scale a cluster will expire. Note that this value should be significantly larger than the value of the cleanup_time_for_incomplete_clusters configuration key if use of the cluster cleanup feature is desired (the trust must last at least as long as a cluster could validly take to stall in its creation, plus the timeout value set in that key, plus one hour for the period of the cleanup job)." -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:20(td) -msgid "[DEFAULT] default_ntp_server = pool.ntp.org" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:21(td) -msgid "(StrOpt) Default ntp server for time sync" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:28(td) -msgid "[DEFAULT] heat_stack_tags = data-processing-cluster" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:29(td) -msgid "(ListOpt) List of tags to be used during operating with stack." -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:68(td) -msgid "[cinder] endpoint_type = internalURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:69(td) -msgid "(StrOpt) Endpoint type for cinder client requests" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:120(td) -msgid "[heat] endpoint_type = internalURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:121(td) -msgid "(StrOpt) Endpoint type for heat client requests" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:124(td) -msgid "[keystone] endpoint_type = internalURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:125(td) -msgid "(StrOpt) Endpoint type for keystone client requests" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:132(td) -msgid "[manila] api_insecure = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:133(td) -msgid "(BoolOpt) Allow to perform insecure SSL requests to manila." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:136(td) -msgid "[manila] api_version = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:137(td) -msgid "(IntOpt) Version of the manila API to use." -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:140(td) -msgid "[manila] ca_file = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:141(td) -msgid "(StrOpt) Location of ca certificates file to use for manila client requests." -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:144(td) -msgid "[neutron] endpoint_type = internalURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:145(td) -msgid "(StrOpt) Endpoint type for neutron client requests" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:148(td) -msgid "[nova] endpoint_type = internalURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:149(td) -msgid "(StrOpt) Endpoint type for nova client requests" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:152(td) -msgid "[object_store_access] public_identity_ca_file = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:153(td) -msgid "(StrOpt) Location of ca certificate file to use for identity client requests via public endpoint" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:156(td) -msgid "[object_store_access] public_object_store_ca_file = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:157(td) -msgid "(StrOpt) Location of ca certificate file to use for object-store client requests via public endpoint" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:196(td) -msgid "[retries] retries_number = 5" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:197(td) -msgid "(IntOpt) Number of times to retry the request to client before failing" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:200(td) -msgid "[retries] retry_after = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:201(td) -msgid "(IntOpt) Time between the retries to client (in seconds)." -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:204(td) -msgid "[swift] endpoint_type = internalURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:205(td) -msgid "(StrOpt) Endpoint type for swift client requests" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:222(td) -msgid "amqplib=WARN, qpid.messaging=INFO, stevedore=INFO, eventlet.wsgi.server=WARN, sqlalchemy=WARN, boto=WARN, suds=INFO, keystone=INFO, paramiko=WARN, requests=WARN, iso8601=WARN, oslo_messaging=INFO" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:223(td) -msgid "amqplib=WARN, qpid.messaging=INFO, stevedore=INFO, eventlet.wsgi.server=WARN, sqlalchemy=WARN, boto=WARN, suds=INFO, keystone=INFO, paramiko=WARN, requests=WARN, iso8601=WARN, oslo_messaging=INFO, neutronclient=INFO" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:226(td) -msgid "[DEFAULT] infrastructure_engine" -msgstr "" - -#: ./doc/config-reference/conf-changes/sahara.xml:227(td) -msgid "direct" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. 
-#: ./doc/config-reference/conf-changes/nova.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for OpenStack Compute" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:16(td) -msgid "[DEFAULT] console_allowed_origins =" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:17(td) -msgid "(ListOpt) Allowed Origin header hostnames for access to console proxy servers" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:24(td) -msgid "[DEFAULT] max_concurrent_live_migrations = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:25(td) -msgid "(IntOpt) Maximum number of live migrations to run concurrently. This limit is enforced to avoid outbound live migrations overwhelming the host/network and causing failures. It is not recommended that you change this unless you are very sure that doing so is safe and stable in your environment." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:52(td) -msgid "[DEFAULT] secure_proxy_ssl_header = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:53(td) -msgid "(StrOpt) The HTTP header used to determine the scheme for the original request, even if it was removed by an SSL terminating proxy. Typical value is \"HTTP_X_FORWARDED_PROTO\"." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:56(td) -msgid "[DEFAULT] update_resources_interval = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:57(td) -msgid "(IntOpt) Interval in seconds for updating compute resources. A number less than 0 means to disable the task completely. Leaving this at the default of 0 will cause this to run at the default periodic interval. Setting it to any positive value will cause it to run at approximately that number of seconds." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:60(td) -msgid "[DEFAULT] use_rootwrap_daemon = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:61(td) -msgid "(BoolOpt) Start and use a daemon that can run the commands that need to be run with root privileges. This option is usually enabled on nodes that run nova compute processes" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:120(td) -msgid "[hyperv] power_state_check_timeframe = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:121(td) -msgid "(IntOpt) The timeframe to be checked for instance power state changes." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:124(td) -msgid "[hyperv] power_state_event_polling_interval = 2" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:125(td) -msgid "(IntOpt) Instance power state change event polling frequency." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:132(td) -msgid "[libvirt] live_migration_completion_timeout = 800" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:133(td) -msgid "(IntOpt) Time to wait, in seconds, for migration to successfully complete transferring data before aborting the operation. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB. Should usually be larger than downtime delay * downtime steps. Set to 0 to disable timeouts." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:136(td) -msgid "[libvirt] live_migration_downtime = 500" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:137(td) -msgid "(IntOpt) Maximum permitted downtime, in milliseconds, for live migration switchover. Will be rounded up to a minimum of 100ms. 
Use a large value if guest liveness is unimportant." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:140(td) -msgid "[libvirt] live_migration_downtime_delay = 75" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:141(td) -msgid "(IntOpt) Time to wait, in seconds, between each step increase of the migration downtime. Minimum delay is 10 seconds. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB per device" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:144(td) -msgid "[libvirt] live_migration_downtime_steps = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:145(td) -msgid "(IntOpt) Number of incremental steps to reach max downtime value. Will be rounded up to a minimum of 3 steps" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:148(td) -msgid "[libvirt] live_migration_progress_timeout = 150" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:149(td) -msgid "(IntOpt) Time to wait, in seconds, for migration to make forward progress in transferring data before aborting the operation. Set to 0 to disable timeouts." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:152(td) -msgid "[libvirt] remote_filesystem_transport = ssh" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:153(td) -msgid "(StrOpt) Use ssh or rsync transport for creating, copying, removing files on the remote host." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:156(td) -msgid "[mks] enabled = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:157(td) -msgid "(BoolOpt) Enable MKS related features" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:160(td) -msgid "[mks] mksproxy_base_url = http://127.0.0.1:6090/" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:161(td) -msgid "(StrOpt) Location of MKS web console proxy, in the form \"http://127.0.0.1:6090/\"" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:164(td) -msgid "[osapi_v21] enabled = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:165(td) -msgid "(BoolOpt) DEPRECATED: Whether the V2.1 API is enabled or not. This option will be removed in the near future." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:168(td) -msgid "[osapi_v21] extensions_blacklist =" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:169(td) -msgid "(ListOpt) DEPRECATED: A list of v2.1 API extensions to never load. Specify the extension aliases here. This option will be removed in the near future. After that point you have to run all of the API." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:172(td) -msgid "[osapi_v21] extensions_whitelist =" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:173(td) -msgid "(ListOpt) DEPRECATED: If the list is not empty then a v2.1 API extension will only be loaded if it exists in this list. Specify the extension aliases here. This option will be removed in the near future. After that point you have to run all of the API." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:216(td) ./doc/config-reference/conf-changes/ceilometer.xml:244(td) -msgid "[vmware] ca_file = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:217(td) -msgid "(StrOpt) Specify a CA bundle file to use in verifying the vCenter server certificate." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:220(td) -msgid "[vmware] console_delay_seconds = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:221(td) -msgid "(IntOpt) Set this value if affected by an increased network latency causing repeated characters when typing in a remote console." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:224(td) ./doc/config-reference/conf-changes/ceilometer.xml:248(td) -msgid "[vmware] insecure = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:225(td) ./doc/config-reference/conf-changes/ceilometer.xml:249(td) -msgid "(BoolOpt) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if \"ca_file\" is set." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:228(td) -msgid "[vmware] serial_port_proxy_uri = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:229(td) -msgid "(StrOpt) Identifies a proxy service that provides network access to the serial_port_service_uri. This option is ignored if serial_port_service_uri is not specified." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:232(td) -msgid "[vmware] serial_port_service_uri = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:233(td) -msgid "(StrOpt) Identifies the remote system that serial port traffic will be sent to. If this is not set, no serial ports will be added to the created VMs." -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:236(td) -msgid "[vnc] enabled = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:237(td) -msgid "(BoolOpt) Enable VNC related features" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:240(td) -msgid "[vnc] keymap = en-us" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:241(td) -msgid "(StrOpt) Keymap for VNC" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:244(td) -msgid "[vnc] novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:245(td) -msgid "(StrOpt) Location of VNC console proxy, in the form \"http://127.0.0.1:6080/vnc_auto.html\"" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:248(td) -msgid "[vnc] vncserver_listen = 127.0.0.1" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:249(td) -msgid "(StrOpt) IP address on which instance vncservers should listen" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:252(td) -msgid "[vnc] vncserver_proxyclient_address = 127.0.0.1" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:253(td) -msgid "(StrOpt) The address to which proxy clients (like nova-xvpvncproxy) should connect" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:256(td) -msgid "[vnc] xvpvncproxy_base_url = http://127.0.0.1:6081/console" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:257(td) -msgid "(StrOpt) Location of nova xvp VNC console proxy, in the form \"http://127.0.0.1:6081/console\"" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:260(td) -msgid "[workarounds] handle_virt_lifecycle_events = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:261(td) -msgid "(BoolOpt) Whether or not to handle events raised from the compute driver's 'emit_event' method. These are lifecycle events raised from compute drivers that implement the method. 
An example of a lifecycle event is an instance starting or stopping. If the instance is going through task state changes due to an API operation, like resize, the events are ignored. However, this is an advanced feature which allows the hypervisor to signal to the compute service that an unexpected state change has occurred in an instance and the instance can be shutdown automatically - which can inherently race in reboot operations or when the compute service or host is rebooted, either planned or due to an unexpected outage. Care should be taken when using this and sync_power_state_interval is negative since then if any instances are out of sync between the hypervisor and the Nova database they will have to be synchronized manually. See https://bugs.launchpad.net/bugs/1444630" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:277(td) ./doc/config-reference/conf-changes/nova.xml:410(td) -msgid "[DEFAULT] compute_available_monitors" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:278(td) -msgid "['nova.compute.monitors.all_monitors']" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:282(td) -msgid "[DEFAULT] cpu_allocation_ratio" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:283(td) -msgid "16.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:284(td) ./doc/config-reference/conf-changes/nova.xml:304(td) -msgid "0.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:297(td) -msgid "[DEFAULT] osapi_compute_extension" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:298(td) -msgid "['nova.api.openstack.compute.contrib.standard_extensions']" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:299(td) -msgid "['nova.api.openstack.compute.legacy_v2.contrib.standard_extensions']" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:302(td) -msgid "[DEFAULT] ram_allocation_ratio" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:303(td) -msgid "1.5" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:313(td) -msgid "RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:314(td) -msgid "RetryFilter, AvailabilityZoneFilter, RamFilter, DiskFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:327(td) -msgid "[cells] mute_weight_multiplier" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:328(td) -msgid "-10.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:329(td) -msgid "-10000.0" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:332(td) ./doc/config-reference/conf-changes/nova.xml:470(td) -msgid "[libvirt] remove_unused_kernels" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:358(td) -msgid "[DEFAULT] network_device_mtu" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:362(td) -msgid "[DEFAULT] vnc_keymap" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:363(td) -msgid "[vnc] keymap" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:366(td) -msgid "[osapi_v21] extensions_whitelist" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:370(td) -msgid "[ironic] admin_auth_token" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:374(td) -msgid 
"[DEFAULT] vnc_enabled" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:375(td) -msgid "[vnc] enabled" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:382(td) -msgid "[DEFAULT] xvpvncproxy_base_url" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:383(td) -msgid "[vnc] xvpvncproxy_base_url" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:386(td) -msgid "[ironic] client_log_level" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:390(td) -msgid "[neutron] admin_username" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:394(td) -msgid "[DEFAULT] ssl_ca_file" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:395(td) -msgid "[ssl] ca_file" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:398(td) -msgid "[neutron] auth_strategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:402(td) -msgid "[osapi_v21] enabled" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:406(td) -msgid "[DEFAULT] novncproxy_base_url" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:407(td) -msgid "[vnc] novncproxy_base_url" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:414(td) -msgid "[neutron] admin_user_id" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:418(td) -msgid "[neutron] admin_tenant_id" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:422(td) -msgid "[DEFAULT] ssl_cert_file" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:423(td) -msgid "[ssl] cert_file" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:430(td) -msgid "[DEFAULT] vncserver_proxyclient_address" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:431(td) -msgid "[vnc] vncserver_proxyclient_address" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:434(td) -msgid "[osapi_v21] extensions_blacklist" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:438(td) -msgid "[workarounds] destroy_after_evacuate" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:442(td) -msgid "[neutron] admin_tenant_name" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:446(td) -msgid "[DEFAULT] osapi_compute_ext_list" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:454(td) -msgid "[DEFAULT] vncserver_listen" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:455(td) -msgid "[vnc] vncserver_listen" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:458(td) -msgid "[neutron] admin_password" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:462(td) -msgid "[DEFAULT] share_dhcp_address" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:466(td) -msgid "[DEFAULT] ssl_key_file" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:467(td) -msgid "[ssl] key_file" -msgstr "" - -#: ./doc/config-reference/conf-changes/nova.xml:474(td) -msgid "[neutron] admin_auth_url" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. 
-#: ./doc/config-reference/conf-changes/neutron.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for OpenStack Networking" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:16(td) -msgid "[DEFAULT] dns_domain = openstacklocal" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:17(td) -msgid "(StrOpt) Domain to use for building the hostnames" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:20(td) -msgid "[DEFAULT] dnsmasq_base_log_dir = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:21(td) -msgid "(StrOpt) Base log dir for dnsmasq logging. The log contains DHCP and DNS log information and is useful for debugging issues with either DHCP or DNS. If this section is null, disable dnsmasq log." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:24(td) -msgid "[DEFAULT] enable_new_agents = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:25(td) -msgid "(BoolOpt) Agent starts with admin_state_up=False when enable_new_agents=False. In the case, user's resources will not be scheduled automatically to the agent until admin changes admin_state_up to True." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:28(td) -msgid "[DEFAULT] enable_snat_by_default = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:29(td) -msgid "(BoolOpt) Define the default value of enable_snat if not provided in external_gateway_info." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:32(td) -msgid "[DEFAULT] endpoint_url = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:33(td) -msgid "(StrOpt) Neutron endpoint URL, if not set will use endpoint from the keystone catalog along with endpoint_type" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:40(td) -msgid "[DEFAULT] force_metadata = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:41(td) -msgid "(BoolOpt) Force to use DHCP to get Metadata on all networks." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:44(td) -msgid "[DEFAULT] ipam_driver = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:45(td) -msgid "(StrOpt) IPAM driver to use." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:52(td) -msgid "[DEFAULT] pd_confs = $state_path/pd" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:53(td) -msgid "(StrOpt) Location to store IPv6 PD files." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:56(td) -msgid "[DEFAULT] pd_dhcp_driver = dibbler" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:57(td) -msgid "(StrOpt) Service to handle DHCPv6 Prefix delegation." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:64(td) -msgid "[DEFAULT] prefix_delegation_driver = dibbler" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:65(td) -msgid "(StrOpt) Driver used for ipv6 prefix delegation. This needs to be an entry point defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for entry points included with the neutron source." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:84(td) -msgid "[DEFAULT] vendor_pen = 8888" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:85(td) -msgid "(StrOpt) A decimal value as Vendor's Registered Private Enterprise Number as required by RFC3315 DUID-EN." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:96(td) -msgid "[AGENT] agent_type = Open vSwitch agent" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:97(td) -msgid "(StrOpt) Selects the Agent Type reported" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:100(td) -msgid "[AGENT] drop_flows_on_start = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:101(td) -msgid "(BoolOpt) Reset flow table on start. Setting this to True will cause brief traffic interruption." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:104(td) -msgid "[AGENT] log_agent_heartbeats = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:105(td) -msgid "(BoolOpt) Log agent heartbeats" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:108(td) -msgid "[AGENT] tunnel_csum = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:109(td) -msgid "(BoolOpt) Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/VXLAN tunnel." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:112(td) -msgid "[LINUX_BRIDGE] bridge_mappings =" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:113(td) -msgid "(ListOpt) List of <physical_network>:<physical_bridge>" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:116(td) -msgid "[OVS] datapath_type = system" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:117(td) -msgid "(StrOpt) OVS datapath to use." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:120(td) -msgid "[OVS] of_connect_timeout = 30" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:121(td) -msgid "(IntOpt) Timeout in seconds to wait for the local switch connecting the controller. Used only for 'native' driver." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:124(td) -msgid "[OVS] of_interface = ovs-ofctl" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:125(td) -msgid "(StrOpt) OpenFlow interface to use." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:128(td) -msgid "[OVS] of_listen_address = 127.0.0.1" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:129(td) -msgid "(IPOpt) Address to listen on for OpenFlow connections. Used only for 'native' driver." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:132(td) -msgid "[OVS] of_listen_port = 6633" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:133(td) -msgid "(IntOpt) Port to listen on for OpenFlow connections. Used only for 'native' driver." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:136(td) -msgid "[OVS] of_request_timeout = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:137(td) -msgid "(IntOpt) Timeout in seconds to wait for a single OpenFlow request. Used only for 'native' driver." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:140(td) -msgid "[QUOTAS] quota_rbac_entry = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:141(td) -msgid "(IntOpt) Default number of RBAC entries allowed per tenant. A negative value means unlimited." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:144(td) -msgid "[QUOTAS] track_quota_usage = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:145(td) -msgid "(BoolOpt) Keep in track in the database of current resourcequota usage. 
Plugins which do not leverage the neutron database should set this flag to False" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:148(td) -msgid "[agent] extensions =" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:149(td) -msgid "(ListOpt) Extensions list to use" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:204(td) -msgid "[ml2] external_network_type = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:205(td) -msgid "(StrOpt) Default network type for external networks when no provider attributes are specified. By default it is None, which means that if provider attributes are not specified while creating external networks then they will have the same type as tenant networks. Allowed values for external_network_type config option depend on the network type values configured in type_drivers config option." -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:208(td) -msgid "[ml2_type_geneve] max_header_size = 50" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:209(td) -msgid "(IntOpt) Geneve encapsulation header size is dynamic, this value is used to calculate the maximum MTU for the driver.this is the sum of the sizes of the outer ETH + IP + UDP + GENEVE header sizes" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:212(td) -msgid "[ml2_type_geneve] vni_ranges =" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:213(td) -msgid "(ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of Geneve VNI IDs that are available for tenant network allocation" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:268(td) -msgid "[qos] notification_drivers = message_queue" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:269(td) -msgid "(ListOpt) Drivers list to use to send the update notification" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:285(td) ./doc/config-reference/conf-changes/ceilometer.xml:316(td) -msgid "[DEFAULT] api_workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:295(td) -msgid "[DEFAULT] dhcp_delete_namespaces" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:300(td) -msgid "[DEFAULT] endpoint_type" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:301(td) -msgid "publicURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:302(td) -msgid "adminURL" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:310(td) -msgid "[DEFAULT] network_scheduler_driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:311(td) -msgid "neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:312(td) -msgid "neutron.scheduler.dhcp_agent_scheduler.WeightScheduler" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:315(td) -msgid "[DEFAULT] router_delete_namespaces" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:320(td) -msgid "[DEFAULT] router_scheduler_driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:321(td) -msgid "neutron.scheduler.l3_agent_scheduler.ChanceScheduler" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:322(td) -msgid "neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:325(td) -msgid "[DEFAULT] rpc_workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:345(td) -msgid 
"[AGENT] prevent_arp_spoofing" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:350(td) -msgid "[QUOTAS] quota_driver" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:351(td) -msgid "neutron.db.quota_db.DbQuotaDriver" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:352(td) -msgid "neutron.db.quota.driver.DbQuotaDriver" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:360(td) -msgid "[ml2] type_drivers" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:361(td) -msgid "local, flat, vlan, gre, vxlan" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:362(td) -msgid "local, flat, vlan, gre, vxlan, geneve" -msgstr "" - -#: ./doc/config-reference/conf-changes/neutron.xml:365(td) ./doc/config-reference/conf-changes/neutron.xml:394(td) -msgid "[ml2_sriov] agent_required" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/ceilometer.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for Telemetry" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:16(td) -msgid "[DEFAULT] batch_polled_samples = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:17(td) -msgid "(BoolOpt) To reduce polling agent load, samples are sent to the notification agent in a batch. To gain higher throughput at the cost of load set this to False." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:20(td) -msgid "[DEFAULT] ceilometer_control_exchange = ceilometer" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:21(td) -msgid "(StrOpt) Exchange name for ceilometer notifications." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:24(td) -msgid "[DEFAULT] dns_control_exchange = central" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:25(td) -msgid "(StrOpt) Exchange name for DNS notifications." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:32(td) -msgid "[DEFAULT] magnum_control_exchange = magnum" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:33(td) -msgid "(StrOpt) Exchange name for Magnum notifications." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:40(td) -msgid "[DEFAULT] pipeline_polling_interval = 20" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:41(td) -msgid "(IntOpt) Polling interval for pipeline file configuration in seconds." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:48(td) -msgid "[DEFAULT] refresh_event_pipeline_cfg = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:49(td) -msgid "(BoolOpt) Refresh Event Pipeline configuration on-the-fly." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:52(td) -msgid "[DEFAULT] refresh_pipeline_cfg = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:53(td) -msgid "(BoolOpt) Refresh Pipeline configuration on-the-fly." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:80(td) -msgid "[alarm] alarm_max_actions = -1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:81(td) -msgid "(IntOpt) Maximum count of actions for each state of an alarm, non-positive number means no limit." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:84(td) -msgid "[api] aodh_is_enabled = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:85(td) -msgid "(BoolOpt) Set True to redirect alarms URLs to aodh. Default autodetection by querying keystone." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:88(td) -msgid "[api] aodh_url = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:89(td) -msgid "(StrOpt) The endpoint of Aodh to redirect alarms URLs to Aodh API. Default autodetection by querying keystone." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:92(td) -msgid "[api] default_api_return_limit = 100" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:93(td) -msgid "(IntOpt) Default maximum number of items returned by API request." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:96(td) -msgid "[api] gnocchi_is_enabled = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:97(td) -msgid "(BoolOpt) Set True to disable resource/meter/sample URLs. Default autodetection by querying keystone." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:100(td) -msgid "[api] workers = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:101(td) -msgid "(IntOpt) Number of workers for api, default value is 1." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:104(td) -msgid "[collector] enable_rpc = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:105(td) -msgid "(BoolOpt) Enable the RPC functionality of collector. This functionality is now deprecated in favour of notifier publisher and queues." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:108(td) -msgid "[collector] workers = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:109(td) -msgid "(IntOpt) Number of workers for collector service. default value is 1." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:160(td) -msgid "[database] alarm_history_time_to_live = -1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:161(td) -msgid "(IntOpt) Number of seconds that alarm histories are kept in the database for (<= 0 means forever)." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:164(td) -msgid "[dispatcher_gnocchi] archive_policy = low" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:165(td) -msgid "(StrOpt) The archive policy to use when the dispatcher create a new metric." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:168(td) -msgid "[dispatcher_gnocchi] archive_policy_file = gnocchi_archive_policy_map.yaml" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:169(td) -msgid "(StrOpt) The Yaml file that defines per metric archive policies." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:172(td) -msgid "[dispatcher_gnocchi] filter_project = gnocchi" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:173(td) -msgid "(StrOpt) Gnocchi project used to filter out samples generated by Gnocchi service activity" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:176(td) -msgid "[dispatcher_gnocchi] filter_service_activity = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:177(td) -msgid "(BoolOpt) Filter out samples generated by Gnocchi service activity" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:180(td) -msgid "[dispatcher_gnocchi] resources_definition_file = gnocchi_resources.yaml" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:181(td) -msgid "(StrOpt) The Yaml file that defines mapping between samples and gnocchi resources/metrics" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:184(td) -msgid "[dispatcher_gnocchi] url = http://localhost:8041" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:185(td) -msgid "(StrOpt) URL to Gnocchi." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:188(td) -msgid "[hardware] meter_definitions_file = snmp.yaml" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:189(td) -msgid "(StrOpt) Configuration file for defining hardware snmp meters." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:196(td) -msgid "[meter] meter_definitions_cfg_file = meters.yaml" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:197(td) -msgid "(StrOpt) Configuration file for defining meter notifications." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:200(td) -msgid "[notification] pipeline_processing_queues = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:201(td) -msgid "(IntOpt) Number of queues to parallelize workload across. This value should be larger than the number of active notification agents for optimal results." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:204(td) -msgid "[notification] workers = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:205(td) -msgid "(IntOpt) Number of workers for notification service, default value is 1." -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:295(td) -msgid "[notification] disable_non_metric_meters" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:317(td) -msgid "[api] workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:320(td) -msgid "[DEFAULT] notification_workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:321(td) -msgid "[notification] workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:324(td) -msgid "[DEFAULT] database_connection" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:328(td) -msgid "[DEFAULT] collector_workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/ceilometer.xml:329(td) -msgid "[collector] workers" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. 
-#: ./doc/config-reference/conf-changes/swift.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for OpenStack Object Storage" -msgstr "" - -#: ./doc/config-reference/conf-changes/swift.xml:5(para) -msgid "There are no new, updated, and deprecated options in Liberty for OpenStack Object Storage." -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/trove.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for Database service" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:20(td) -msgid "[DEFAULT] exists_notification_interval = 3600" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:21(td) -msgid "(IntOpt) Seconds to wait between pushing events." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:24(td) -msgid "[DEFAULT] nova_proxy_admin_tenant_id =" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:25(td) -msgid "(StrOpt) Admin tenant ID used to connect to Nova." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:52(td) -msgid "[DEFAULT] timeout_wait_for_service = 120" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:53(td) -msgid "(IntOpt) Maximum time (in seconds) to wait for a service to become alive." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:64(td) -msgid "[cassandra] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:65(td) -msgid "(StrOpt) Root controller implementation for cassandra." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:68(td) -msgid "[couchbase] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:69(td) -msgid "(StrOpt) Root controller implementation for couchbase." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:72(td) -msgid "[couchdb] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:73(td) -msgid "(StrOpt) Root controller implementation for couchdb." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:76(td) -msgid "[db2] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:77(td) -msgid "(StrOpt) Root controller implementation for db2." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:84(td) -msgid "[mariadb] backup_incremental_strategy = {'InnoBackupEx': 'InnoBackupExIncremental'}" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:85(td) ./doc/config-reference/conf-changes/trove.xml:213(td) -msgid "(DictOpt) Incremental Backup Runner based on the default strategy. For strategies that do not implement an incremental backup, the runner will use the default full backup." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:88(td) -msgid "[mariadb] backup_namespace = trove.guestagent.strategies.backup.mysql_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:89(td) ./doc/config-reference/conf-changes/trove.xml:217(td) -msgid "(StrOpt) Namespace to load backup strategies from." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:92(td) -msgid "[mariadb] backup_strategy = InnoBackupEx" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:93(td) ./doc/config-reference/conf-changes/trove.xml:221(td) -msgid "(StrOpt) Default strategy to perform backups." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:96(td) -msgid "[mariadb] device_path = /dev/vdb" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:97(td) ./doc/config-reference/conf-changes/trove.xml:229(td) -msgid "(StrOpt) Device path for volume if volume support is enabled." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:100(td) -msgid "[mariadb] mount_point = /var/lib/mysql" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:101(td) ./doc/config-reference/conf-changes/trove.xml:245(td) -msgid "(StrOpt) Filesystem path for mounting volumes if volume support is enabled." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:104(td) -msgid "[mariadb] replication_namespace = trove.guestagent.strategies.replication.mysql_binlog" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:105(td) ./doc/config-reference/conf-changes/trove.xml:249(td) ./doc/config-reference/conf-changes/trove.xml:305(td) -msgid "(StrOpt) Namespace to load replication strategies from." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:108(td) -msgid "[mariadb] replication_strategy = MysqlBinlogReplication" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:109(td) ./doc/config-reference/conf-changes/trove.xml:253(td) -msgid "(StrOpt) Default strategy for replication." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:112(td) -msgid "[mariadb] restore_namespace = trove.guestagent.strategies.restore.mysql_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:113(td) ./doc/config-reference/conf-changes/trove.xml:261(td) -msgid "(StrOpt) Namespace to load restore strategies from." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:116(td) -msgid "[mariadb] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:117(td) ./doc/config-reference/conf-changes/trove.xml:165(td) -msgid "(StrOpt) Root controller implementation for mysql." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:120(td) -msgid "[mariadb] root_on_create = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:121(td) ./doc/config-reference/conf-changes/trove.xml:269(td) -msgid "(BoolOpt) Enable the automatic creation of the root user for the service during instance-create. The generated password for the root user is immediately returned in the response of instance-create as the 'password' field." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:124(td) -msgid "[mariadb] tcp_ports = 3306" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:125(td) ./doc/config-reference/conf-changes/trove.xml:277(td) -msgid "(ListOpt) List of TCP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True)." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:128(td) -msgid "[mariadb] udp_ports =" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:129(td) ./doc/config-reference/conf-changes/trove.xml:281(td) -msgid "(ListOpt) List of UDP ports and/or port ranges to open in the security group (only applicable if trove_security_groups_support is True)." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:132(td) -msgid "[mariadb] usage_timeout = 400" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:133(td) ./doc/config-reference/conf-changes/trove.xml:285(td) -msgid "(IntOpt) Maximum time (in seconds) to wait for a Guest to become active." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:136(td) -msgid "[mariadb] volume_support = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:137(td) ./doc/config-reference/conf-changes/trove.xml:289(td) -msgid "(BoolOpt) Whether to provision a Cinder volume for datadir." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:140(td) -msgid "[mongodb] add_members_timeout = 300" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:141(td) -msgid "(IntOpt) Maximum time to wait (in seconds) for a replica set initialization process to complete." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:144(td) -msgid "[mongodb] configsvr_port = 27019" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:145(td) -msgid "(IntOpt) Port for instances running as config servers." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:148(td) -msgid "[mongodb] ignore_dbs = admin, local, config" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:149(td) -msgid "(ListOpt) Databases to exclude when listing databases." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:152(td) -msgid "[mongodb] ignore_users = admin.os_admin, admin.root" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:153(td) ./doc/config-reference/conf-changes/trove.xml:237(td) -msgid "(ListOpt) Users to exclude when listing users." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:156(td) -msgid "[mongodb] mongodb_port = 27017" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:157(td) -msgid "(IntOpt) Port for mongod and mongos instances." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:160(td) -msgid "[mongodb] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:161(td) -msgid "(StrOpt) Root controller implementation for mongodb." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:164(td) -msgid "[mysql] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:200(td) -msgid "[percona] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:201(td) -msgid "(StrOpt) Root controller implementation for percona." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:204(td) -msgid "[postgresql] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:205(td) -msgid "(StrOpt) Root controller implementation for postgresql." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:208(td) -msgid "[pxc] api_strategy = trove.common.strategies.cluster.experimental.pxc.api.PXCAPIStrategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:209(td) ./doc/config-reference/conf-changes/trove.xml:293(td) -msgid "(StrOpt) Class that implements datastore-specific API logic." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:212(td) -msgid "[pxc] backup_incremental_strategy = {'InnoBackupEx': 'InnoBackupExIncremental'}" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:216(td) -msgid "[pxc] backup_namespace = trove.guestagent.strategies.backup.mysql_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:220(td) -msgid "[pxc] backup_strategy = InnoBackupEx" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:224(td) -msgid "[pxc] cluster_support = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:225(td) ./doc/config-reference/conf-changes/trove.xml:297(td) -msgid "(BoolOpt) Enable clusters to be created and managed." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:228(td) -msgid "[pxc] device_path = /dev/vdb" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:232(td) -msgid "[pxc] guestagent_strategy = trove.common.strategies.cluster.experimental.pxc.guestagent.PXCGuestAgentStrategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:233(td) ./doc/config-reference/conf-changes/trove.xml:301(td) -msgid "(StrOpt) Class that implements datastore-specific Guest Agent API logic." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:236(td) -msgid "[pxc] ignore_users = os_admin, root, clusterrepuser" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:240(td) -msgid "[pxc] min_cluster_member_count = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:241(td) -msgid "(IntOpt) Minimum number of members in PXC cluster." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:244(td) -msgid "[pxc] mount_point = /var/lib/mysql" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:248(td) -msgid "[pxc] replication_namespace = trove.guestagent.strategies.replication.mysql_gtid" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:252(td) -msgid "[pxc] replication_strategy = MysqlGTIDReplication" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:256(td) -msgid "[pxc] replication_user = slave_user" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:257(td) -msgid "(StrOpt) Userid for replication slave." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:260(td) -msgid "[pxc] restore_namespace = trove.guestagent.strategies.restore.mysql_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:264(td) -msgid "[pxc] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:265(td) -msgid "(StrOpt) Root controller implementation for pxc." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:268(td) -msgid "[pxc] root_on_create = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:272(td) -msgid "[pxc] taskmanager_strategy = trove.common.strategies.cluster.experimental.pxc.taskmanager.PXCTaskManagerStrategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:273(td) ./doc/config-reference/conf-changes/trove.xml:313(td) -msgid "(StrOpt) Class that implements datastore-specific task manager logic." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:276(td) -msgid "[pxc] tcp_ports = 3306, 4444, 4567, 4568" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:280(td) -msgid "[pxc] udp_ports =" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:284(td) -msgid "[pxc] usage_timeout = 450" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:288(td) -msgid "[pxc] volume_support = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:292(td) -msgid "[redis] api_strategy = trove.common.strategies.cluster.experimental.redis.api.RedisAPIStrategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:296(td) -msgid "[redis] cluster_support = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:300(td) -msgid "[redis] guestagent_strategy = trove.common.strategies.cluster.experimental.redis.guestagent.RedisGuestAgentStrategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:304(td) -msgid "[redis] replication_namespace = trove.guestagent.strategies.replication.experimental.redis_sync" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:308(td) -msgid "[redis] root_controller = trove.extensions.common.service.DefaultRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:309(td) -msgid "(StrOpt) Root controller implementation for redis." -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:312(td) -msgid "[redis] taskmanager_strategy = trove.common.strategies.cluster.experimental.redis.taskmanager.RedisTaskManagerStrategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:316(td) -msgid "[vertica] root_controller = trove.extensions.vertica.service.VerticaRootController" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:317(td) -msgid "(StrOpt) Root controller implementation for Vertica." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:333(td) -msgid "[DEFAULT] cluster_usage_timeout" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:334(td) -msgid "675" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:335(td) -msgid "36000" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:343(td) -msgid "[DEFAULT] ignore_dbs" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:344(td) -msgid "lost+found, #mysql50#lost+found, mysql, information_schema" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:345(td) -msgid "mysql, information_schema, performance_schema" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:353(td) -msgid "[DEFAULT] notification_service_id" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:354(td) -msgid "{'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed'}" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:355(td) -msgid "{'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'percona': 'fd1723f5-68d2-409c-994f-a4a197892a17', 'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'pxc': '75a628c3-f81b-4ffb-b10a-4087c26bc854', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed', 'mariadb': '7a4f82cc-10d2-4bc6-aadc-d9aacc2a3cb5', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae'}" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:358(td) -msgid "[DEFAULT] report_interval" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:359(td) -msgid "10" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:360(td) -msgid "30" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:368(td) -msgid "[DEFAULT] usage_timeout" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:369(td) -msgid "600" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:370(td) ./doc/config-reference/conf-changes/glance.xml:200(td) -msgid "900" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:388(td) -msgid "[mongodb] backup_namespace" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:390(td) -msgid "trove.guestagent.strategies.backup.experimental.mongo_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:393(td) -msgid "[mongodb] backup_strategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:395(td) -msgid "MongoDump" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:398(td) -msgid "[mongodb] restore_namespace" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:400(td) -msgid "trove.guestagent.strategies.restore.experimental.mongo_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:408(td) -msgid "[redis] backup_namespace" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:410(td) -msgid "trove.guestagent.strategies.backup.experimental.redis_impl" -msgstr "" - -#: 
./doc/config-reference/conf-changes/trove.xml:413(td) -msgid "[redis] backup_strategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:415(td) -msgid "RedisBackup" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:418(td) -msgid "[redis] replication_strategy" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:420(td) -msgid "RedisSyncReplication" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:423(td) -msgid "[redis] restore_namespace" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:425(td) -msgid "trove.guestagent.strategies.restore.experimental.redis_impl" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:428(td) -msgid "[redis] tcp_ports" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:429(td) -msgid "6379" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:430(td) -msgid "6379, 16379" -msgstr "" - -#: ./doc/config-reference/conf-changes/trove.xml:433(td) -msgid "[redis] volume_support" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/glance.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for OpenStack Image service" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:16(td) -msgid "[DEFAULT] enable_v3_api = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:17(td) -msgid "(BoolOpt) Deploy the v3 OpenStack Objects API." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:24(td) -msgid "[DEFAULT] max_request_id_length = 64" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:25(td) -msgid "(IntOpt) Limits request ID length." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:52(td) -msgid "[DEFAULT] scrub_pool_size = 1" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:53(td) -msgid "(IntOpt) The size of thread pool to be used for scrubbing images. The default is one, which signifies serial scrubbing. Any value above one indicates the max number of images that may be scrubbed in parallel." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:112(td) -msgid "[glance_store] rados_connect_timeout = 0" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:113(td) -msgid "(IntOpt) Timeout value (in seconds) used when connecting to ceph cluster. If value <= 0, no timeout is set and default librados value is used." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:116(td) -msgid "[glance_store] s3_store_enable_proxy = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:117(td) -msgid "(BoolOpt) Enable the use of a proxy." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:120(td) -msgid "[glance_store] s3_store_proxy_host = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:121(td) -msgid "(StrOpt) Address or hostname for the proxy server." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:124(td) -msgid "[glance_store] s3_store_proxy_password = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:125(td) -msgid "(StrOpt) The password to use when connecting over a proxy." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:128(td) -msgid "[glance_store] s3_store_proxy_port = 8080" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:129(td) -msgid "(IntOpt) The port to use when connecting over a proxy." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:132(td) -msgid "[glance_store] s3_store_proxy_user = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:133(td) -msgid "(StrOpt) The username to connect to the proxy." -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:193(td) -msgid "[DEFAULT] allowed_rpc_exception_modules" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:194(td) -msgid "openstack.common.exception, glance.common.exception, exceptions" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:195(td) -msgid "glance.common.exception, exceptions" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:198(td) -msgid "[DEFAULT] client_socket_timeout" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:208(td) -msgid "[DEFAULT] digest_algorithm" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:209(td) -msgid "sha1" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:214(td) -msgid "localhost" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:215(td) -msgid "127.0.0.1" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:259(td) -msgid "[glance_store] vmware_datacenter_path" -msgstr "" - -#: ./doc/config-reference/conf-changes/glance.xml:275(td) -msgid "[glance_store] vmware_datastore_name" -msgstr "" - -#. Warning: Do not edit this file. It is automatically generated and your changes will be overwritten. The tool to do so lives in the openstack-doc-tools repository. -#: ./doc/config-reference/conf-changes/heat.xml:4(title) -msgid "New, updated, and deprecated options in Liberty for Orchestration" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:16(td) -msgid "[DEFAULT] encrypt_parameters_and_properties = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:17(td) -msgid "(BoolOpt) Encrypt template parameters that were marked as hidden and also all the resource properties before storing them in database." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:24(td) -msgid "[DEFAULT] hidden_stack_tags = data-processing-cluster" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:25(td) -msgid "(ListOpt) Stacks containing these tag names will be hidden. Multiple tags should be given in a comma-delimited list (eg. hidden_stack_tags=hide_me,me_too)." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:60(td) -msgid "[cache] backend = dogpile.cache.null" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:61(td) -msgid "(StrOpt) Dogpile.cache backend module. It is recommended that Memcache with pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in production deployments. Small workloads (single process) like devstack can use the dogpile.cache.memory backend." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:64(td) -msgid "[cache] backend_argument = []" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:65(td) -msgid "(MultiStrOpt) Arguments supplied to the backend module. Specify this option once per argument to be passed to the dogpile.cache backend. Example format: \"<argname>:<value>\"." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:68(td) -msgid "[cache] config_prefix = cache.oslo" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:69(td) -msgid "(StrOpt) Prefix for building the configuration dictionary for the cache region. This should not need to be changed unless there is another dogpile.cache region with the same configuration name." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:72(td) -msgid "[cache] debug_cache_backend = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:73(td) -msgid "(BoolOpt) Extra debugging from the cache backend (cache keys, get/set/delete/etc calls). This is only really useful if you need to see the specific cache-backend get/set/delete calls with the keys/values. Typically this should be left set to false." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:76(td) -msgid "[cache] enabled = False" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:77(td) -msgid "(BoolOpt) Global toggle for caching." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:80(td) -msgid "[cache] expiration_time = 600" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:81(td) -msgid "(IntOpt) Default TTL, in seconds, for any cached item in the dogpile.cache region. This applies to any cached method that doesn't have an explicit cache expiration time defined for it." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:84(td) -msgid "[cache] memcache_dead_retry = 300" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:85(td) -msgid "(IntOpt) Number of seconds memcached server is considered dead before it is tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only)." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:88(td) -msgid "[cache] memcache_pool_connection_get_timeout = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:89(td) -msgid "(IntOpt) Number of seconds that an operation will wait to get a memcache client connection." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:92(td) -msgid "[cache] memcache_pool_maxsize = 10" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:93(td) -msgid "(IntOpt) Max total number of open connections to every memcached server. (oslo_cache.memcache_pool backend only)." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:96(td) -msgid "[cache] memcache_pool_unused_timeout = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:97(td) -msgid "(IntOpt) Number of seconds a connection to memcached is held unused in the pool before it is closed. (oslo_cache.memcache_pool backend only)." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:100(td) -msgid "[cache] memcache_servers = localhost:11211" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:101(td) -msgid "(ListOpt) Memcache servers in the format of \"host:port\". (dogpile.cache.memcache and oslo_cache.memcache_pool backends only)." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:104(td) -msgid "[cache] memcache_socket_timeout = 3" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:105(td) -msgid "(IntOpt) Timeout in seconds for every call to a server. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only)." 
-msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:108(td) -msgid "[cache] proxies =" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:109(td) -msgid "(ListOpt) Proxy classes to import that will affect the way the dogpile.cache backend functions. See the dogpile.cache documentation on changing-backend-behavior." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:112(td) -msgid "[clients_keystone] auth_uri =" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:113(td) -msgid "(StrOpt) Unversioned keystone url in format like http://0.0.0.0:5000." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:116(td) -msgid "[constraint_validation_cache] caching = True" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:117(td) -msgid "(BoolOpt) Toggle to enable/disable caching when Orchestration Engine validates property constraints of stack.During property validation with constraints Orchestration Engine caches requests to other OpenStack services. Please note that the global toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use this feature." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:120(td) -msgid "[constraint_validation_cache] expiration_time = 60" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:121(td) -msgid "(IntOpt) TTL, in seconds, for any cached item in the dogpile.cache region used for caching of validation constraints." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:172(td) -msgid "[heat_api] tcp_keepidle = 600" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:173(td) ./doc/config-reference/conf-changes/heat.xml:177(td) ./doc/config-reference/conf-changes/heat.xml:181(td) -msgid "(IntOpt) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes." -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:176(td) -msgid "[heat_api_cfn] tcp_keepidle = 600" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:180(td) -msgid "[heat_api_cloudwatch] tcp_keepidle = 600" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:240(td) -msgid "[trustee] auth_plugin = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:241(td) -msgid "(StrOpt) Name of the plugin to load" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:244(td) -msgid "[trustee] auth_section = None" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:245(td) -msgid "(StrOpt) Config Section from which to load plugin specific options" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:266(td) -msgid "[DEFAULT] enable_cloud_watch_lite" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:271(td) -msgid "[DEFAULT] heat_waitcondition_server_url" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:296(td) -msgid "[heat_api] workers" -msgstr "" - -#: ./doc/config-reference/conf-changes/heat.xml:298(td) -msgid "8" -msgstr "" - -#. Put one translator per line, in the form of NAME , YEAR1, YEAR2 -#: ./doc/config-reference/conf-changes/heat.xml:0(None) -msgid "translator-credits" -msgstr "" - diff --git a/doc/config-reference/networking/section_networking-log-files.xml b/doc/config-reference/networking/section_networking-log-files.xml deleted file mode 100644 index 951a094db9..0000000000 --- a/doc/config-reference/networking/section_networking-log-files.xml +++ /dev/null @@ -1,97 +0,0 @@ - -
- Log files used by Networking - The corresponding log file of each Networking service is - stored in the /var/log/neutron/ directory of - the host on which each service runs. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Log files used by Networking services
- Log file - - Service/interface -
- dhcp-agent.log - - neutron-dhcp-agent -
- l3-agent.log - - neutron-l3-agent -
- lbaas-agent.log - - neutron-lbaas-agent - - The - neutron-lbaas-agent - service only runs when Load-Balancer-as-a-Service is - enabled. -
- linuxbridge-agent.log - - neutron-linuxbridge-agent -
- metadata-agent.log - - neutron-metadata-agent -
- metering-agent.log - - neutron-metering-agent -
- openvswitch-agent.log - - neutron-openvswitch-agent -
- server.log - - neutron-server -
-
diff --git a/doc/config-reference/networking/section_networking-options-reference.xml b/doc/config-reference/networking/section_networking-options-reference.xml deleted file mode 100644 index 02845ae17c..0000000000 --- a/doc/config-reference/networking/section_networking-options-reference.xml +++ /dev/null @@ -1,215 +0,0 @@ - -
- Networking configuration options -The options and descriptions listed in this introduction are auto-generated from the code in - the Networking service project, which provides software-defined networking for the VMs that run - in Compute. The list contains common options, while the subsections list the options for the - various networking plug-ins. - - - - - - - -
-Agent -Use the following options to alter agent-related settings. - -
- -
-API -Use the following options to alter API-related settings. - -
- -
-Token authentication -Use the following options to alter token authentication settings. - -
- -
-Compute -Use the following options to alter Compute-related settings. - -
- -
-CORS -Use the following options to alter CORS-related settings. -
- -
-Database -Use the following options to alter Database-related settings. - -
- -
-DHCP agent -Use the following options to alter DHCP agent-related settings. -
- -
-Distributed virtual router -Use the following options to alter DVR-related settings. - -
- -
-Firewall-as-a-Service driver -Use the following options in the fwaas_driver.ini - file for the FWaaS driver. - - - -
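-A minimal fwaas_driver.ini sketch, provided only as an illustration; the [fwaas] section name is standard for this era, and the driver class path is an assumed example of the iptables-based driver, not taken from the text above:
-[fwaas]
-# Example driver class path (assumption); use the driver shipped with your FWaaS package
-driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-# Enable FWaaS support in the L3 agent
-enabled = True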
- -
-Load-Balancer-as-a-Service configuration options -Use the following options in the neutron_lbaas.conf - file for the LBaaS agent. - -Use the following options in the lbaas_agent.ini - file for the LBaaS agent. - -Use the following options in the services_lbaas.conf - file for the LBaaS agent. - -Use the following options in the /etc/octavia/octavia.conf - file for the Octavia configuration. - -
- -
-VPN-as-a-Service configuration options -Use the following options in the vpnaas_agent.ini - file for the VPNaaS agent. - - - - -
- -
-IPv6 router advertisement -Use the following options to alter IPv6 RA settings. - -
- -
-L3 agent -Use the following options in the l3_agent.ini - file for the L3 agent. - -
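-A minimal l3_agent.ini sketch, shown only as an illustration; the interface driver class path and the bridge name are assumed Liberty-era examples:
-[DEFAULT]
-# Driver used to plug router interfaces (example class path for Open vSwitch)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-# Bridge used for external traffic (placeholder name)
-external_network_bridge = br-ex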
- -
-Logging -Use the following options to alter logging settings. - -
- -
-Metadata Agent -Use the following options in the -metadata_agent.ini file for the Metadata agent. - - -Previously, the neutron metadata agent connected to the neutron server through the REST API by using a neutron client. This was inefficient because keystone was fully involved in the authentication process and became overloaded. -Since the Kilo release, the neutron metadata agent uses RPC by default to connect to the server, which is the typical way for the neutron server and its agents to interact. If the neutron server does not support metadata RPC, the neutron client is used instead. - - - Do not run the neutron-ns-metadata-proxy proxy - namespace as root on a node with the L3 agent running. In OpenStack Kilo - and newer, you can change the permissions of - neutron-ns-metadata-proxy after the proxy installation - using the relevant configuration options. - - -
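-A minimal metadata_agent.ini sketch, shown only as an illustration; the option names reflect typical Kilo/Liberty-era defaults, and the address, port, and secret below are placeholder values:
-[DEFAULT]
-# IP address and port of the nova metadata API service (placeholders)
-nova_metadata_ip = 192.0.2.10
-nova_metadata_port = 8775
-# Shared secret used to sign the instance-id header; must match the value
-# configured for the nova metadata API (placeholder)
-metadata_proxy_shared_secret = METADATA_SECRET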
- -
-Metering Agent -Use the following options in the -metering_agent.ini file for the Metering agent. - -
- -
-Nova -Use the following options in the -neutron.conf file to change nova-related settings. - -
- -
-Policy -Use the following options in the -neutron.conf file to change policy settings. - -
- -
-Quotas -Use the following options in the -neutron.conf file for the quota system. - -
- -
-Scheduler -Use the following options in the -neutron.conf file to change scheduler settings. - -
- -
-Security Groups -Use the following options in the configuration file -for your driver to change security group settings. - - -Networking uses iptables to implement security group functions. - When the IPset option is enabled in the L2 agent, the agent uses - IPset to improve security group performance, because an IPset is a hash set - whose lookup cost does not depend on the number of elements. -When a port is created, the L2 agent adds an additional IPset chain to - its iptables chain. If the security group that the port belongs to has rules - that reference another security group, the members of that other security group are added - to the IPset chain. -Previously, any change to the members of a security group triggered a reload of the iptables - rules, which is expensive. With the IPset option enabled in the L2 agent, - iptables does not need to be reloaded when only the members of a security group - change; only the IPset is updated. - - -A single default security group has been introduced in order - to avoid race conditions when creating a tenant's default security - group. The race conditions are caused by the uniqueness check of - a new security group name. A table default_security_group - implements such a group. It has a tenant_id field - as the primary key and security_group_id, which - is the identifier of the default security group. The migration that introduces - this table includes a sanity check that verifies that a default security - group is not duplicated in any tenant. -
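-A minimal sketch of enabling the IPset optimization in the L2 agent configuration file; the [securitygroup] section and the enable_ipset option are typical for this era, and the firewall driver class path is shown as an assumed example:
-[securitygroup]
-# Hybrid iptables/OVS firewall driver (example class path)
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-# Manage security group members through IPset instead of reloading
-# the full iptables rule set whenever membership changes
-enable_ipset = True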
- -
-SSL and Certification Authority -Use the following options in the -neutron.conf file to enable SSL. - -
- -
diff --git a/doc/config-reference/networking/section_networking-plugins-ml2.xml b/doc/config-reference/networking/section_networking-plugins-ml2.xml deleted file mode 100644 index 8fc4968959..0000000000 --- a/doc/config-reference/networking/section_networking-plugins-ml2.xml +++ /dev/null @@ -1,130 +0,0 @@ - -
- Modular Layer 2 (ml2) configuration options - The Modular Layer 2 (ml2) plug-in has two components: - network types and mechanisms. You can configure these - components separately. This section describes these - configuration options. - - Configure MTU for VXLAN tunnelling - Specific MTU configuration is necessary for VXLAN to function - as expected: - - - One option is to increase the MTU value of the physical - interface and physical switch fabric by at least 50 bytes. - For example, increase the MTU value to 1550. This value - enables an automatic 50-byte MTU difference between the - physical interface (1500) and the VXLAN interface - (automatically 1500-50 = 1450). An MTU value of 1450 causes - issues when virtual machine taps are configured at an MTU - value of 1500. - - - Another option is to decrease the virtual Ethernet - devices' MTU. Set the - option to 1450 in the neutron.conf file, - and set all guest virtual machines' MTU to the same value by - using a DHCP option. For information about how to use this - option, see - Configure OVS plug-in. - - - - -
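-A sketch of the second approach (lowering the virtual device and guest MTU to 1450); the network_device_mtu and dnsmasq_config_file options and the dnsmasq override file path are assumed Liberty-era examples, not taken from the text above:
-# neutron.conf
-[DEFAULT]
-network_device_mtu = 1450
-# dhcp_agent.ini: point the DHCP agent at a dnsmasq override file
-[DEFAULT]
-dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
-# /etc/neutron/dnsmasq-neutron.conf: advertise MTU 1450 to guests
-# through DHCP option 26 (interface MTU)
-dhcp-option-force=26,1450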
- Modular Layer 2 (ml2) Flat Type configuration - options - -
-
- Modular Layer 2 (ml2) GRE Type configuration - options - -
-
- Modular Layer 2 (ml2) VLAN Type configuration - options - -
-
- Modular Layer 2 (ml2) VXLAN Type configuration - options - -
-
- Modular Layer 2 (ml2) Arista Mechanism configuration - options - - -
-
- Modular Layer 2 (ml2) BaGpipe Mechanism configuration - options - -
-
- Modular Layer 2 (ml2) BigSwitch Mechanism configuration - options - -
-
- Modular Layer 2 (ml2) Brocade Mechanism configuration - options - -
-
- Modular Layer 2 (ml2) Brocade MLX ICX Mechanism configuration - options - Configure switch names to be used as group names - as described below. - -
-
- Modular Layer 2 (ml2) Cisco Mechanism configuration - options - -
-
- Modular Layer 2 (ml2) Freescale SDN Mechanism configuration - options - -
-
- Modular Layer 2 (ml2) Geneve Mechanism configuration - options - -
-
- Modular Layer 2 (ml2) OpenDaylight Mechanism configuration - options - Use of VLANs with the OpenDaylight mechanism driver requires - OpenDaylight Helium or newer to be installed. - -
-
- Modular Layer 2 (ml2) OpenFlow Agent (ofagent) Mechanism - configuration options - -
-
- Modular Layer 2 (ml2) L2 Population Mechanism - configuration options - -
-
- Modular Layer 2 (ml2) Tail-f NCS Mechanism - configuration options - -
-
- Modular Layer 2 (ml2) SR-IOV Mechanism configuration - options - -
-
diff --git a/doc/config-reference/networking/section_networking-plugins.xml b/doc/config-reference/networking/section_networking-plugins.xml deleted file mode 100644 index 0981dbe909..0000000000 --- a/doc/config-reference/networking/section_networking-plugins.xml +++ /dev/null @@ -1,205 +0,0 @@ - -
- Networking plug-ins - OpenStack Networking introduces the concept of a - plug-in, which is a back-end implementation of the - OpenStack Networking API. A plug-in can use a - variety of technologies to implement the logical API - requests. Some OpenStack Networking plug-ins might - use basic Linux VLANs and IP tables, while others - might use more advanced technologies, such as - L2-in-L3 tunneling or OpenFlow. These sections - detail the configuration options for the various - plug-ins. - - The following plugins have been removed in - Kilo: - - Ryu plugin. The Ryu team recommends - that you migrate to the ML2 plugin with the ofagent - mechanism driver. However, note that the - functionality is not the same. There is no - upgrade procedure currently available. - - Mellanox plugin. - - -
- BaGpipe configuration options - -
-
- BigSwitch configuration options - -
-
- Brocade configuration options - -
-
- Brocade MLX L3 plug-in - Configure switch names to be used as group names - as described below. - -
-
- Brocade Vyatta layer 3 plug-in - The Brocade Vyatta Layer 3 plug-in configures Vyatta - vRouter. More information about the plug-in is available at: Brocade_Vyatta_L3_Plugin. - Use the following options to configure the Brocade Vyatta Layer 3 plug-in. - -
-
- CISCO configuration options - -
-
- Fujitsu CFAB configuration options - -
-
- Fujitsu ISM configuration options - -
-
- CloudBase Hyper-V Agent configuration - options - -
-
- Embrane configuration - options - -
-
- IBM SDN-VE configuration options - -
-
- Layer 2 Gateway configuration options - -
-
- Layer 2 Gateway configuration options - -
-
- Linux bridge Agent configuration - options - -
- -
- MidoNet configuration options - -
-
- NEC configuration options - -
-
- One Convergence NVSD configuration options - -
-
- Open Networking Operating System (ONOS) configuration options - -
-
- OpenContrail configuration options - -
-
- Open vSwitch Agent configuration - options - -
-
- Virtual Network for Open vSwitch options - -
-
- IPv6 Prefix Delegation configuration options - -
-
- PLUMgrid configuration options - -
-
- SR-IOV configuration options - -
-
- VMware vSphere configuration options - -
-
- VMware NSX configuration options - -
-
- VMware DVS configuration options - -
-
diff --git a/doc/config-reference/networking/section_networking-sample-configuration-files.xml b/doc/config-reference/networking/section_networking-sample-configuration-files.xml deleted file mode 100644 index cfb4440b49..0000000000 --- a/doc/config-reference/networking/section_networking-sample-configuration-files.xml +++ /dev/null @@ -1,64 +0,0 @@ -
- Networking sample configuration files - All the files in this section can be found in /etc/neutron/. -
- neutron.conf - Use the neutron.conf file to configure the - majority of the OpenStack Networking options. - - - -
-
- api-paste.ini - Use the api-paste.ini to configure the OpenStack Networking API. - - - -
-
- policy.json - Use the policy.json file to define additional access controls - that apply to the OpenStack Networking service. - - - -
-
- rootwrap.conf - Use the rootwrap.conf file to define configuration values used by the - rootwrap script when the OpenStack Networking service must escalate its - privileges to those of the root user. - - - -
-
- Configuration files for plug-in agents - Each plug-in agent that runs on an OpenStack Networking node, - to perform local networking configuration for the node's VMs and - networking services, has its own configuration file. -
- dhcp_agent.ini - - - -
-
- l3_agent.ini - - - -
-
- metadata_agent.ini - - - -
-
-
diff --git a/doc/config-reference/networking/section_rpc-for-networking.xml b/doc/config-reference/networking/section_rpc-for-networking.xml deleted file mode 100644 index 9bcbf73ba9..0000000000 --- a/doc/config-reference/networking/section_rpc-for-networking.xml +++ /dev/null @@ -1,123 +0,0 @@ - -
- - Configure the Oslo RPC messaging system - - OpenStack projects use an open standard for messaging - middleware known as AMQP. This messaging middleware enables the - OpenStack services that run on multiple servers to talk to each - other. OpenStack Oslo RPC supports three implementations of AMQP: - RabbitMQ, - Qpid, and - ZeroMQ. - -
- Configure RabbitMQ - - OpenStack Oslo RPC uses RabbitMQ - by default. Use these options to configure the - RabbitMQ message system. The - option is optional as long as - RabbitMQ is the default messaging - system. However, if it is included in the configuration, you must - set it to - neutron.openstack.common.rpc.impl_kombu. - - - -rpc_backend=neutron.openstack.common.rpc.impl_kombu - - - Use these options to configure the - RabbitMQ messaging system. You can - configure messaging communication for different installation - scenarios, tune retries for RabbitMQ, and define the size of the - RPC thread pool. To monitor notifications through RabbitMQ, you - must set the option to - neutron.openstack.common.notifier.rpc_notifier in the - neutron.conf file: -
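-A minimal neutron.conf sketch for a RabbitMQ deployment, assuming the pre-oslo.messaging option names used elsewhere in this section; the broker host and credentials are placeholder values:
-[DEFAULT]
-rpc_backend = neutron.openstack.common.rpc.impl_kombu
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-# RabbitMQ broker location and credentials (placeholders)
-rabbit_host = rabbit.example.com
-rabbit_port = 5672
-rabbit_userid = neutron
-rabbit_password = RABBIT_PASS
-rabbit_virtual_host = /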
-
- Configure Qpid - Use these options to configure the - Qpid messaging system for OpenStack - Oslo RPC. Qpid is not the default - messaging system, so you must enable it by setting the - option in the - neutron.conf file: - -rpc_backend=neutron.openstack.common.rpc.impl_qpid - - This critical option points the compute nodes to the - Qpid broker (server). Set the - option to the host name where - the broker runs in the neutron.conf - file. - - The --qpid_hostname parameter accepts a host - name or IP address value. - - -qpid_hostname=hostname.example.com - - - If the Qpid broker listens on a - port other than the AMQP default of 5672, you - must set the option to that - value: - - -qpid_port=12345 - - - If you configure the Qpid broker - to require authentication, you must add a user name and password - to the configuration: - - -qpid_username=username -qpid_password=password - - - By default, TCP is used as the transport. To enable SSL, set - the option: - - -qpid_protocol=ssl - - - Use these additional options to configure the Qpid messaging - driver for OpenStack Oslo RPC. These options are used - infrequently. - - - -
-
- Configure ZeroMQ - Use these options to configure the - ZeroMQ messaging system for - OpenStack Oslo RPC. ZeroMQ is not the - default messaging system, so you must enable it by setting the - option in the - neutron.conf file: - -
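-A minimal sketch that follows the pattern of the RabbitMQ and Qpid examples above; the impl_zmq backend value and the rpc_zmq_host option are assumed for illustration:
-rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# Host name or IP address this node advertises for ZeroMQ traffic (placeholder)
-rpc_zmq_host=hostname.example.com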
-
- Configure messaging - - Use these common options to configure the - RabbitMQ, - Qpid, and - ZeroMQ messaging drivers: - - - - -
-
diff --git a/doc/config-reference/object-storage/section_configure_s3.xml b/doc/config-reference/object-storage/section_configure_s3.xml deleted file mode 100644 index 9630d98924..0000000000 --- a/doc/config-reference/object-storage/section_configure_s3.xml +++ /dev/null @@ -1,83 +0,0 @@ - -
- Configure Object Storage with the S3 API - The Swift3 middleware emulates the S3 REST API on top of - Object Storage. - The following operations are currently supported: - - - GET Service - - - DELETE Bucket - - - GET Bucket (List Objects) - - - PUT Bucket - - - DELETE Object - - - GET Object - - - HEAD Object - - - PUT Object - - - PUT Object (Copy) - - - To use this middleware, first download the latest - version from its repository to your proxy - server(s). - $ git clone https://git.openstack.org/openstack/swift3 - Then, install it using standard python mechanisms, such - as: - # python setup.py install - Alternatively, if you have configured the Ubuntu Cloud - Archive, you may use: - # apt-get install swift-python-s3 - To add this middleware to your configuration, add the swift3 - middleware in front of the swauth middleware, and before any other - middleware that looks at Object Storage requests (like rate limiting). - Ensure that your proxy-server.conf file contains - swift3 in the pipeline and the [filter:swift3] - section, as shown below: - [pipeline:main] -pipeline = catch_errors healthcheck cache swift3 swauth proxy-server - -[filter:swift3] -use = egg:swift3#swift3 - Next, configure the tool that you use to connect to the - S3 API. For S3curl, for example, you must add your - host IP information by adding your host IP to the - @endpoints array (line 33 in s3curl.pl): - my @endpoints = ( '1.2.3.4'); - Now you can send commands to the endpoint, such - as: - $ ./s3curl.pl - 'a7811544507ebaf6c9a7a8804f47ea1c' -key 'a7d8e981-e296-d2ba-cb3b-db7dd23159bd' -get - -s -v http://1.2.3.4:8080 - To set up your client, ensure you are using the ec2 credentials, - which can be downloaded from the API Endpoints tab -of the dashboard. - The host should also point to the Object Storage node's hostname. It also - will have to use the old-style calling format, and not the hostname-based container format. - Here is an example client setup using the Python boto library on a locally installed - all-in-one Object Storage installation. - connection = boto.s3.Connection( - aws_access_key_id='a7811544507ebaf6c9a7a8804f47ea1c', - aws_secret_access_key='a7d8e981-e296-d2ba-cb3b-db7dd23159bd', - port=8080, - host='127.0.0.1', - is_secure=False, - calling_format=boto.s3.connection.OrdinaryCallingFormat()) -
diff --git a/doc/config-reference/object-storage/section_object-storage-cors.xml b/doc/config-reference/object-storage/section_object-storage-cors.xml deleted file mode 100644 index e4e7f88a53..0000000000 --- a/doc/config-reference/object-storage/section_object-storage-cors.xml +++ /dev/null @@ -1,16 +0,0 @@ - -
- Cross-origin resource sharing - Cross-Origin Resource Sharing (CORS) is a mechanism that allows code running in a browser - (JavaScript for example) to make requests to a domain, other than the one it was - originated from. OpenStack Object Storage supports CORS requests to containers and objects within - the containers using metadata held on the container. - In addition to the metadata on containers, you can use the - option in the -proxy-server.conf file to set a list of hosts that -are included with any CORS request by default. -
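-As an illustration only (the container name and origin below are made up), cross-origin access can also be granted per container by setting metadata on it with the swift client:
-$ swift post -m "X-Container-Meta-Access-Control-Allow-Origin: https://dashboard.example.com" web_assets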
diff --git a/doc/config-reference/object-storage/section_object-storage-features.xml b/doc/config-reference/object-storage/section_object-storage-features.xml deleted file mode 100644 index b3acdb3fe1..0000000000 --- a/doc/config-reference/object-storage/section_object-storage-features.xml +++ /dev/null @@ -1,800 +0,0 @@ - - -%openstack; -]> -
- Configure Object Storage features -
- Object Storage zones - In OpenStack Object Storage, data is placed across - different tiers of failure domains. First, data is spread - across regions, then zones, then servers, and finally - across drives. Data is placed to get the highest failure - domain isolation. If you deploy multiple regions, the - Object Storage service places the data across the regions. - Within a region, each replica of the data should be stored - in unique zones, if possible. If there is only one zone, - data should be placed on different servers. And if there - is only one server, data should be placed on different - drives. - Regions are widely separated installations with a - high-latency or otherwise constrained network link between - them. Zones are arbitrarily assigned, and it is up to the - administrator of the Object Storage cluster to choose an - isolation level and attempt to maintain the isolation - level through appropriate zone assignment. For example, a - zone may be defined as a rack with a single power source. - Or a zone may be a DC room with a common utility provider. - Servers are identified by a unique IP/port. Drives are - locally attached storage volumes identified by mount - point. - In small clusters (five nodes or fewer), everything is - normally in a single zone. Larger Object Storage - deployments may assign zone designations differently; for - example, an entire cabinet or rack of servers may be - designated as a single zone to maintain replica - availability if the cabinet becomes unavailable (for - example, due to failure of the top of rack switches or a - dedicated circuit). In very large deployments, such as - service provider level deployments, each zone might have - an entirely autonomous switching and power infrastructure, - so that even the loss of an electrical circuit or - switching aggregator would result in the loss of a single - replica at most. -
- Rackspace zone recommendations - For ease of maintenance on OpenStack Object Storage, - Rackspace recommends that you set up at least five - nodes. Each node is assigned its own zone (for a total - of five zones), which gives you host level redundancy. - This enables you to take down a single zone for - maintenance and still guarantee object availability in - the event that another zone fails during your - maintenance. - You could keep each server in its own cabinet to achieve cabinet level isolation, - but you may wish to wait until your Object Storage service is better established - before developing cabinet-level isolation. OpenStack Object Storage is flexible; if - you later decide to change the isolation level, you can take down one zone at a time - and move them to appropriate new homes. -
-
-
- RAID controller configuration - OpenStack Object Storage does not require RAID. In fact, - most RAID configurations cause significant performance - degradation. The main reason for using a RAID controller - is the battery-backed cache. It is very important for data - integrity reasons that when the operating system confirms - a write has been committed that the write has actually - been committed to a persistent location. Most disks lie - about hardware commits by default, instead writing to a - faster write cache for performance reasons. In most cases, - that write cache exists only in non-persistent memory. In - the case of a loss of power, this data may never actually - get committed to disk, resulting in discrepancies that the - underlying file system must handle. - - OpenStack Object Storage works best on the XFS file system, and - this document assumes that the hardware being used is configured - appropriately to be mounted with the nobarriers - option. For more information, see the XFS FAQ. - - To get the most out of your hardware, it is essential - that every disk used in OpenStack Object Storage is - configured as a standalone, individual RAID 0 disk; in the - case of 6 disks, you would have six RAID 0s or one JBOD. - Some RAID controllers do not support JBOD or do not - support battery backed cache with JBOD. To ensure the - integrity of your data, you must ensure that the - individual drive caches are disabled and the battery - backed cache in your RAID card is configured and used. - Failure to configure the controller properly in this case - puts data at risk in the case of sudden loss of - power. - You can also use hybrid drives or similar options for - battery backed up cache configurations without a RAID - controller. -
-
- - Throttle resources through rate limits - Rate limiting in OpenStack Object Storage is implemented - as a pluggable middleware that you configure on the proxy - server. Rate limiting is performed on requests that result - in database writes to the account and container SQLite - databases. It uses memcached and is dependent on the proxy - servers having highly synchronized time. The rate limits - are limited by the accuracy of the proxy server - clocks. -
- Configure rate limiting - All configuration is optional. If no account or - container limits are provided, no rate limiting - occurs. Available configuration options - include: - - The container rate limits are linearly interpolated - from the values given. A sample container rate - limiting could be: - container_ratelimit_100 = 100 - container_ratelimit_200 = 50 - container_ratelimit_500 = 20 - This would result in: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Values for Rate Limiting with Sample - Configuration Settings
Container Size | Rate Limit
0-99 | No limiting
100 | 100
150 | 75
500 | 20
1000 | 20
-
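-For example, with the sample settings above, a container holding 150 objects falls halfway between the 100 and 200 thresholds, so its limit is interpolated as 100 + (150 - 100) / (200 - 100) * (50 - 100) = 75, which matches the table.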
-
-
- Health check - Provides an easy way to monitor whether the Object Storage proxy server is alive. If - you access the proxy with the path /healthcheck, it responds with - OK in the response body, which monitoring tools can use. - -
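-For example (the proxy host and port below are placeholders), a monitoring system can poll the endpoint directly and expect OK in the response body:
-$ curl -i http://proxy.example.com:8080/healthcheck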
-
- Domain remap - Middleware that translates container and account parts - of a domain to path parameters that the proxy server - understands. - -
-
- CNAME lookup - Middleware that translates an unknown domain in the host - header to something that ends with the configured - storage_domain by looking up the given domain's CNAME - record in DNS. - -
-
- - Temporary URL - Allows the creation of URLs to provide temporary access to objects. For example, a - website may wish to provide a link to download a large object in OpenStack Object - Storage, but the Object Storage account has no public access. The website can generate a - URL that provides GET access for a limited time to the resource. When the web browser - user clicks on the link, the browser downloads the object directly from Object Storage, - eliminating the need for the website to act as a proxy for the request. If the user - shares the link with all his friends, or accidentally posts it on a forum, the direct - access is limited to the expiration time set when the website created the link. - A temporary URL is the typical URL associated with an - object, with two additional query parameters: - - temp_url_sig - - A cryptographic signature - - - - temp_url_expires - - An expiration date, in Unix time - - - - An example of a temporary - URL: - https://swift-cluster.example.com/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/container/object? - temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709& - temp_url_expires=1323479485 - - - To create temporary URLs, first set the - X-Account-Meta-Temp-URL-Key header on your - Object Storage account to an arbitrary string. This string serves - as a secret key. For example, to set a key of - b3968d0207b54ece87cccc06515a89d4 by using the - swift command-line tool: - - $ swift post -m "Temp-URL-Key:b3968d0207b54ece87cccc06515a89d4" - Next, generate an HMAC-SHA1 (RFC 2104) signature to - specify: - - - Which HTTP method to allow (typically - GET or - PUT) - - - The expiry date as a Unix timestamp - - - The full path to the object - - - The secret key set as the - X-Account-Meta-Temp-URL-Key - - - Here is code generating the signature for a GET for 24 - hours on - /v1/AUTH_account/container/object: - import hmac -from hashlib import sha1 -from time import time -method = 'GET' -duration_in_seconds = 60*60*24 -expires = int(time() + duration_in_seconds) -path = '/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/container/object' -key = 'mykey' -hmac_body = '%s\n%s\n%s' % (method, expires, path) -sig = hmac.new(key, hmac_body, sha1).hexdigest() -s = 'https://{host}/{path}?temp_url_sig={sig}&temp_url_expires={expires}' -url = s.format(host='swift-cluster.example.com', path=path, sig=sig, expires=expires) - - Any alteration of the resource path or query arguments results in - a 401 Unauthorized - error. Similarly, a PUT where GET was the allowed method returns a - 401 error. HEAD is allowed if GET or PUT is - allowed. Using this in combination with browser form post - translation middleware could also allow direct-from-browser - uploads to specific locations in Object Storage. - - - - Changing the X-Account-Meta-Temp-URL-Key - invalidates any previously generated - temporary URLs within 60 seconds, which is the memcache time for - the key. Object Storage supports up to two keys, - specified by X-Account-Meta-Temp-URL-Key - and X-Account-Meta-Temp-URL-Key-2. - Signatures are checked against both keys, - if present. This process enables key rotation without - invalidating all existing temporary URLs. - - - - Object Storage includes the swift-temp-url - script that generates the query parameters automatically: - - $ bin/swift-temp-url GET 3600 /v1/AUTH_account/container/object mykey -/v1/AUTH_account/container/object? 
-
-temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91&
-temp_url_expires=1374497657
- Because this command only returns the path, you must prefix the Object Storage
- host name (for example, https://swift-cluster.example.com).
- With GET Temporary URLs, a Content-Disposition header is set on the response so
- that browsers interpret this as a file attachment to be saved. The file name
- chosen is based on the object name, but you can override this with a
- filename query parameter. The following example specifies a filename of
- My Test File.pdf:
- https://swift-cluster.example.com/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/container/object?
-temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
-temp_url_expires=1323479485&
-filename=My+Test+File.pdf
-
- If you do not want the object to be downloaded, you can cause
- Content-Disposition: inline to be set on the response by adding the
- inline parameter to the query string, as follows:
-https://swift-cluster.example.com/v1/AUTH_account/container/object?
-temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
-temp_url_expires=1323479485&inline
-
- To enable Temporary URL functionality, edit
- /etc/swift/proxy-server.conf to add tempurl to the
- pipeline variable defined in the [pipeline:main] section. The
- tempurl entry should appear immediately before the authentication filters in
- the pipeline, such as authtoken, tempauth or keystoneauth. For example:
-[pipeline:main]
-pipeline = healthcheck cache tempurl authtoken keystoneauth proxy-server
-
-
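The signature example earlier in this section is written for Python 2. On Python 3,
hmac.new() requires bytes rather than text, so a minimal sketch of the same
computation (using the example host, path, and key shown above) looks like this:

import hmac
from hashlib import sha1
from time import time

method = 'GET'
duration_in_seconds = 60 * 60 * 24
expires = int(time() + duration_in_seconds)
path = '/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/container/object'
key = 'mykey'

# hmac.new() needs bytes on Python 3, so encode both the key and the message.
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key.encode('utf-8'), hmac_body.encode('utf-8'), sha1).hexdigest()

url = 'https://{host}{path}?temp_url_sig={sig}&temp_url_expires={expires}'.format(
    host='swift-cluster.example.com', path=path, sig=sig, expires=expires)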
-
- Name check filter - Name Check is a filter that disallows any paths that - contain defined forbidden characters or that exceed a - defined length. - -
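A minimal proxy-server.conf sketch for enabling this filter follows. The option
names (forbidden_chars, maximum_length) are taken from the upstream name_check
middleware and the values shown are illustrative defaults, so verify them against
your Swift release:

[pipeline:main]
pipeline = healthcheck cache name_check tempauth proxy-server

[filter:name_check]
use = egg:swift#name_check
forbidden_chars = '"`<>
maximum_length = 255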
-
- Constraints - To change the OpenStack Object Storage internal limits, - update the values in the - swift-constraints section in the - swift.conf file. Use caution when - you update these values because they affect the - performance in the entire cluster. - -
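As an illustration, a swift.conf excerpt might override a few of these limits.
The option names and values below are the upstream defaults at the time of
writing and are shown only as an example; check the documented constraints for
your release before changing them:

[swift-constraints]
max_file_size = 5368709120
max_meta_name_length = 128
max_meta_value_length = 256
max_object_name_length = 1024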
-
-
- Cluster health
- Use the swift-dispersion-report tool to measure overall cluster health. This
- tool checks if a set of deliberately distributed containers and objects are
- currently in their proper places within the cluster. For instance, a common
- deployment has three replicas of each object. The health of that object can be
- measured by checking if each replica is in its proper place. If only two of the
- three replicas are in place, the object's health can be said to be at 66.66%,
- where 100% would be perfect. A single object's health, especially an older
- object, usually reflects the health of the entire partition the object is in.
- If you make enough objects on a distinct percentage of the partitions in the
- cluster, you get a good estimate of the overall cluster health.
-
- In practice, about 1% partition coverage seems to balance well between accuracy
- and the amount of time it takes to gather results. To provide this health
- value, you must create an account solely for this usage. Next, you must place
- the containers and objects throughout the system so that they are on distinct
- partitions. Use the swift-dispersion-populate tool to create random container
- and object names until they fall on distinct partitions.
-
- Last, and repeatedly for the life of the cluster, you must run the
- swift-dispersion-report tool to check the health of each container and object.
-
- These tools must have direct access to the entire cluster and ring files.
- Installing them on a proxy server suffices.
-
- The swift-dispersion-populate and swift-dispersion-report commands both use the
- same /etc/swift/dispersion.conf configuration file. Example dispersion.conf
- file:
-
-[dispersion]
-auth_url = http://localhost:8080/auth/v1.0
-auth_user = test:tester
-auth_key = testing
-
- You can use configuration options to specify the dispersion coverage, which
- defaults to 1%, retries, concurrency, and so on. However, the defaults are
- usually fine. After the configuration is in place, run the
- swift-dispersion-populate tool to populate the containers and objects
- throughout the cluster. Now that those containers and objects are in place, you
- can run the swift-dispersion-report tool to get a dispersion report or view the
- overall health of the cluster. Here is an example of a cluster in perfect
- health:
-
- $ swift-dispersion-report
-Queried 2621 containers for dispersion reporting, 19s, 0 retries
-100.00% of container copies found (7863 of 7863)
-Sample represents 1.00% of the container partition space
-
-Queried 2619 objects for dispersion reporting, 7s, 0 retries
-100.00% of object copies found (7857 of 7857)
-Sample represents 1.00% of the object partition space
-
- Now, deliberately double the weight of a device in the object ring (with
- replication turned off) and re-run the dispersion report to show what impact
- that has:
- $ swift-ring-builder object.builder set_weight d0 200
-$ swift-ring-builder object.builder rebalance
-...
-$ swift-dispersion-report
-Queried 2621 containers for dispersion reporting, 8s, 0 retries
-100.00% of container copies found (7863 of 7863)
-Sample represents 1.00% of the container partition space
-
-Queried 2619 objects for dispersion reporting, 7s, 0 retries
-There were 1763 partitions missing one copy.
-77.56% of object copies found (6094 of 7857)
-Sample represents 1.00% of the object partition space
-
- You can see the health of the objects in the cluster has gone down
- significantly.
Of course, this test environment has just four devices; in a production
- environment with many devices, the impact of one device change is much less.
- Next, run the replicators to get everything put back into place and then rerun
- the dispersion report:
-
-... start object replicators and monitor logs until they're caught up ...
-$ swift-dispersion-report
-Queried 2621 containers for dispersion reporting, 17s, 0 retries
-100.00% of container copies found (7863 of 7863)
-Sample represents 1.00% of the container partition space
-
-Queried 2619 objects for dispersion reporting, 7s, 0 retries
-100.00% of object copies found (7857 of 7857)
-Sample represents 1.00% of the object partition space
-
- Alternatively, the dispersion report can also be output in JSON format. This
- allows it to be more easily consumed by third-party utilities:
- $ swift-dispersion-report -j
-{"object": {"retries:": 0, "missing_two": 0, "copies_found": 7863, "missing_one": 0,
-"copies_expected": 7863, "pct_found": 100.0, "overlapping": 0, "missing_all": 0}, "container":
-{"retries:": 0, "missing_two": 0, "copies_found": 12534, "missing_one": 0, "copies_expected":
-12534, "pct_found": 100.0, "overlapping": 15, "missing_all": 0}}
-
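Because the JSON output uses stable keys (copies_found, copies_expected,
pct_found, and so on, as shown above), it is easy to post-process. The following
Python sketch is illustrative only; it assumes swift-dispersion-report is
installed and dispersion.conf is configured as described earlier:

import json
import subprocess

# Run the report in JSON mode and parse the per-ring statistics.
output = subprocess.check_output(['swift-dispersion-report', '-j'])
report = json.loads(output.decode('utf-8'))

for ring in ('container', 'object'):
    stats = report[ring]
    print('%s: %s of %s copies found (%.2f%%)' % (
        ring, stats['copies_found'], stats['copies_expected'], stats['pct_found']))
    if stats['pct_found'] < 100.0:
        print('  partitions missing one copy: %s' % stats['missing_one'])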
-
- Static Large Object (SLO) support - This feature is very similar to Dynamic Large Object - (DLO) support in that it enables the user to upload many - objects concurrently and afterwards download them as a - single object. It is different in that it does not rely on - eventually consistent container listings to do so. - Instead, a user-defined manifest of the object segments is - used. - For more information regarding SLO usage and support, please - see: - Static Large Objects. - -
-
-
- Container quotas
- The container_quotas middleware implements simple quotas that can be imposed on
- Object Storage containers by a user with the ability to set container metadata,
- most likely the account administrator. This can be useful for limiting the
- scope of containers that are delegated to non-admin users, exposed to
- form &POST; uploads, or just as a self-imposed sanity check.
- Any object &PUT; operations that exceed these quotas return a
- Forbidden (403) status code.
- Quotas are subject to several limitations: eventual consistency, the timeliness
- of the cached container_info (60 second TTL by default), and the inability to
- reject chunked transfer uploads that exceed the quota (though once the quota is
- exceeded, new chunked transfers are refused).
- Set quotas by adding meta values to the container. These values are validated
- when you set them, as shown in the example after this list:
-
- X-Container-Meta-Quota-Bytes: Maximum size of the container, in bytes.
-
- X-Container-Meta-Quota-Count: Maximum object count of the container.
-
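For example, the quota metadata can be set with the swift client, in the same
way account quota metadata is set later in this document; the container name and
limits below are placeholders:

$ swift post container1 -m quota-bytes:1073741824
$ swift post container1 -m quota-count:1000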
-
-
- Account quotas
- Write requests (PUT, POST) are rejected if a given account quota (in bytes) is
- exceeded, while DELETE requests are still allowed.
- The x-account-meta-quota-bytes metadata entry must be set to store and enable
- the quota. Write requests to this metadata entry are only permitted for
- resellers. There is no account quota limitation on a reseller account even if
- x-account-meta-quota-bytes is set.
- Any object PUT operations that exceed the quota return a 413 response (request
- entity too large) with a descriptive body.
- The following command uses an admin account that owns the Reseller role to set
- a quota on the test account:
- $ swift -A http://127.0.0.1:8080/auth/v1.0 -U admin:admin -K admin \
---os-storage-url http://127.0.0.1:8080/v1/AUTH_test post -m quota-bytes:10000
- Here is the stat listing of an account where a quota has been set:
- $ swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat
-Account: AUTH_test
-Containers: 0
-Objects: 0
-Bytes: 0
-Meta Quota-Bytes: 10000
-X-Timestamp: 1374075958.37454
-X-Trans-Id: tx602634cf478546a39b1be-0051e6bc7a
- This command removes the account quota:
- $ swift -A http://127.0.0.1:8080/auth/v1.0 -U admin:admin -K admin --os-storage-url http://127.0.0.1:8080/v1/AUTH_test post -m quota-bytes:
-
-
-
- Bulk delete
- Use bulk-delete to delete multiple files from an account with a single request.
- The middleware responds to DELETE requests that carry the header
- 'X-Bulk-Delete: true_value'. The body of the DELETE request is a
- newline-separated list of files to delete. The files listed must be URL encoded
- and in the form:
-
- /container_name/obj_name
-
- If all files are successfully deleted (or did not exist), the operation returns
- HTTPOk. If any files failed to delete, the operation returns HTTPBadGateway. In
- both cases, the response body is a JSON dictionary that shows the number of
- files that were successfully deleted or not found. The files that failed are
- listed.
-
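As a small illustrative sketch (the function name is hypothetical), the request
body described above can be built by URL-encoding each container and object
name and joining the entries with newlines:

from urllib.parse import quote

def build_bulk_delete_body(entries):
    # entries is a list of (container, object) tuples; each line of the
    # body has the form /container_name/obj_name, URL encoded.
    return '\n'.join('/%s/%s' % (quote(c), quote(o)) for c, o in entries)

body = build_bulk_delete_body([('photos', 'cat.jpg'), ('photos', 'summer 2013.jpg')])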
- -
- Drive audit - - The configuration items - reference a script that can be run by using - cron to watch for bad drives. If errors are - detected, it unmounts the bad drive so that OpenStack Object - Storage can work around it. It takes the following options: - - -
-
- Form post - - Middleware that enables you to upload objects to a cluster by - using an HTML form &POST;. - - - The format of the form is: - - <![CDATA[ -<form action="<swift-url>" method="POST" - enctype="multipart/form-data"> - <input type="hidden" name="redirect" value="<redirect-url>" /> - <input type="hidden" name="max_file_size" value="<bytes>" /> - <input type="hidden" name="max_file_count" value="<count>" /> - <input type="hidden" name="expires" value="<unix-timestamp>" /> - <input type="hidden" name="signature" value="<hmac>" /> - <input type="hidden" name="x_delete_at" value="<unix-timestamp>"/> - <input type="hidden" name="x_delete_after" value="<seconds>"/> - <input type="file" name="file1" /><br /> - <input type="submit" /> -</form>]]> - - - In the form: - - - - - action="<swift-url>" - - - The URL to the Object Storage destination, such as - https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix. - - - The name of each uploaded file is appended to the specified - swift-url. So, you can upload directly to the root of - container with a URL like - https://swift-cluster.example.com/v1/AUTH_account/container/. - - - Optionally, you can include an object prefix to - separate different users' uploads, such as - https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix. - - - - - method="POST" - - The form method must be &POST;. - - - - - enctype="multipart/form-data - - The enctype must be set to multipart/form-data. - - - - - name="redirect" - - The URL to which to redirect the browser after the upload - completes. The URL has status and message query parameters - added to it that indicate the HTTP status code for the upload - and, optionally, additional error information. The 2nn status code indicates success. If - an error occurs, the URL might include error information, such - as "max_file_size exceeded". - - - - - name="max_file_size" - - Required. The maximum number of bytes that can be uploaded in - a single file upload. - - - - - name="max_file_count" - - Required. The maximum number of files that can be uploaded - with the form. - - - - - name="expires" - - - The expiration date and time for the form in UNIX - Epoch time stamp format. After this date and time, the - form is no longer valid. - - - For example, 1440619048 is equivalent to - Mon, Wed, 26 Aug 2015 19:57:28 GMT. - - - - - name="signature" - - - The HMAC-SHA1 signature of the form. This sample Python code - shows how to compute the signature: - - import hmac -from hashlib import sha1 -from time import time -path = '/v1/account/container/object_prefix' -redirect = 'https://myserver.com/some-page' -max_file_size = 104857600 -max_file_count = 10 -expires = int(time() + 600) -key = 'mykey' -hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect, - max_file_size, max_file_count, expires) -signature = hmac.new(key, hmac_body, sha1).hexdigest() - The key is the value of the - X-Account-Meta-Temp-URL-Key header - on the account. - - Use the full path from the /v1/ value and - onward. - - - During testing, you can use the - swift-form-signature command-line tool to compute the - expires and signature - values. - - - - - name="x_delete_at" - - - The date and time in UNIX Epoch - time stamp format when the object will be removed. - - - For example, 1440619048 is equivalent to - Mon, Wed, 26 Aug 2015 19:57:28 GMT. - - - This attribute enables you to specify the X-Delete- - At header value in the form &POST;. 
- - - - - name="x_delete_after" - - - The number of seconds after which the object is removed. - Internally, the Object Storage system stores this value in the - X-Delete-At metadata item. This attribute - enables you to specify the X-Delete-After - header value in the form &POST;. - - - - - type="file" name="filexx" - - - Optional. One or more files to upload. Must appear after the other - attributes to be processed correctly. If attributes come after the - file attribute, they are not sent with the sub- - request because on the server side, all attributes in the file - cannot be parsed unless the whole file is read into memory and the - server does not have enough memory to service these requests. So, - attributes that follow the file attribute are - ignored. - - - - -
-
- Static web sites - When configured, this middleware serves container data - as a static web site with index file and error file - resolution and optional file listings. This mode is - normally only active for anonymous requests. - -
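For example, once the static web middleware is enabled in the proxy pipeline,
the index file, error file, and listings behavior are typically selected with
container metadata such as the following; the container name is a placeholder
and the metadata names should be verified against your Swift release:

$ swift post -m 'web-index:index.html' container1
$ swift post -m 'web-error:error.html' container1
$ swift post -m 'web-listings: true' container1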
- - -
diff --git a/doc/config-reference/object-storage/section_object-storage-general-service-conf.xml b/doc/config-reference/object-storage/section_object-storage-general-service-conf.xml deleted file mode 100644 index 37a78954a5..0000000000 --- a/doc/config-reference/object-storage/section_object-storage-general-service-conf.xml +++ /dev/null @@ -1,121 +0,0 @@ - -
- Object Storage general service configuration - - Most Object Storage services fall into two categories, Object Storage's WSGI servers - and background daemons. - - - Object Storage uses paste.deploy to manage server configurations. Read more at http://pythonpaste.org/deploy/. - - - Default configuration options are set in the `[DEFAULT]` section, - and any options specified there can be overridden in any of the - other sections when the syntax set option_name = value - is in place. - - Configuration for servers and daemons can be expressed together in - the same file for each type of server, or separately. If a required - section for the service trying to start is missing, there will be an - error. Sections not used by the service are ignored. - - - Consider the example of an Object Storage node. By convention - configuration for the object-server, object-updater, object-replicator, and - object-auditor exist - in a single file - /etc/swift/object-server.conf: - - -[DEFAULT] - -[pipeline:main] -pipeline = object-server - -[app:object-server] -use = egg:swift#object - -[object-replicator] -reclaim_age = 259200 - -[object-updater] - -[object-auditor] - - - Object Storage services expect a configuration path as the first argument: - - $ swift-object-auditor -Usage: swift-object-auditor CONFIG [options] - -Error: missing config path argument - - - If you omit the object-auditor section, this file cannot be used - as the configuration path when starting the - swift-object-auditor daemon: - - $ swift-object-auditor /etc/swift/object-server.conf -Unable to find object-auditor config section in /etc/swift/object-server.conf - - - If the configuration path is a directory instead of a file, all of - the files in the directory with the file extension ".conf" - will be combined to generate the configuration object which is - delivered to the Object Storage service. This is referred to generally as - "directory-based configuration". - - - Directory-based configuration leverages ConfigParser's native - multi-file support. Files ending in ".conf" in the given - directory are parsed in lexicographical order. File names starting - with '.' are ignored. A mixture of file and directory configuration - paths is not supported - if the configuration path is a file, only - that file will be parsed. - - - The Object Storage service management tool - swift-init - has adopted the convention of looking for - /etc/swift/{type}-server.conf.d/ if the file - /etc/swift/{type}-server.conf file does not - exist. - - - When using directory-based configuration, if the same option under - the same section appears more than once in different files, the last - value parsed is said to override previous occurrences. You can - ensure proper override precedence by prefixing the files in the - configuration directory with numerical values, as in the following - example file layout: - - -/etc/swift/ - default.base - object-server.conf.d/ - 000_default.conf -> ../default.base - 001_default-override.conf - 010_server.conf - 020_replicator.conf - 030_updater.conf - 040_auditor.conf - - - You can inspect the resulting combined configuration object using - the swift-config command-line tool. - - All the services of an Object Store deployment share a common - configuration in the [swift-hash] section of the - /etc/swift/swift.conf file. The - and - values must be identical on all - the nodes. - -
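The override behavior of the directory-based configuration described above can
be illustrated with Python's ConfigParser, whose multi-file support Object
Storage leverages; the file names follow the example layout above and the
bind_port option is shown only for illustration:

import configparser

parser = configparser.ConfigParser()
parser.read([
    '/etc/swift/object-server.conf.d/000_default.conf',
    '/etc/swift/object-server.conf.d/001_default-override.conf',
    '/etc/swift/object-server.conf.d/010_server.conf',
])
# For any option defined in more than one file, the value from the
# last file parsed (lexicographically) wins.
print(parser.get('DEFAULT', 'bind_port', fallback='6000'))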
diff --git a/doc/config-reference/object-storage/section_object-storage-listendpoints.xml b/doc/config-reference/object-storage/section_object-storage-listendpoints.xml deleted file mode 100644 index 3a62255629..0000000000 --- a/doc/config-reference/object-storage/section_object-storage-listendpoints.xml +++ /dev/null @@ -1,28 +0,0 @@ - -
- Endpoint listing middleware - The endpoint listing middleware enables third-party services that use data locality - information to integrate with OpenStack Object Storage. This middleware reduces network - overhead and is designed for third-party services that run inside the firewall. Deploy this - middleware on a proxy server because usage of this middleware is not authenticated. - Format requests for endpoints, as follows: - /endpoints/{account}/{container}/{object} -/endpoints/{account}/{container} -/endpoints/{account} - Use the configuration - option in the proxy_server.conf file to - customize the /endpoints/ path. - Responses are JSON-encoded lists of endpoints, as - follows: - http://{server}:{port}/{dev}/{part}/{acc}/{cont}/{obj} -http://{server}:{port}/{dev}/{part}/{acc}/{cont} -http://{server}:{port}/{dev}/{part}/{acc} - An example response is: - http://10.1.1.1:6000/sda1/2/a/c2/o1 -http://10.1.1.1:6000/sda1/2/a/c2 -http://10.1.1.1:6000/sda1/2/a -
diff --git a/doc/config-reference/orchestration/section_orchestration-api.xml b/doc/config-reference/orchestration/section_orchestration-api.xml deleted file mode 100644 index 8ecb77c980..0000000000 --- a/doc/config-reference/orchestration/section_orchestration-api.xml +++ /dev/null @@ -1,16 +0,0 @@ -
- - Configure APIs - The following options allow configuration of the APIs that - Orchestration supports. Currently this includes compatibility APIs for - CloudFormation and CloudWatch and a native API. - - - - - -
diff --git a/doc/config-reference/orchestration/section_orchestration-clients.xml b/doc/config-reference/orchestration/section_orchestration-clients.xml deleted file mode 100644 index 2bb8517f11..0000000000 --- a/doc/config-reference/orchestration/section_orchestration-clients.xml +++ /dev/null @@ -1,22 +0,0 @@ -
- - Configure Clients - The following options allow configuration of the clients that - Orchestration uses to talk to other services. - - - - - - - - - - - - -
diff --git a/doc/config-reference/orchestration/section_orchestration-rpc.xml b/doc/config-reference/orchestration/section_orchestration-rpc.xml deleted file mode 100644 index cf4c7051e4..0000000000 --- a/doc/config-reference/orchestration/section_orchestration-rpc.xml +++ /dev/null @@ -1,108 +0,0 @@ -
- - Configure the RPC messaging system - OpenStack projects use an open standard for messaging - middleware known as AMQP. This messaging middleware enables the - OpenStack services that run on multiple servers to talk to each - other. OpenStack Oslo RPC supports three implementations of AMQP: - RabbitMQ, - Qpid, and - ZeroMQ. - -
- Configure RabbitMQ - - OpenStack Oslo RPC uses RabbitMQ - by default. Use these options to configure the - RabbitMQ message system. The - option is optional as long as - RabbitMQ is the default messaging - system. However, if it is included in the configuration, you must - set it to - heat.openstack.common.rpc.impl_kombu. - - - rpc_backend = heat.openstack.common.rpc.impl_kombu - - Use these options to configure the - RabbitMQ messaging system. You can - configure messaging communication for different installation - scenarios, tune retries for RabbitMQ, and define the size of the - RPC thread pool. To monitor notifications through RabbitMQ, you - must set the option to - heat.openstack.common.notifier.rpc_notifier in the - heat.conf file: - -
-
- Configure Qpid - Use these options to configure the - Qpid messaging system for OpenStack - Oslo RPC. Qpid is not the default - messaging system, so you must enable it by setting the - option in the - heat.conf file: - rpc_backend=heat.openstack.common.rpc.impl_qpid - This critical option points the compute nodes to the - Qpid broker (server). Set the - option to the host name where - the broker runs in the heat.conf - file. - - The option accepts a host - name or IP address value. - - qpid_hostname = hostname.example.com - - If the Qpid broker listens on a - port other than the AMQP default of 5672, you - must set the option to that - value: - - qpid_port = 12345 - - If you configure the Qpid broker - to require authentication, you must add a user name and password - to the configuration: - - qpid_username = username -qpid_password = password - By default, TCP is used as the transport. To enable SSL, set - the option: - - qpid_protocol = ssl - - Use these additional options to configure the Qpid messaging - driver for OpenStack Oslo RPC. These options are used - infrequently. - - - -
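Putting the options shown above together, a heat.conf excerpt for a Qpid
deployment might look like the following sketch; the host name, port, and
credentials are placeholders:

[DEFAULT]
rpc_backend = heat.openstack.common.rpc.impl_qpid
qpid_hostname = hostname.example.com
qpid_port = 5672
qpid_username = username
qpid_password = password
qpid_protocol = ssl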
-
- Configure ZeroMQ - Use these options to configure the - ZeroMQ messaging system for - OpenStack Oslo RPC. ZeroMQ is not the - default messaging system, so you must enable it by setting the - option in the - heat.conf file: - -
-
-
- Configure messaging
- Use these common options to configure the RabbitMQ, Qpid, and
- ZeroMQ messaging drivers:
-
-
-
diff --git a/doc/config-reference/pom.xml b/doc/config-reference/pom.xml deleted file mode 100644 index 0ce725b49f..0000000000 --- a/doc/config-reference/pom.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - org.openstack.docs - parent-pom - 1.0.0-SNAPSHOT - ../pom.xml - - 4.0.0 - openstack-config-reference - jar - OpenStack Configuration Reference - - - local - 1 - - - - - - - - com.rackspace.cloud.api - clouddocs-maven-plugin - - - - generate-webhelp - - generate-webhelp - - generate-sources - - - ${comments.enabled} - os-config-guide - 1 - UA-17511903-1 - - appendix toc,title - article/appendix nop - article toc,title - book toc,title,figure,table,example,equation - chapter toc,title - section toc - part toc,title - qandadiv toc - qandaset toc - reference toc,title - set toc,title - - - 0 - 1 - 0 - ${release.path.name}/config-reference - config-reference-${release.path.name} - - - - - - true - . - - bk-config-ref.xml - - http://docs.openstack.org/${release.path.name}/config-reference/content - ${basedir}/../glossary/glossary-terms.xml - openstack - - - - - diff --git a/doc/config-ref-rst/setup.cfg b/doc/config-reference/setup.cfg similarity index 96% rename from doc/config-ref-rst/setup.cfg rename to doc/config-reference/setup.cfg index 1f7b6a8079..40846521db 100644 --- a/doc/config-ref-rst/setup.cfg +++ b/doc/config-reference/setup.cfg @@ -1,5 +1,5 @@ [metadata] -name = architecturedesignguide +name = configurationreference summary = OpenStack Configuration Reference author = OpenStack author-email = openstack-docs@lists.openstack.org diff --git a/doc/config-ref-rst/setup.py b/doc/config-reference/setup.py similarity index 100% rename from doc/config-ref-rst/setup.py rename to doc/config-reference/setup.py diff --git a/doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml b/doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml deleted file mode 100644 index 5153ef8082..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/emc-isilon-driver.xml +++ /dev/null @@ -1,137 +0,0 @@ -
- EMC Isilon driver - - The EMC Shared File Systems driver framework (EMCShareDriver) - utilizes EMC - storage products to provide shared file systems to OpenStack. The EMC - driver is a plug-in based driver which is designed to use - different plug-ins to manage different EMC storage products. - - - The Isilon driver is a plug-in for the EMC framework which allows - the Shared File Systems service to interface with an Isilon back end to - provide a shared filesystem. The EMC driver framework with the - Isilon plug-in is referred to as the "Isilon Driver" in - this document. - - - This Isilon Driver interfaces with an Isilon cluster via the REST - Isilon Platform API (PAPI) and the RESTful Access to Namespace API - (RAN). - - - Requirements - - - - Isilon cluster running OneFS 7.2 or higher - - - - - - Supported operations - - The following operations will be supported on an Isilon cluster: - - - - - Create CIFS/NFS share. - - - - - Delete CIFS/NFS share. - - - - - Allow CIFS/NFS share access. - - - - - Only IP access type is supported for NFS and CIFS. - - - - - * Only RW access is supported. - - - - - Deny CIFS/NFS share access - - - - - Create snapshot - - - - - Delete snapshot - - - - - Create share from snapshot - - - - - - Backend configuration - - The following parameters need to be configured in the Shared - File Systems service configuration file for the Isilon driver: - -
-
- share_driver = manila.share.drivers.emc.driver.EMCShareDriver
- emc_share_backend = isilon
- emc_nas_server = <IP address of Isilon cluster>
- emc_nas_login = <username>
- emc_nas_password = <password>
- isilon_share_root_dir = <directory on Isilon where shares will be created>
-
- - Restart of manila-share service is needed for the configuration - changes to take effect. - -
- - Restrictions - - The Isilon driver has the following restrictions: - - - - - Only IP access type is supported for NFS and CIFS. - - - - - Only FLAT network is supported. - - - - - Quotas are not yet supported. - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml b/doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml deleted file mode 100644 index 74522aaee8..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/emc-vnx-driver.xml +++ /dev/null @@ -1,468 +0,0 @@ -
- EMC VNX driver - - The EMC Shared File Systems service driver framework (EMCShareDriver) - utilizes the EMC storage products to provide the shared file systems - to OpenStack. The EMC driver is a plug-in based driver which is designed - to use different plug-ins to manage different EMC storage products. - - - The VNX plug-in is the plug-in which manages the VNX to provide shared - filesystems. The EMC driver framework with the VNX plug-in is referred to as - the VNX driver in this document. - - - This driver performs the operations on VNX by XMLAPI and the file - command line. Each back end manages one Data Mover of VNX. Multiple - Shared File Systems service back ends need to be configured to manage - multiple Data Movers. - - - Requirements - - - - VNX OE for File version 7.1 or higher - - - - - VNX Unified, File only, or Gateway system with a single storage - back end - - - - - The following licenses should be activated on VNX for File: - - - - - CIFS - - - - - NFS - - - - - SnapSure (for snapshot) - - - - - ReplicationV2 (for create share from snapshot) - - - - - - - - Supported operations - - The following operations will be supported on the VNX array: - - - - - Create CIFS/NFS share. - - - - - Delete CIFS/NFS share. - - - - - Allow CIFS/NFS share access. - - - - - Only IP access type is supported for NFS. - - - - - Only user access type is supported for CIFS. - - - - - - - Deny CIFS/NFS share access. - - - - - Create snapshot. - - - - - Delete snapshot. - - - - - Create share from snapshot. - - - - - While the generic driver creates shared filesystems based on - cinder volumes attached to nova VMs, the VNX driver performs - similar operations using the Data Movers on the array. - - - - Pre-configurations on VNX - - - - Enable Unicode on Data Mover. - - - - - The VNX driver requires that the Unicode is enabled on Data Mover. - - - CAUTION: After enabling Unicode, you cannot disable it. If there - are some filesystems created before Unicode is enabled on the VNX, - consult the storage administrator before enabling Unicode. - - - To check the Unicode status on Data Mover, use the following VNX - File command on the VNX control station: - -
-
- server_cifs <mover_name> | head
-
- where:
- mover_name = <name of the Data Mover>
-
- - Check the value of I18N mode field. UNICODE mode is shown as - I18N mode = - UNICODE - - - To enable the Unicode for Data Mover: - -
-
- uc_config -on -mover <mover_name>
-
- where:
- mover_name = <name of the Data Mover>
-
- - Refer to the document - Using International Character Sets on VNX for File on [EMC support - site](http://support.emc.com) - for more information. - - - - - Enable CIFS service on Data Mover. - - - - - Ensure the CIFS service is enabled on the Data Mover which is - going to be managed by VNX driver. - - - To start the CIFS service, use the following command: - -
-
- server_setup <mover_name> -Protocol cifs -option start[=<n>]
-
- where:
- <mover_name> = <name of the Data Mover>
- [=<n>] = <number of threads for CIFS users>
-
- - Note: If there is 1 GB of memory on the Data Mover, the default is - 96 threads; however, if there is over 1 GB of memory, the default - number of threads is 256. - - - To check the CIFS service status, use this command: - -
-
- server_cifs <mover_name> | head
-
- where:
- <mover_name> = <name of the Data Mover>
-
- - The command output will show the number of CIFS threads started. - - - - - NTP settings on Data Mover. - - - - - VNX driver only supports CIFS share creation with share network - which has an Active Directory security-service associated. - - - Creating CIFS share requires that the time on the Data Mover is in - sync with the Active Directory domain so that the CIFS server can - join the domain. Otherwise, the domain join will fail when - creating share with this security service. There is a limitation - that the time of the domains used by security-services even for - different tenants and different share networks should be in sync. - Time difference should be less than 10 minutes. - - - It is recommended to set the NTP server to the same public NTP - server on both the Data Mover and domains used in security - services to ensure the time is in sync everywhere. - - - Check the date and time on Data Mover: - -
-
- server_date <mover_name>
-
- where:
- mover_name = <name of the Data Mover>
-
- - Set the NTP server for Data Mover: - -
-
- server_date <mover_name> timesvc start ntp <host> [<host> ...]
-
- where:
- mover_name = <name of the Data Mover>
- host = <IP address of the time server host>
-
- - Note: The host must be running the NTP protocol. Only 4 host - entries are allowed. - - - - - Configure User Mapping on the Data Mover. - - - - - Before creating CIFS share using VNX driver, you must select a - method of mapping Windows SIDs to UIDs and GIDs. EMC recommends - using usermapper in single protocol (CIFS) environment which is - enabled on VNX by default. - - - To check usermapper status, use this command syntax: - -
-
- server_usermapper <movername>
-
- where:
- <movername> = <name of the Data Mover>
-
- - If usermapper is not started, the following command can be used to - start the usermapper: - -
-
- server_usermapper <movername> -enable
-
- where:
- <movername> = <name of the Data Mover>
-
- - For a multiple protocol environment, refer to - Configuring VNX User Mapping on [EMC support - site](http://support.emc.com) - for additional information. - - - - - Network Connection. - - - - - In the current release, the share created by the VNX driver uses the - first network device (physical port on NIC) of Data Mover to - access the network. - - - Go to Unisphere to check the device list: Settings -> Network - -> Settings for File (Unified system only) -> Device. - -
- - Backend configuration - - The following parameters need to be configured in - /etc/manila/manila.conf for the VNX driver: - -
-
- emc_share_backend = vnx
- emc_nas_server = <IP address>
- emc_nas_password = <password>
- emc_nas_login = <user>
- emc_nas_server_container = <Data Mover name>
- emc_nas_pool_name = <pool name>
- share_driver = manila.share.drivers.emc.driver.EMCShareDriver
-
- - - - emc_share_backend is the plug-in name. Set it to - vnx for the VNX driver. - - - - - emc_nas_server is the control station IP address of the VNX - system to be managed. - - - - - emc_nas_password and emc_nas_login fields are used to provide - credentials to the VNX system. Only local users of VNX File is - supported. - - - - - emc_nas_server_container field is the name of the Data Mover - to serve the share service. - - - - - emc_nas_pool_name is the pool name user wants to create volume - from. The pools can be created using Unisphere for VNX. - - - - - Restart of the manila-share service is needed for the configuration - changes to take effect. - -
- - Restrictions - - The VNX driver has the following restrictions: - - - - - Only IP access type is supported for NFS. - - - - - Only user access type is supported for CIFS. - - - - - Only FLAT network and VLAN network are supported. - - - - - VLAN network is supported with limitations. The neutron - subnets in different VLANs that are used to create share - networks cannot have overlapped address spaces. Otherwise, VNX - may have a problem to communicate with the hosts in the VLANs. - To create shares for different VLANs with same subnet address, - use different Data Movers. - - - - - The 'Active Directory' security service is the only supported - security service type and it is required to create CIFS - shares. - - - - - Only one security service can be configured for each share - network. - - - - - Active Directory domain name of the 'active_directory' - security service should be unique even for different tenants. - - - - - The time on Data Mover and the Active Directory domains used - in security services should be in sync (time difference should - be less than 10 minutes). It is recommended to use same NTP - server on both the Data Mover and Active Directory domains. - - - - - On VNX the snapshot is stored in the SavVols. VNX system - allows the space used by SavVol to be created and extended - until the sum of the space consumed by all SavVols on the - system exceeds the default 20% of the total space available on - the system. If the 20% threshold value is reached, an alert - will be generated on VNX. Continuing to create snapshot will - cause the old snapshot to be inactivated (and the snapshot - data to be abandoned). The limit percentage value can be - changed manually by storage administrator based on the storage - needs. Administrator is recommended to configure the - notification on the SavVol usage. Refer to Using VNX SnapSure - document on [EMC support - site](http://support.emc.com) - for more information. - - - - - VNX has limitations on the overall numbers of Virtual Data - Movers, filesystems, shares, checkpoints, etc. Virtual - Data Mover(VDM) is created by the VNX driver on the VNX to - serve as the Shared File Systems service share server. - Similarly, filesystem is - created, mounted, and exported from the VDM over CIFS or NFS - protocol to serve as the Shared File Systems service share. - The VNX checkpoint serves as the Shared File Systems service - share snapshot. Refer to the - NAS Support Matrix document on [EMC support - site](http://support.emc.com) - for the limitations and configure the quotas accordingly. - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/generic-driver.xml b/doc/config-reference/shared-file-systems/drivers/generic-driver.xml deleted file mode 100644 index 41aa52269d..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/generic-driver.xml +++ /dev/null @@ -1,211 +0,0 @@ -
- Generic approach for share provisioning - - The Shared File Systems service can be configured to - use nova VMs and cinder volumes. There are two modules that handle - them in the Shared File Systems service: - - - service_instance module creates VMs in nova - with predefined image called service image. This module can be used - by any driver for provisioning of service VMs to be able to - separate share resources among tenants. - - - 'generic' module operates - with cinder volumes and VMs created by - service_instance module, - then creates shared filesystems based on volumes attached to VMs. - - - - Network configurations - - Each driver can handle networking in its own way, see: - https://wiki.openstack.org/wiki/manila/Networking. - - - - - One of two possible configurations can be chosen for share - provisioning - - - - using service_instance module: - - - - - - - Service VM has one net interface from net that is connected - to public router. - - - - For successful creation of share, user network should be - connected to public router too. - - - - - - - Service VM has two net interfaces, first one connected to - service network, - - - - second one connected directly to user's network. - - - - - - - Requirements for service image - - - - Linux based distro - - - - - NFS server - - - - - Samba server >=3.2.0, that can be configured by data stored - in registry - - - - - SSH server - - - - - Two net interfaces configured to DHCP (see network approaches) - - - - - 'exportfs' and 'net conf' libraries used for share actions - - - - - - - Following files will be used, so if their paths differ one - needs to create at - - - - least symlinks for them: - - - - - - - - /etc/exports (permanent file with NFS exports) - - - - - /var/lib/nfs/etab (temporary file with NFS exports used by - 'exportfs') - - - - - /etc/fstab (permanent file with mounted filesystems) - - - - - /etc/mtab (temporary file with mounted filesystems used by - 'mount') - - - - - - - - Supported shared filesystems - - - - NFS (access by IP) - - - - - CIFS (access by IP) - - - - - - Known restrictions - - - - One of nova's configurations only allows 26 shares per server. - This limit comes from the maximum number of virtual PCI - interfaces that are used for block device attaching. There are - 28 virtual PCI interfaces, in this configuration, two of them - are used for server needs and other 26 are used for attaching - block devices that are used for shares. - - - - - Juno version works only with neutron. Each share should be - created with neutron-net and neutron-subnet IDs provided via - share-network entity. - - - - - Juno version handles security group, flavor, image, keypair - for nova VM and also creates service networks, but does not - use availability zones for nova VMs and volume types for - cinder block devices. - - - - - Juno version does not use security services data provided with - share-network. These data will be just ignored. - - - - - Liberty version adds a share extend capability. Share access - will be briefly interrupted during an extend operation. - - - - - Liberty version adds a share shrink capability, but this - capability is not effective because generic driver shrinks - only filesystem size and doesn't shrink the size of cinder - volume. - - - - - - Driver options - The following table contains the configuration options specific to this driver. - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml b/doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml deleted file mode 100644 index 21f8e4fcef..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/glusterfs-driver.xml +++ /dev/null @@ -1,217 +0,0 @@ -
- GlusterFS driver - - GlusterFS driver uses GlusterFS, an open source distributed file - system, as the storage back end for serving file shares to the - Shared File Systems clients. - - - Supported shared filesystems - - - - NFS (access by IP) - - - - - - Supported operations - - - - Create NFS share. - - - - - Delete NFS share. - - - - - Allow NFS share access. - - - - - only 'rw' access - - - - - - - Deny NFS share access. - - - - - - Requirements - - - - Install glusterfs-server package, version >= 3.5.x, on the - storage back end. - - - - - Install NFS-Ganesha, version >=2.1, if using NFS-Ganesha as - the NFS server for the GlusterFS back end. - - - - - Install glusterfs and glusterfs-fuse package, version - >=3.5.x, on the Shared File Systems service host. - - - - - Establish network connection between the Shared File Systems service - host and the storage back end. - - - - - - Shared File Systems service driver configuration setting - - The following parameters in the Shared File Systems service's - configuration file need to be set: - - - - - share_driver = - manila.share.drivers.glusterfs.GlusterfsShareDriver - - - - - If the back-end GlusterFS server runs on the Shared File Systems - service host machine, - - - - - glusterfs_target = - <glustervolserver>:/<glustervolid> - - - - - And if the back-end GlusterFS server runs remotely, - - - - - glusterfs_target = - <username>@<glustervolserver>:/<glustervolid> - - - - - - - The following configuration parameters are optional: - - - - - - - glusterfs_nfs_server_type = - - - - <NFS server type used by the GlusterFS - back end, Gluster or Ganesha. Gluster is the default - type> - - - - - - - - - - glusterfs_mount_point_base = - - - - <base path of GlusterFS volume mounted on the - Shared File Systems service host> - - - - - - - - glusterfs_path_to_private_key = <path to Shared File Systems - service host's private key file> - - - - - glusterfs_server_password = <password of remote GlusterFS - server machine> - - - - - - Known restrictions - - - - The driver does not support network segmented multi-tenancy - model, but instead works over a flat network, where the - tenants share a network. - - - - - If NFS Ganesha is the NFS server used by the GlusterFS - back end, then the shares can be accessed by NFSv3 and v4 - protocols. However, if Gluster NFS is used by the GlusterFS - back end, then the shares can only be accessed by NFSv3 - protocol. - - - - - All Shared File Systems service shares, which map to - subdirectories within a - GlusterFS volume, are currently created within a single - GlusterFS volume of a GlusterFS storage pool. - - - - - The driver does not provide read-only access level for shares. - - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
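Putting the parameters listed earlier in this section together, a back-end
section of manila.conf for a remote GlusterFS server might look like the
following sketch; the section name and all values are placeholders:

[glusterfs1]
share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver
glusterfs_target = remoteuser@glustervolserver:/glustervolid
glusterfs_nfs_server_type = Gluster
glusterfs_mount_point_base = /var/lib/manila/mnt
glusterfs_path_to_private_key = /etc/manila/ssh/id_rsa
glusterfs_server_password = <password of remote GlusterFS server machine>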
diff --git a/doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml b/doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml deleted file mode 100644 index b59ffb7054..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/glusterfs-native-driver.xml +++ /dev/null @@ -1,270 +0,0 @@ -
- GlusterFS Native driver - - GlusterFS Native driver uses GlusterFS, an open source distributed - file system, as the storage back end for serving file shares to - Shared File Systems service clients. - - - A Shared File Systems service share is a GlusterFS volume. - This driver uses flat-network - (share-server-less) model. Instances directly talk with the - GlusterFS back end storage pool. The instances use 'glusterfs' - protocol to mount the GlusterFS shares. Access to each share is - allowed via TLS Certificates. Only the instance which has the TLS - trust established with the GlusterFS back end can mount and hence use - the share. Currently only 'rw' access is supported. - - - Network approach - - L3 connectivity between the storage back end and the host running - the Shared File Systems share service should exist. - - - - Supported shared filesystems - - - - GlusterFS (access by TLS Certificates (cert - access type)) - - - - - - Multi-tenancy model - - The driver does not support network segmented multi-tenancy model. - Instead multi-tenancy is supported using tenant specific TLS - certificates. - - - - Supported operations - - - - Create GlusterFS share. - - - - - Delete GlusterFS share. - - - - - Allow GlusterFS share access (rw). - - - - - Deny GlusterFS share access. - - - - - Create GlusterFS snapshot. - - - - - Delete GlusterFS snapshot. - - - - - - Requirements - - - - Install glusterfs-server package, version >= 3.6.x, on the - storage back end. - - - - - Install glusterfs and glusterfs-fuse package, version - >=3.6.x, on the Shared File Systems service host. - - - - - Establish network connection between the Shared File Systems - service host and the storage back end. - - - - - - Shared File Systems service driver configuration setting - - The following parameters in the Shared File Systems service's - configuration file need to be - set: - - - - - - - share_driver = - - - - manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver - - - - - - - - - - glusterfs_servers = List of GlusterFS servers which - provide volumes - - - - that can be used to create shares. The servers are - expected to be of distinct Gluster clusters (ie. should - not be gluster peers). Each server should be of the form - [<remoteuser>@]<glustervolserver>. - - - The optional <remoteuser>@ part - of the server URI indicates SSH access for cluster - management (see related optional parameters below). If - it is not given, direct command line management is - performed (the Shared File Systems service host is - assumed to be part of the - GlusterFS cluster the server belongs to). - - - - - - - - - - glusterfs_volume_pattern = Regular expression template - - - - used to filter GlusterFS volumes for share creation. The - regex template can contain the #{size} parameter which - matches a number (sequence of digits) and the value - shall be interpreted as size of the volume in GB. - Examples: manila-share-volume-\d+$, - manila-share-volume-#{size}G-\d+$; - with matching volume names, respectively: - manila-share-volume-12, - manila-share-volume-3G-13". In - latter example, the number that matches - #{size}, that is, 3, is an indication - that the size of volume is 3G. 
- - - - - - - - The following configuration parameters are optional: - - - - - - - glusterfs_mount_point_base = - - - - <base path of GlusterFS - volume mounted on the Shared File Systems service host> - - - - - - - - glusterfs_path_to_private_key = <path to Shared File Systems - service host's private key file> - - - - - glusterfs_server_password = <password of remote GlusterFS - server machine> - - - - - - Known restrictions - - - - GlusterFS volumes are not created on demand. A pre-existing - set of GlusterFS volumes should be supplied by the GlusterFS - cluster(s), conforming to the naming convention encoded by - glusterfs_volume_pattern. However, the - GlusterFS endpoint is allowed to extend this set any time (so - the Shared File Systems service and GlusterFS endpoints are - expected to communicate - volume supply/demand out-of-band). - glusterfs_volume_pattern can include a size - hint (with #{size} syntax), which, if - present, requires the GlusterFS end to indicate the size of - the shares in GB in the name. (On share creation, the - Shared File Systems service picks - volumes at least as big as the requested - one.) - - - - - Certificate setup (also known as trust setup) between instance and - storage back end is out of band of the Shared File Systems service. - - - - - Support for 'create_share_from_snapshot' is planned for - Liberty release. - - - - - For the Shared File Systems service to use GlusterFS volumes, - the name of the trashcan - directory in GlusterFS volumes must not be changed from the - default. - - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml b/doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml deleted file mode 100644 index c0e78c82ba..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/hdfs-native-driver.xml +++ /dev/null @@ -1,204 +0,0 @@ -
- HDFS native driver - - HDFS native driver is a plug-in based on the Shared File Systems - service, which uses Hadoop distributed file system (HDFS), a - distributed file system designed to hold very large amounts of data, - and provide high-throughput access to the data. - - - A Shared File Systems service share in this driver is a subdirectory - in hdfs root - directory. Instances talk directly to the HDFS storage back end with - 'hdfs' protocol. And access to each share is allowed by user based - access type, which is aligned with HDFS ACLs to support access - control of multiple users and groups. - - - Network configuration - - The storage back end and Shared File Systems service hosts should - be in a flat network, - otherwise, the L3 connectivity between them should exist. - - - - Supported shared filesystems - - - - HDFS (authentication by user) - - - - - - Supported operations - - - - Create HDFS share. - - - - - Delete HDFS share. - - - - - Allow HDFS share access. - - - - - Only support user access type. - - - - - * Support level of access (ro/rw). - - - - - Deny HDFS share access. - - - - - Create snapshot. - - - - - Delete snapshot. - - - - - Create share from snapshot. - - - - - - Requirements - - - - Install HDFS package, version >= 2.4.x, on the storage - back end. - - - - - To enable access control, the HDFS file system must have ACLs - enabled. - - - - - Establish network connection between the Shared File Systems - service host and storage back end. - - - - - - Shared File Systems service driver configuration - - - - share_driver = - manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver - - - - - - - hdfs_namenode_ip = the IP address of the HDFS namenode, - and only single - - - - namenode is supported now - - - - - - - - hdfs_namenode_port = the port of the HDFS namenode service - - - - - hdfs_ssh_port = HDFS namenode SSH port - - - - - hdfs_ssh_name = HDFS namenode SSH login name - - - - - - - hdfs_ssh_pw = HDFS namenode SSH login password, this - parameter is not - - - - necessary, if the following hdfs_ssh_private_key is - configured - - - - - - - - hdfs_ssh_private_key = Path to the HDFS namenode private key - to ssh login - - - - - - Known restrictions - - - - This driver does not support network segmented multi-tenancy - model. Instead multi-tenancy is supported by the tenant - specific user authentication. - - - - - Only support for single HDFS namenode in Kilo release. - - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml b/doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml deleted file mode 100644 index 5837665df9..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/hpe-3par-share-driver.xml +++ /dev/null @@ -1,499 +0,0 @@ -
- HPE 3PAR driver - - The HPE 3PAR driver provides NFS and CIFS shared file systems - to OpenStack using HPE 3PAR's File Persona capabilities. - - - Supported operations - - The following operations are supported with HPE 3PAR File Persona: - - - - - Create/delete NFS and CIFS shares. - - - - - Shares are not accessible until access rules allow access. - - - - - - - Allow/deny NFS share access. - - - - - IP access rules are required for NFS share access. - - - - - User access rules are not allowed for NFS shares. - - - - - Access level (RW/RO) is ignored. - - - - - Shares created from snapshots are always read-only. - - - - - Shares not created from snapshots are read-write (and - subject to ACLs). - - - - - - - Allow/deny CIFS share access. - - - - - Both IP and user access rules are required for CIFS share - access. - - - - - User access requires a 3PAR local user (LDAP and AD is not - yet supported). - - - - - Access level (RW/RO) is ignored. - - - - - Shares created from snapshots are always read-only. - - - - - Shares not created from snapshots are read-write (and - subject to ACLs). - - - - - - - Create/delete snapshots. - - - - - Create shares from snapshots. - - - - - Shares created from snapshots are always read-only. - - - - - - - Share networks are not supported. Shares are created directly on - the 3PAR without the use of a share server or service VM. Network - connectivity is setup outside of the Shared File Systems service. - - - - Requirements - - On the system running the manila-share service: - - - - - python-3parclient version 4.0.0 or newer from PyPI. - - - - - On the HPE 3PAR array: - - - - - HPE 3PAR Operating System software version 3.2.1 MU3 or higher. - - - - - A license that enables the File Persona feature. - - - - - The array class and hardware configuration must support File. - Persona - - - - - - Pre-configuration on the HPE 3PAR - - - - HPE 3PAR File Persona must be initialized and started (startfs). - - - - - A File Provisioning Group (FPG) must be created for use with - the Shared File Systems service. - - - - - A Virtual File Server (VFS) must be created for the FPG. - - - - - The VFS must be configured with an appropriate share export IP - address. - - - - - A local user in the Administrators group is needed for CIFS - shares. - - - - - - Backend configuration - - The following parameters need to be configured in the Shared - File Systems service configuration file for the HPE 3PAR driver: - - - - - share_backend_name = <back end name to enable> - - - - - share_driver = - manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver - - - - - driver_handles_share_servers = False - - - - - hpe3par_fpg = <FPG to use for share creation> - - - - - hpe3par_share_ip_address = <IP address to use for share - export location> - - - - - hpe3par_san_ip = <IP address for SSH access to the SAN - controller> - - - - - hpe3par_api_url = <3PAR WS API Server URL> - - - - - hpe3par_username = <3PAR username with the 'edit' role> - - - - - hpe3par_password = <3PAR password for the user specified in - hpe3par_username> - - - - - hpe3par_san_login = <Username for SSH access to the SAN - controller> - - - - - hpe3par_san_password = <Password for SSH access to the SAN - controller> - - - - - hpe3par_debug = <False or True for extra debug logging> - - - - - The hpe3par_share_ip_address must be a valid IP address for the - configured FPG's VFS. This IP address is used in export locations - for shares that are created. 
Networking must be configured to - allow connectivity from clients to shares. - - - Restart of manila-share service is needed for the configuration - changes to take effect. - - - - Network approach - - Connectivity between the storage array (SSH/CLI and WSAPI) and the - Shared File Systems service host is required for share management. - - - Connectivity between the clients and the VFS is required for - mounting and using the shares. This includes: - - - - - Routing from the client to the external network - - - - - Assigning the client an external IP address (e.g., a floating - IP) - - - - - Configuring the Shared File Systems service host networking - properly for IP forwarding - - - - - Configuring the VFS networking properly for client subnets - - - - - - Share types - - When creating a share, a share type can be specified to determine - where and how the share will be created. If a share type is not - specified, the default_share_type set in the Shared File Systems - service configuration - file is used. - - - The Shared File Systems service requires that the share type - includes the - driver_handles_share_servers extra-spec. This ensures that the - share will be created on a back end that supports the requested - driver_handles_share_servers (share networks) capability. For the - HPE 3PAR driver, this must be set to False. - - - Another common Shared File Systems service extra-spec used to determine - where a share - is created is share_backend_name. When this extra-spec is defined - in the share type, the share will be created on a back end with a - matching share_backend_name. - - - The HPE 3PAR driver automatically reports capabilities based on the - FPG used for each back end. Share types with extra specs can be - created by an administrator to control which share types are - allowed to use FPGs with or without specific capabilities. The - following extra-specs are used with the capabilities filter and - the HPE 3PAR driver: - - - - - hpe3par_flash_cache = '<is> True' or '<is> False' - - - - - thin_provisioning = '<is> True' or '<is> False' - - - - - dedupe = '<is> True' or '<is> False' - - - - - hpe3par_flash_cache will be reported as True for back ends that have - 3PAR's Adaptive Flash Cache enabled. - - - thin_provisioning will be reported as True for back ends that use - thin provisioned volumes. FPGs that use fully provisioned volumes - will report False. Backends that use thin provisioning also - support the Shared File Systems service's over-subscription feature. - - - dedupe will be reported as True for back ends that use - deduplication technology. - - - Scoped extra-specs are used to influence vendor-specific - implementation details. Scoped extra-specs use a prefix followed - by a colon. For HPE 3PAR these extra-specs have a prefix of hpe3par. - - - The following HPE 3PAR extra-specs are used when creating CIFS - (SMB) shares: - - - - - hpe3par:smb_access_based_enum = true or false - - - - - hpe3par:smb_continuous_avail = true or false - - - - - hpe3par:smb_cache = off, manual, optimized or auto - - - - - smb_access_based_enum (Access Based Enumeration) specifies if - users can see only the files and directories to which they have - been allowed access on the shares. The default is false. - - - smb_continuous_avail (Continuous Availability) specifies if SMB3 - continuous availability features should be enabled for this share. - If not specified, the default is true. - - - smb_cache specifies client-side caching for offline files. 
Valid - values are: - - - - - `off`: The client must not cache any files from this share. - The share is configured to disallow caching. - - - - - `manual`: The client must allow only manual caching for the - files open from this share. - - - - - `optimized`: The client may cache every file that it opens - from this share. Also, the client may satisfy the file - requests from its local cache. The share is configured to - allow automatic caching of programs and documents. - - - - - `auto`: The client may cache every file that it opens from - this share. The share is configured to allow automatic caching - of documents. - - - - - If this is not specified, the default is manual. - - - - - The following HPE 3PAR extra-specs are used when creating NFS - shares: - - - - - hpe3par:nfs_options = Comma separated list of NFS export - options. - - - - - The NFS export options have the following limitations: - -
- - - - ro and rw are not allowed (will be determined by the driver). - - - - - no_subtree_check and fsid are not allowed per HPE 3PAR CLI - support. - - - - - (in)secure and (no_)root_squash are not allowed because the - HPE 3PAR driver controls those settings. - - - -
- - All other NFS options are forwarded to the HPE 3PAR as part of - share creation. The HPE 3PAR will do additional validation at share - creation time. Refer to HPE 3PAR CLI help for more details. - -
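As a purely illustrative example, and assuming the chosen options are accepted by the array, such an extra-spec might look like:

hpe3par:nfs_options = async,wdelay,sec=sys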
- - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml b/doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml deleted file mode 100644 index 5e8a5d7871..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/huawei-nas-driver.xml +++ /dev/null @@ -1,295 +0,0 @@ -
- Huawei driver - Huawei NAS driver is a plug-in based on the Shared File Systems service. The Huawei NAS - driver can be used to provide functions such as the share and snapshot for virtual machines, or - instances, in OpenStack. Huawei NAS driver enables the OceanStor V3 series V300R002 storage - system to provide only network filesystems for OpenStack. - - Requirements - - - - The OceanStor V3 series V300R002 storage system. - - - - The following licenses should be activated on V3 for File: CIFS, NFS, HyperSnap - License (for snapshot) - - - - - Supported operations - The following operations are supported on V3 storage: - - - Create CIFS/NFS share. - - - Delete CIFS/NFS share. - - - Allow CIFS/NFS share access. - - - Only IP access type is supported for NFS(ro/rw). - - - Only USER access type is supported for CIFS(ro/rw). - - - - - Deny CIFS/NFS share access. - - - - Create snapshot. - - - - - Delete snapshot. - - - - Manage CIFS/NFS share. - - - Support pools in one backend. - - - Extend share. - - - Shrink share. - - - Support multi RestURLs. - - - - - Pre-configurations on Huawei - - 1. Create a driver configuration file. The driver configuration - file name must be the same as the manila_huawei_conf_file item in - the manila_conf configuration file. - - - 2. Configure Product. Product indicates the storage system type. - For the OceanStor V3 series V300R002 storage systems, the driver - configuration file is as follows: - - -<?xml version='1.0' encoding='UTF-8'?> -<Config> - <Storage> - <Product>V3</Product> - <LogicalPortIP>x.x.x.x</LogicalPortIP> - <RestURL>https://x.x.x.x:8088/deviceManager/rest/</RestURL> - <UserName>xxxxxxxxx</UserName> - <UserPassword>xxxxxxxxx</UserPassword> - </Storage> - <Filesystem> - <Thin_StoragePool>xxxxxxxxx</Thin_StoragePool> - <Thick_StoragePool>xxxxxxxxx</Thick_StoragePool> - <WaitInterval>3</WaitInterval> - <Timeout>60</Timeout> - </Filesystem> -</Config> - - - - Product is a type of storage product. Set it to V3. - - - LogicalPortIP is an IP address of the logical port. - - - RestURL is an access address of the REST interface. Multi RestURLs can be configured - in <RestURL> (separated by ";"). When one of the RestURL fails to connect, the driver - will retry another automatically. - - - - UserName is a user name of an administrator. - - - - - UserPassword is a password of an administrator. - - - - Thin_StoragePool is a name of a thin storage pool to be used. - - - Thick_StoragePool is a name of a thick storage pool to be used. - - - - WaitInterval is the interval time of querying the file system - status. - - - - - Timeout is the timeout period for waiting command execution of - a device to complete. - - - - - - Backend configuration - Modify the manila.conf Shared File Systems service configuration file and add share_driver - and manila_huawei_conf_file items. Example for configuring a storage system: - - - - share_driver = - manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver - - - - - manila_huawei_conf_file = /etc/manila/manila_huawei_conf.xml - - - - - driver_handles_share_servers = False - - - -
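For illustration, these settings combined in manila.conf might look like the following sketch; the back-end section name and share_backend_name are arbitrary examples, and the path must point at the driver configuration file created above:

[huawei-v3]
# Arbitrary example back-end name.
share_backend_name = HUAWEI_V3
share_driver = manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver
manila_huawei_conf_file = /etc/manila/manila_huawei_conf.xml
driver_handles_share_servers = False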
- Because the Shared File Systems service requires a share type for the creation of shares, make
- sure that the share type used has the extra spec driver_handles_share_servers set to False;
- otherwise, the Huawei back end will be filtered out by manila-scheduler. If you do not provide
- a share type with the share creation request, the default share type and its extra specs will
- be used.
- Restart of manila-share service is needed for the configuration changes to take effect. -
- Share types
- When creating a share, a share type can be specified to determine where and how the share
- will be created. If a share type is not specified, the default_share_type set in the
- Shared File Systems service configuration file is used.
- The Shared File Systems service requires that the share type includes the
- driver_handles_share_servers extra-spec. This ensures that the share will be created on a
- backend that supports the requested driver_handles_share_servers (share networks) capability.
- For the Huawei driver, this must be set to False.
- Another common extra-spec used to determine where a share is created is
- share_backend_name. When this extra-spec is defined in the share type, the share will be
- created on a backend with a matching share_backend_name.
- The Shared File Systems service share types may contain qualified extra-specs, that is,
- extra-specs that have significance for the backend driver and the CapabilityFilter. The
- Huawei driver reports the following boolean capabilities:
- capabilities:dedupe
- capabilities:compression
- capabilities:thin_provisioning
- capabilities:huawei_smartcache
- huawei_smartcache:cachename
- capabilities:huawei_smartpartition
- huawei_smartpartition:partitionname
- The scheduler will choose a host that supports the needed capability when the
- CapabilityFilter is used and a share type uses one or more of the following extra-specs:
- capabilities:dedupe='<is> True' or '<is> False'
- capabilities:compression='<is> True' or '<is> False'
- capabilities:thin_provisioning='<is> True' or '<is> False'
- capabilities:huawei_smartcache='<is> True' or '<is> False'
- huawei_smartcache:cachename=test_cache_name
- capabilities:huawei_smartpartition='<is> True' or '<is> False'
- huawei_smartpartition:partitionname=test_partition_name
- thin_provisioning will be reported as True for backends that use a thin provisioned pool.
- Backends that use thin provisioning also support the Shared File Systems service's
- over-subscription feature. thin_provisioning will be reported as False for backends that
- use a thick provisioned pool.
- dedupe will be reported as True for backends that use deduplication technology.
- compression will be reported as True for backends that use compression technology.
- huawei_smartcache will be reported as True for backends that use smartcache technology,
- which adds SSDs into a high-speed cache pool and divides the pool into multiple cache
- partitions to cache hotspot data in random and small read I/Os.
- huawei_smartpartition will be reported as True for backends that use smartpartition
- technology, which allocates cache resources based on service characteristics to ensure
- the quality of critical services. Shares are added to the smartpartition named by
- huawei_smartpartition:partitionname (for example, test_partition_name).
- snapshot_support will be reported as True for backends that support all snapshot
- functionality, including create_snapshot, delete_snapshot, and
- create_share_from_snapshot. The Huawei driver does not currently support the
- create_share_from_snapshot API, so make sure that the share type used has the extra spec
- snapshot_support set to False.
-
- - Restrictions - - The Huawei driver has the following restrictions: - - - - - Only IP access type is supported for NFS. - - - - - Only USER access type is supported for CIFS. - - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
diff --git a/doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml b/doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml deleted file mode 100644 index 317e0eaea6..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/ibm-gpfs-driver.xml +++ /dev/null @@ -1,239 +0,0 @@ -
- IBM GPFS driver - - GPFS driver uses IBM General Parallel File System (GPFS), a - high-performance, clustered file system, developed by IBM, as the - storage back end for serving file shares to the Shared File Systems - service clients. - - - Supported shared filesystems - - - - NFS (access by IP) - - - - - - Supported operations - - - - Create NFS share. - - - - - Delete NFS share. - - - - - Create share snapshot. - - - - - Delete share snapshot. - - - - - Create share from a share snapshot. - - - - - Allow NFS share access. - - - - - Currently only 'rw' access level is supported. - - - - - - - Deny NFS share access. - - - - - - Requirements - - - - Install GPFS with server license, version >= 2.0, on the - storage back end. - - - - - Install Kernel NFS or Ganesha NFS server on the storage - back-end servers. - - - - - If using Ganesha NFS, currently NFS Ganesha v1.5 and v2.0 are - supported. - - - - - Create a GPFS cluster and create a filesystem on the cluster, - that will be used to create the Shared File Systems service shares. - - - - - Enable quotas for the GPFS file system (mmchfs -Q yes). - - - - - Establish network connection between the Shared File Systems - Service host and the storage back end. - - - - - - Shared File Systems service driver configuration setting - - The following parameters in the Shared File Systems service - configuration file need to be set: - - - - - share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver - - - - - gpfs_share_export_ip = <IP to be added to GPFS export - string> - - - - - If the back-end GPFS server is not running on the Shared File - Systems service host - machine, the following options are required to SSH to the - remote GPFS back-end server: - - - - - gpfs_ssh_login = <GPFS server SSH login name> - - - and one of the following settings is required to execute - commands over SSH: - - - - - gpfs_ssh_private_key = <path to GPFS server SSH private - key for login> - - - - - gpfs_ssh_password = <GPFS server SSH login password> - - - - - - - The following configuration parameters are optional: - - - - - gpfs_mount_point_base = <base folder where exported shares - are located> - - - - - gpfs_nfs_server_type = <KNFS|GNFS> - - - - - gpfs_nfs_server_list = <list of the fully qualified NFS - server names> - - - - - gpfs_ssh_port = <ssh port number> - - - - - - - knfs_export_options = <options to use when creating a - share using kernel> - - - - <NFS server> - - - - - - - - Restart of manila-share service is needed for the configuration - changes to take effect. - - - - Known restrictions - - - - The driver does not support a segmented-network multi-tenancy - model but instead works over a flat network where the tenants - share a network. - - - - - While using remote GPFS node, with Ganesha NFS, - 'gpfs_ssh_private_key' for remote login to the GPFS node must - be specified and there must be a passwordless authentication - already setup between the manila-share service and the remote - GPFS node. - - - - - - - Driver configuration options - - Configuration options specific to this driver are documented here: - . - - -
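To tie the GPFS settings above together, a minimal back-end stanza for a GPFS server reached over SSH might look like the following sketch. The section name, IP address, login, key path, and mount point are placeholders, and driver_handles_share_servers = False is assumed here based on the flat-network restriction noted above:

[gpfs]
share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver
# Assumed setting; this driver does not manage share servers (flat network only).
driver_handles_share_servers = False
gpfs_share_export_ip = 192.0.2.10
# SSH access to a remote GPFS server; either a private key or a password must be set.
gpfs_ssh_login = gpfsadmin
gpfs_ssh_private_key = /etc/manila/gpfs_rsa
# Optional settings.
gpfs_mount_point_base = /gpfs/fs1
gpfs_nfs_server_type = KNFS
gpfs_nfs_server_list = nfs-server-1,nfs-server-2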
diff --git a/doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml b/doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml deleted file mode 100644 index a870d6a700..0000000000 --- a/doc/config-reference/shared-file-systems/drivers/netapp-cluster-mode-driver.xml +++ /dev/null @@ -1,96 +0,0 @@ -
- NetApp Clustered Data ONTAP driver
- The Shared File Systems service can be configured to
- use NetApp clustered Data ONTAP version 8.
- Network approach
- L3 connectivity between the storage cluster and the Shared File Systems
- service host must exist, and VLAN segmentation should be configured.
- The clustered Data ONTAP driver creates storage virtual machines
- (SVMs, previously known as vServers) as representations of the
- Shared File Systems service share server interface, configures
- logical interfaces (LIFs), and stores shares there.
- Supported shared filesystems
- NFS (access by IP)
- CIFS (authentication by user)
- Required licenses
- NFS
- CIFS
- FlexClone
- Known restrictions
- For CIFS shares, an external Active Directory service is
- required. Its data should be provided via a security service
- that is attached to the share network in use.
- A per-user share access rule for CIFS shares can be created only
- for a user that exists in Active Directory.
- To be able to configure clients to security services, the time
- on these external security services and on the storage must be
- synchronized. The maximum allowed clock skew is 5 minutes.
- Driver configuration options
- Configuration options specific to this driver are documented here:
- .
diff --git a/doc/config-reference/shared-file-systems/section_manila-log-files.xml b/doc/config-reference/shared-file-systems/section_manila-log-files.xml deleted file mode 100644 index 85187fa35f..0000000000 --- a/doc/config-reference/shared-file-systems/section_manila-log-files.xml +++ /dev/null @@ -1,77 +0,0 @@ - -
- Log files used by Shared File Systems - The corresponding log file of each Shared File Systems service - is stored in the /var/log/manila/ - directory of the host on which each service runs. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Log files used by Shared File Systems services
- Log file - - Service/interface (for CentOS, Fedora, openSUSE, Red Hat - Enterprise Linux, and SUSE Linux Enterprise) - - Service/interface (for Ubuntu and Debian) -
- api.log - - openstack-manila-api - - manila-api -
- manila-manage.log - - manila-manage - - manila-manage -
- scheduler.log - - openstack-manila-scheduler - - manila-scheduler -
- share.log - - openstack-manila-share - - manila-share -
-
diff --git a/doc/config-reference/shared-file-systems/section_manila-misc.xml b/doc/config-reference/shared-file-systems/section_manila-misc.xml deleted file mode 100644 index 5a7431dd2b..0000000000 --- a/doc/config-reference/shared-file-systems/section_manila-misc.xml +++ /dev/null @@ -1,45 +0,0 @@ - -
- Configuration Options
- The following options can be set in the manila.conf file.
-
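As an illustration of how such options are set, a fragment of manila.conf might look like the following; the values are arbitrary examples, not recommendations:

[DEFAULT]
# Example only: enable debug logging for the Shared File Systems services.
debug = True
# Example only: share type applied when a creation request does not specify one.
default_share_type = default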
diff --git a/doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml b/doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml deleted file mode 100644 index 18122dc217..0000000000 --- a/doc/config-reference/shared-file-systems/section_manila-sample-configuration-files.xml +++ /dev/null @@ -1,40 +0,0 @@ -
- Shared File Systems sample configuration files - All the files in this section can be found in /etc/manila. -
- manila.conf - The manila.conf file is installed in - /etc/manila by default. When you manually - install the Shared File Systems service, the options in the - manila.conf file are set to default values. - The manila.conf file contains most - of the options to configure the Shared File Systems service. - - - - - -
-
- api-paste.ini - Use the api-paste.ini file to configure the Shared File Systems API service. - - -
-
- policy.json - The policy.json file defines additional access controls that apply to the Shared File Systems service. - -
-
- rootwrap.conf - The rootwrap.conf file defines configuration values used by the - rootwrap script when the Shared File Systems service must - escalate its privileges to those of the root user. - -
-
diff --git a/doc/config-reference/shared-file-systems/section_share-drivers.xml b/doc/config-reference/shared-file-systems/section_share-drivers.xml deleted file mode 100644 index 9428e20a19..0000000000 --- a/doc/config-reference/shared-file-systems/section_share-drivers.xml +++ /dev/null @@ -1,28 +0,0 @@ - -
- Share drivers - To use different share drivers for the manila-share service, use - the parameters described in these sections. - The share drivers are included in the Shared File Systems repository - (https://git.openstack.org/cgit/openstack/manila/tree/manila/share/drivers). - To set a share driver, use the share_driver flag. - For example, to use the generic reference driver: - - share_driver=manila.share.drivers.generic.GenericShareDriver - - - - - - - - - - - -
diff --git a/doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml b/doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml deleted file mode 100644 index 87ba9588e2..0000000000 --- a/doc/config-reference/shared-file-systems/section_shared-file-systems-overview.xml +++ /dev/null @@ -1,104 +0,0 @@ - -
- Introduction to the Shared File Systems service - The Shared File Systems service provides shared file systems that - Compute instances can consume. - The Shared File Systems service provides: - - - manila-api. A WSGI app that authenticates - and routes requests throughout the Shared File Systems service. It supports the - OpenStack APIs. - - - manila-scheduler. Schedules and routes - requests to the appropriate share service. The scheduler uses configurable filters - and weighers to route requests. The Filter Scheduler is the default and enables - filters on things like Capacity, Availability Zone, Share Types, and Capabilities - as well as custom filters. - - - manila-share. - Manages back-end devices that provide shared file systems. - A manila-share service can run in one of two modes, with or without handling of - share servers. Share servers export file shares via share networks. - When share servers are not used, the networking requirements are handled - outside of Manila. - - - The Shared File Systems service contains the following components: - - - Back-end storage devices. The Shared File - Services service requires some form of back-end shared file system provider - that the service is built on. The reference implementation uses the Block Storage - service (Cinder) and a service VM to provide shares. Additional drivers are - used to access shared file systems from a variety of vendor solutions. - - - Users and tenants (projects). The Shared File - Systems service can be used by many different cloud computing consumers or customers - (tenants on a shared system), using role-based access assignments. Roles control the - actions that a user is allowed to perform. In the default configuration, most - actions do not require a particular role unless they are restricted to - administrators, but this can be configured by the system administrator in the - appropriate policy.json file that maintains the rules. - A user's access to manage particular shares is limited by tenant. Guest access - to mount and use shares is secured by IP and/or user access rules. - Quotas used to control resource consumption across available hardware resources - are per tenant. - For tenants, quota controls are available to limit: - - - The number of shares that can be created. - - - The number of gigabytes that can be provisioned for shares. - - - The number of share snapshots that can be created. - - - The number of gigabytes that can be provisioned for share snapshots. - - - The number of share networks that can be created. - - - You can revise the default quota values with the Shared File Systems CLI, - so the limits placed by quotas are editable by admin users. - - - Shares, snapshots, and share networks. The basic - resources offered by the Shared File Systems service are shares, snapshots - and share networks: - - - Shares. A share is a unit of storage - with a protocol, a size, and an access list. Shares are the basic - primitive provided by Manila. All shares exist on a - backend. Some shares are associated with share networks and share servers. - The main protocols supported are NFS and CIFS, but other protocols are - supported as well. - - - Snapshots. A snapshot is a point in - time copy of a share. Snapshots can only be used to create new shares - (containing the snapshotted data). Shares cannot be deleted until all - associated snapshots are deleted. - - - Share networks. 
A share network is a - tenant-defined object that informs Manila about the security and - network configuration for a group of shares. Share networks are only - relevant for backends that manage share servers. A share network contains - a security service and network/subnet. - - - - -
diff --git a/doc/config-ref-rst/source/bare-metal.rst b/doc/config-reference/source/bare-metal.rst similarity index 100% rename from doc/config-ref-rst/source/bare-metal.rst rename to doc/config-reference/source/bare-metal.rst diff --git a/doc/config-ref-rst/source/block-storage.rst b/doc/config-reference/source/block-storage.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage.rst rename to doc/config-reference/source/block-storage.rst diff --git a/doc/config-ref-rst/source/block-storage/backup-drivers.rst b/doc/config-reference/source/block-storage/backup-drivers.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/backup-drivers.rst rename to doc/config-reference/source/block-storage/backup-drivers.rst diff --git a/doc/config-ref-rst/source/block-storage/backup/ceph-backup-driver.rst b/doc/config-reference/source/block-storage/backup/ceph-backup-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/backup/ceph-backup-driver.rst rename to doc/config-reference/source/block-storage/backup/ceph-backup-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/backup/nfs-backup-driver.rst b/doc/config-reference/source/block-storage/backup/nfs-backup-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/backup/nfs-backup-driver.rst rename to doc/config-reference/source/block-storage/backup/nfs-backup-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/backup/swift-backup-driver.rst b/doc/config-reference/source/block-storage/backup/swift-backup-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/backup/swift-backup-driver.rst rename to doc/config-reference/source/block-storage/backup/swift-backup-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/backup/tsm-backup-driver.rst b/doc/config-reference/source/block-storage/backup/tsm-backup-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/backup/tsm-backup-driver.rst rename to doc/config-reference/source/block-storage/backup/tsm-backup-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/block-storage-overview.rst b/doc/config-reference/source/block-storage/block-storage-overview.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/block-storage-overview.rst rename to doc/config-reference/source/block-storage/block-storage-overview.rst diff --git a/doc/config-ref-rst/source/block-storage/block-storage-sample-configuration-files.rst b/doc/config-reference/source/block-storage/block-storage-sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/block-storage-sample-configuration-files.rst rename to doc/config-reference/source/block-storage/block-storage-sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/block-storage/cinder-log-files.rst b/doc/config-reference/source/block-storage/cinder-log-files.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/cinder-log-files.rst rename to doc/config-reference/source/block-storage/cinder-log-files.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/blockbridge-eps-driver.rst b/doc/config-reference/source/block-storage/drivers/blockbridge-eps-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/blockbridge-eps-driver.rst rename to doc/config-reference/source/block-storage/drivers/blockbridge-eps-driver.rst diff --git 
a/doc/config-ref-rst/source/block-storage/drivers/ceph-rbd-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/ceph-rbd-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/ceph-rbd-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/ceph-rbd-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/dell-equallogic-driver.rst b/doc/config-reference/source/block-storage/drivers/dell-equallogic-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/dell-equallogic-driver.rst rename to doc/config-reference/source/block-storage/drivers/dell-equallogic-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/dell-storagecenter-driver.rst b/doc/config-reference/source/block-storage/drivers/dell-storagecenter-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/dell-storagecenter-driver.rst rename to doc/config-reference/source/block-storage/drivers/dell-storagecenter-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/dothill-driver.rst b/doc/config-reference/source/block-storage/drivers/dothill-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/dothill-driver.rst rename to doc/config-reference/source/block-storage/drivers/dothill-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/emc-scaleio-driver.rst b/doc/config-reference/source/block-storage/drivers/emc-scaleio-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/emc-scaleio-driver.rst rename to doc/config-reference/source/block-storage/drivers/emc-scaleio-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/emc-vmax-driver.rst b/doc/config-reference/source/block-storage/drivers/emc-vmax-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/emc-vmax-driver.rst rename to doc/config-reference/source/block-storage/drivers/emc-vmax-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/emc-vnx-driver.rst b/doc/config-reference/source/block-storage/drivers/emc-vnx-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/emc-vnx-driver.rst rename to doc/config-reference/source/block-storage/drivers/emc-vnx-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/emc-xtremio-driver.rst b/doc/config-reference/source/block-storage/drivers/emc-xtremio-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/emc-xtremio-driver.rst rename to doc/config-reference/source/block-storage/drivers/emc-xtremio-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/glusterfs-driver.rst b/doc/config-reference/source/block-storage/drivers/glusterfs-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/glusterfs-driver.rst rename to doc/config-reference/source/block-storage/drivers/glusterfs-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/hds-hnas-driver.rst b/doc/config-reference/source/block-storage/drivers/hds-hnas-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/hds-hnas-driver.rst rename to doc/config-reference/source/block-storage/drivers/hds-hnas-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/hitachi-storage-volume-driver.rst 
b/doc/config-reference/source/block-storage/drivers/hitachi-storage-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/hitachi-storage-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/hitachi-storage-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/hp-msa-driver.rst b/doc/config-reference/source/block-storage/drivers/hp-msa-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/hp-msa-driver.rst rename to doc/config-reference/source/block-storage/drivers/hp-msa-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/hpe-3par-driver.rst b/doc/config-reference/source/block-storage/drivers/hpe-3par-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/hpe-3par-driver.rst rename to doc/config-reference/source/block-storage/drivers/hpe-3par-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/hpe-lefthand-driver.rst b/doc/config-reference/source/block-storage/drivers/hpe-lefthand-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/hpe-lefthand-driver.rst rename to doc/config-reference/source/block-storage/drivers/hpe-lefthand-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/huawei-storage-driver.rst b/doc/config-reference/source/block-storage/drivers/huawei-storage-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/huawei-storage-driver.rst rename to doc/config-reference/source/block-storage/drivers/huawei-storage-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/ibm-flashsystem-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/ibm-flashsystem-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/ibm-flashsystem-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/ibm-flashsystem-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/ibm-gpfs-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/ibm-gpfs-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/ibm-gpfs-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/ibm-gpfs-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/ibm-sonas-7k-driver.rst b/doc/config-reference/source/block-storage/drivers/ibm-sonas-7k-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/ibm-sonas-7k-driver.rst rename to doc/config-reference/source/block-storage/drivers/ibm-sonas-7k-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/ibm-storwize-svc-driver.rst b/doc/config-reference/source/block-storage/drivers/ibm-storwize-svc-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/ibm-storwize-svc-driver.rst rename to doc/config-reference/source/block-storage/drivers/ibm-storwize-svc-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/ibm-xiv-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/ibm-xiv-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/ibm-xiv-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/ibm-xiv-volume-driver.rst diff --git 
a/doc/config-ref-rst/source/block-storage/drivers/lenovo-driver.rst b/doc/config-reference/source/block-storage/drivers/lenovo-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/lenovo-driver.rst rename to doc/config-reference/source/block-storage/drivers/lenovo-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/lvm-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/lvm-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/lvm-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/lvm-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/netapp-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/netapp-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/netapp-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/netapp-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/nfs-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/nfs-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/nfs-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/nfs-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/nimble-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/nimble-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/nimble-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/nimble-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/prophetstor-dpl-driver.rst b/doc/config-reference/source/block-storage/drivers/prophetstor-dpl-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/prophetstor-dpl-driver.rst rename to doc/config-reference/source/block-storage/drivers/prophetstor-dpl-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/pure-storage-driver.rst b/doc/config-reference/source/block-storage/drivers/pure-storage-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/pure-storage-driver.rst rename to doc/config-reference/source/block-storage/drivers/pure-storage-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/quobyte-driver.rst b/doc/config-reference/source/block-storage/drivers/quobyte-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/quobyte-driver.rst rename to doc/config-reference/source/block-storage/drivers/quobyte-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/scality-sofs-driver.rst b/doc/config-reference/source/block-storage/drivers/scality-sofs-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/scality-sofs-driver.rst rename to doc/config-reference/source/block-storage/drivers/scality-sofs-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/sheepdog-driver.rst b/doc/config-reference/source/block-storage/drivers/sheepdog-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/sheepdog-driver.rst rename to doc/config-reference/source/block-storage/drivers/sheepdog-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/smbfs-volume-driver.rst 
b/doc/config-reference/source/block-storage/drivers/smbfs-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/smbfs-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/smbfs-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/solidfire-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/solidfire-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/solidfire-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/solidfire-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/tintri-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/tintri-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/tintri-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/tintri-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/violin-v6000-driver.rst b/doc/config-reference/source/block-storage/drivers/violin-v6000-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/violin-v6000-driver.rst rename to doc/config-reference/source/block-storage/drivers/violin-v6000-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/violin-v7000-driver.rst b/doc/config-reference/source/block-storage/drivers/violin-v7000-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/violin-v7000-driver.rst rename to doc/config-reference/source/block-storage/drivers/violin-v7000-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/vmware-vmdk-driver.rst b/doc/config-reference/source/block-storage/drivers/vmware-vmdk-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/vmware-vmdk-driver.rst rename to doc/config-reference/source/block-storage/drivers/vmware-vmdk-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/windows-iscsi-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/windows-iscsi-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/windows-iscsi-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/windows-iscsi-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/xio-volume-driver.rst b/doc/config-reference/source/block-storage/drivers/xio-volume-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/xio-volume-driver.rst rename to doc/config-reference/source/block-storage/drivers/xio-volume-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/zfssa-iscsi-driver.rst b/doc/config-reference/source/block-storage/drivers/zfssa-iscsi-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/zfssa-iscsi-driver.rst rename to doc/config-reference/source/block-storage/drivers/zfssa-iscsi-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/drivers/zfssa-nfs-driver.rst b/doc/config-reference/source/block-storage/drivers/zfssa-nfs-driver.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/drivers/zfssa-nfs-driver.rst rename to doc/config-reference/source/block-storage/drivers/zfssa-nfs-driver.rst diff --git a/doc/config-ref-rst/source/block-storage/fc-zoning.rst 
b/doc/config-reference/source/block-storage/fc-zoning.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/fc-zoning.rst rename to doc/config-reference/source/block-storage/fc-zoning.rst diff --git a/doc/config-ref-rst/source/block-storage/nested-quota.rst b/doc/config-reference/source/block-storage/nested-quota.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/nested-quota.rst rename to doc/config-reference/source/block-storage/nested-quota.rst diff --git a/doc/config-ref-rst/source/block-storage/volume-drivers.rst b/doc/config-reference/source/block-storage/volume-drivers.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/volume-drivers.rst rename to doc/config-reference/source/block-storage/volume-drivers.rst diff --git a/doc/config-ref-rst/source/block-storage/volume-encryption.rst b/doc/config-reference/source/block-storage/volume-encryption.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/volume-encryption.rst rename to doc/config-reference/source/block-storage/volume-encryption.rst diff --git a/doc/config-ref-rst/source/block-storage/volume-misc.rst b/doc/config-reference/source/block-storage/volume-misc.rst similarity index 100% rename from doc/config-ref-rst/source/block-storage/volume-misc.rst rename to doc/config-reference/source/block-storage/volume-misc.rst diff --git a/doc/config-ref-rst/source/common b/doc/config-reference/source/common similarity index 100% rename from doc/config-ref-rst/source/common rename to doc/config-reference/source/common diff --git a/doc/config-ref-rst/source/compute.rst b/doc/config-reference/source/compute.rst similarity index 100% rename from doc/config-ref-rst/source/compute.rst rename to doc/config-reference/source/compute.rst diff --git a/doc/config-ref-rst/source/compute/api-paste.ini.rst b/doc/config-reference/source/compute/api-paste.ini.rst similarity index 100% rename from doc/config-ref-rst/source/compute/api-paste.ini.rst rename to doc/config-reference/source/compute/api-paste.ini.rst diff --git a/doc/config-ref-rst/source/compute/api.rst b/doc/config-reference/source/compute/api.rst similarity index 100% rename from doc/config-ref-rst/source/compute/api.rst rename to doc/config-reference/source/compute/api.rst diff --git a/doc/config-ref-rst/source/compute/authentication-authorization.rst b/doc/config-reference/source/compute/authentication-authorization.rst similarity index 100% rename from doc/config-ref-rst/source/compute/authentication-authorization.rst rename to doc/config-reference/source/compute/authentication-authorization.rst diff --git a/doc/config-ref-rst/source/compute/cells.rst b/doc/config-reference/source/compute/cells.rst similarity index 100% rename from doc/config-ref-rst/source/compute/cells.rst rename to doc/config-reference/source/compute/cells.rst diff --git a/doc/config-ref-rst/source/compute/conductor.rst b/doc/config-reference/source/compute/conductor.rst similarity index 100% rename from doc/config-ref-rst/source/compute/conductor.rst rename to doc/config-reference/source/compute/conductor.rst diff --git a/doc/config-ref-rst/source/compute/config-options.rst b/doc/config-reference/source/compute/config-options.rst similarity index 100% rename from doc/config-ref-rst/source/compute/config-options.rst rename to doc/config-reference/source/compute/config-options.rst diff --git a/doc/config-ref-rst/source/compute/config-samples.rst b/doc/config-reference/source/compute/config-samples.rst similarity index 
100% rename from doc/config-ref-rst/source/compute/config-samples.rst rename to doc/config-reference/source/compute/config-samples.rst diff --git a/doc/config-ref-rst/source/compute/database-connections.rst b/doc/config-reference/source/compute/database-connections.rst similarity index 100% rename from doc/config-ref-rst/source/compute/database-connections.rst rename to doc/config-reference/source/compute/database-connections.rst diff --git a/doc/config-ref-rst/source/compute/ec2-api.rst b/doc/config-reference/source/compute/ec2-api.rst similarity index 100% rename from doc/config-ref-rst/source/compute/ec2-api.rst rename to doc/config-reference/source/compute/ec2-api.rst diff --git a/doc/config-ref-rst/source/compute/fibre-channel.rst b/doc/config-reference/source/compute/fibre-channel.rst similarity index 100% rename from doc/config-ref-rst/source/compute/fibre-channel.rst rename to doc/config-reference/source/compute/fibre-channel.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-basics.rst b/doc/config-reference/source/compute/hypervisor-basics.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-basics.rst rename to doc/config-reference/source/compute/hypervisor-basics.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-hyper-v.rst b/doc/config-reference/source/compute/hypervisor-hyper-v.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-hyper-v.rst rename to doc/config-reference/source/compute/hypervisor-hyper-v.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-kvm.rst b/doc/config-reference/source/compute/hypervisor-kvm.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-kvm.rst rename to doc/config-reference/source/compute/hypervisor-kvm.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-lxc.rst b/doc/config-reference/source/compute/hypervisor-lxc.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-lxc.rst rename to doc/config-reference/source/compute/hypervisor-lxc.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-qemu.rst b/doc/config-reference/source/compute/hypervisor-qemu.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-qemu.rst rename to doc/config-reference/source/compute/hypervisor-qemu.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-vmware.rst b/doc/config-reference/source/compute/hypervisor-vmware.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-vmware.rst rename to doc/config-reference/source/compute/hypervisor-vmware.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-xen-api.rst b/doc/config-reference/source/compute/hypervisor-xen-api.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-xen-api.rst rename to doc/config-reference/source/compute/hypervisor-xen-api.rst diff --git a/doc/config-ref-rst/source/compute/hypervisor-xen-libvirt.rst b/doc/config-reference/source/compute/hypervisor-xen-libvirt.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisor-xen-libvirt.rst rename to doc/config-reference/source/compute/hypervisor-xen-libvirt.rst diff --git a/doc/config-ref-rst/source/compute/hypervisors.rst b/doc/config-reference/source/compute/hypervisors.rst similarity index 100% rename from doc/config-ref-rst/source/compute/hypervisors.rst rename to doc/config-reference/source/compute/hypervisors.rst diff --git 
a/doc/config-ref-rst/source/compute/iscsi-offload.rst b/doc/config-reference/source/compute/iscsi-offload.rst similarity index 100% rename from doc/config-ref-rst/source/compute/iscsi-offload.rst rename to doc/config-reference/source/compute/iscsi-offload.rst diff --git a/doc/config-ref-rst/source/compute/logging.rst b/doc/config-reference/source/compute/logging.rst similarity index 100% rename from doc/config-ref-rst/source/compute/logging.rst rename to doc/config-reference/source/compute/logging.rst diff --git a/doc/config-ref-rst/source/compute/nova-conf-samples.rst b/doc/config-reference/source/compute/nova-conf-samples.rst similarity index 100% rename from doc/config-ref-rst/source/compute/nova-conf-samples.rst rename to doc/config-reference/source/compute/nova-conf-samples.rst diff --git a/doc/config-ref-rst/source/compute/nova-conf.rst b/doc/config-reference/source/compute/nova-conf.rst similarity index 100% rename from doc/config-ref-rst/source/compute/nova-conf.rst rename to doc/config-reference/source/compute/nova-conf.rst diff --git a/doc/config-ref-rst/source/compute/nova-logs.rst b/doc/config-reference/source/compute/nova-logs.rst similarity index 100% rename from doc/config-ref-rst/source/compute/nova-logs.rst rename to doc/config-reference/source/compute/nova-logs.rst diff --git a/doc/config-ref-rst/source/compute/nova.conf b/doc/config-reference/source/compute/nova.conf similarity index 100% rename from doc/config-ref-rst/source/compute/nova.conf rename to doc/config-reference/source/compute/nova.conf diff --git a/doc/config-ref-rst/source/compute/policy.json.rst b/doc/config-reference/source/compute/policy.json.rst similarity index 100% rename from doc/config-ref-rst/source/compute/policy.json.rst rename to doc/config-reference/source/compute/policy.json.rst diff --git a/doc/config-ref-rst/source/compute/resize.rst b/doc/config-reference/source/compute/resize.rst similarity index 100% rename from doc/config-ref-rst/source/compute/resize.rst rename to doc/config-reference/source/compute/resize.rst diff --git a/doc/config-ref-rst/source/compute/rootwrap.conf.rst b/doc/config-reference/source/compute/rootwrap.conf.rst similarity index 100% rename from doc/config-ref-rst/source/compute/rootwrap.conf.rst rename to doc/config-reference/source/compute/rootwrap.conf.rst diff --git a/doc/config-ref-rst/source/compute/rpc.rst b/doc/config-reference/source/compute/rpc.rst similarity index 100% rename from doc/config-ref-rst/source/compute/rpc.rst rename to doc/config-reference/source/compute/rpc.rst diff --git a/doc/config-ref-rst/source/compute/sample-configuration-files.rst b/doc/config-reference/source/compute/sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/compute/sample-configuration-files.rst rename to doc/config-reference/source/compute/sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/compute/scheduler.rst b/doc/config-reference/source/compute/scheduler.rst similarity index 100% rename from doc/config-ref-rst/source/compute/scheduler.rst rename to doc/config-reference/source/compute/scheduler.rst diff --git a/doc/config-ref-rst/source/conf.py b/doc/config-reference/source/conf.py similarity index 100% rename from doc/config-ref-rst/source/conf.py rename to doc/config-reference/source/conf.py diff --git a/doc/config-ref-rst/source/config-format.rst b/doc/config-reference/source/config-format.rst similarity index 100% rename from doc/config-ref-rst/source/config-format.rst rename to 
doc/config-reference/source/config-format.rst diff --git a/doc/config-ref-rst/source/config-overview.rst b/doc/config-reference/source/config-overview.rst similarity index 100% rename from doc/config-ref-rst/source/config-overview.rst rename to doc/config-reference/source/config-overview.rst diff --git a/doc/config-ref-rst/source/dashboard.rst b/doc/config-reference/source/dashboard.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard.rst rename to doc/config-reference/source/dashboard.rst diff --git a/doc/config-ref-rst/source/dashboard/configure.rst b/doc/config-reference/source/dashboard/configure.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard/configure.rst rename to doc/config-reference/source/dashboard/configure.rst diff --git a/doc/config-ref-rst/source/dashboard/customize.rst b/doc/config-reference/source/dashboard/customize.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard/customize.rst rename to doc/config-reference/source/dashboard/customize.rst diff --git a/doc/config-ref-rst/source/dashboard/keystone_policy.json.rst b/doc/config-reference/source/dashboard/keystone_policy.json.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard/keystone_policy.json.rst rename to doc/config-reference/source/dashboard/keystone_policy.json.rst diff --git a/doc/config-ref-rst/source/dashboard/log-files.rst b/doc/config-reference/source/dashboard/log-files.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard/log-files.rst rename to doc/config-reference/source/dashboard/log-files.rst diff --git a/doc/config-ref-rst/source/dashboard/nova_policy.json.rst b/doc/config-reference/source/dashboard/nova_policy.json.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard/nova_policy.json.rst rename to doc/config-reference/source/dashboard/nova_policy.json.rst diff --git a/doc/config-ref-rst/source/dashboard/sample-configuration-files.rst b/doc/config-reference/source/dashboard/sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/dashboard/sample-configuration-files.rst rename to doc/config-reference/source/dashboard/sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/data-processing-service.rst b/doc/config-reference/source/data-processing-service.rst similarity index 100% rename from doc/config-ref-rst/source/data-processing-service.rst rename to doc/config-reference/source/data-processing-service.rst diff --git a/doc/config-ref-rst/source/database-service.rst b/doc/config-reference/source/database-service.rst similarity index 100% rename from doc/config-ref-rst/source/database-service.rst rename to doc/config-reference/source/database-service.rst diff --git a/doc/config-ref-rst/source/database-service/databaseservice_db.rst b/doc/config-reference/source/database-service/databaseservice_db.rst similarity index 100% rename from doc/config-ref-rst/source/database-service/databaseservice_db.rst rename to doc/config-reference/source/database-service/databaseservice_db.rst diff --git a/doc/config-ref-rst/source/database-service/databaseservice_rpc.rst b/doc/config-reference/source/database-service/databaseservice_rpc.rst similarity index 100% rename from doc/config-ref-rst/source/database-service/databaseservice_rpc.rst rename to doc/config-reference/source/database-service/databaseservice_rpc.rst diff --git a/doc/config-ref-rst/source/ext/__init__.py b/doc/config-reference/source/ext/__init__.py similarity index 100% rename 
from doc/config-ref-rst/source/ext/__init__.py rename to doc/config-reference/source/ext/__init__.py diff --git a/doc/config-ref-rst/source/ext/remote.py b/doc/config-reference/source/ext/remote.py similarity index 100% rename from doc/config-ref-rst/source/ext/remote.py rename to doc/config-reference/source/ext/remote.py diff --git a/doc/config-ref-rst/source/figures/bb-cinder-fig1.png b/doc/config-reference/source/figures/bb-cinder-fig1.png similarity index 100% rename from doc/config-ref-rst/source/figures/bb-cinder-fig1.png rename to doc/config-reference/source/figures/bb-cinder-fig1.png diff --git a/doc/config-ref-rst/source/figures/emc-enabler.png b/doc/config-reference/source/figures/emc-enabler.png similarity index 100% rename from doc/config-ref-rst/source/figures/emc-enabler.png rename to doc/config-reference/source/figures/emc-enabler.png diff --git a/doc/config-ref-rst/source/figures/filteringWorkflow1.png b/doc/config-reference/source/figures/filteringWorkflow1.png similarity index 100% rename from doc/config-ref-rst/source/figures/filteringWorkflow1.png rename to doc/config-reference/source/figures/filteringWorkflow1.png diff --git a/doc/config-ref-rst/source/figures/filteringWorkflow2.png b/doc/config-reference/source/figures/filteringWorkflow2.png similarity index 100% rename from doc/config-ref-rst/source/figures/filteringWorkflow2.png rename to doc/config-reference/source/figures/filteringWorkflow2.png diff --git a/doc/config-ref-rst/source/figures/nova-conf-kvm-flat.png b/doc/config-reference/source/figures/nova-conf-kvm-flat.png similarity index 100% rename from doc/config-ref-rst/source/figures/nova-conf-kvm-flat.png rename to doc/config-reference/source/figures/nova-conf-kvm-flat.png diff --git a/doc/config-ref-rst/source/figures/nova-conf-kvm-flat.svg b/doc/config-reference/source/figures/nova-conf-kvm-flat.svg similarity index 100% rename from doc/config-ref-rst/source/figures/nova-conf-kvm-flat.svg rename to doc/config-reference/source/figures/nova-conf-kvm-flat.svg diff --git a/doc/config-ref-rst/source/figures/nova-conf-kvm-flat.vsd b/doc/config-reference/source/figures/nova-conf-kvm-flat.vsd similarity index 100% rename from doc/config-ref-rst/source/figures/nova-conf-kvm-flat.vsd rename to doc/config-reference/source/figures/nova-conf-kvm-flat.vsd diff --git a/doc/config-ref-rst/source/figures/nova-conf-xen-flat.png b/doc/config-reference/source/figures/nova-conf-xen-flat.png similarity index 100% rename from doc/config-ref-rst/source/figures/nova-conf-xen-flat.png rename to doc/config-reference/source/figures/nova-conf-xen-flat.png diff --git a/doc/config-ref-rst/source/figures/nova-conf-xen-flat.svg b/doc/config-reference/source/figures/nova-conf-xen-flat.svg similarity index 100% rename from doc/config-ref-rst/source/figures/nova-conf-xen-flat.svg rename to doc/config-reference/source/figures/nova-conf-xen-flat.svg diff --git a/doc/config-ref-rst/source/figures/nova-conf-xen-flat.vsd b/doc/config-reference/source/figures/nova-conf-xen-flat.vsd similarity index 100% rename from doc/config-ref-rst/source/figures/nova-conf-xen-flat.vsd rename to doc/config-reference/source/figures/nova-conf-xen-flat.vsd diff --git a/doc/config-ref-rst/source/figures/nova-weighting-hosts.png b/doc/config-reference/source/figures/nova-weighting-hosts.png similarity index 100% rename from doc/config-ref-rst/source/figures/nova-weighting-hosts.png rename to doc/config-reference/source/figures/nova-weighting-hosts.png diff --git 
a/doc/config-ref-rst/source/firewalls-default-ports.rst b/doc/config-reference/source/firewalls-default-ports.rst similarity index 100% rename from doc/config-ref-rst/source/firewalls-default-ports.rst rename to doc/config-reference/source/firewalls-default-ports.rst diff --git a/doc/config-ref-rst/source/identity.rst b/doc/config-reference/source/identity.rst similarity index 100% rename from doc/config-ref-rst/source/identity.rst rename to doc/config-reference/source/identity.rst diff --git a/doc/config-ref-rst/source/identity/caching.rst b/doc/config-reference/source/identity/caching.rst similarity index 100% rename from doc/config-ref-rst/source/identity/caching.rst rename to doc/config-reference/source/identity/caching.rst diff --git a/doc/config-ref-rst/source/identity/options.rst b/doc/config-reference/source/identity/options.rst similarity index 100% rename from doc/config-ref-rst/source/identity/options.rst rename to doc/config-reference/source/identity/options.rst diff --git a/doc/config-ref-rst/source/identity/sample-configuration-files.rst b/doc/config-reference/source/identity/sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/identity/sample-configuration-files.rst rename to doc/config-reference/source/identity/sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/image-service.rst b/doc/config-reference/source/image-service.rst similarity index 100% rename from doc/config-ref-rst/source/image-service.rst rename to doc/config-reference/source/image-service.rst diff --git a/doc/config-ref-rst/source/image-service/image_service_ISO_support.rst b/doc/config-reference/source/image-service/image_service_ISO_support.rst similarity index 100% rename from doc/config-ref-rst/source/image-service/image_service_ISO_support.rst rename to doc/config-reference/source/image-service/image_service_ISO_support.rst diff --git a/doc/config-ref-rst/source/image-service/image_service_api.rst b/doc/config-reference/source/image-service/image_service_api.rst similarity index 100% rename from doc/config-ref-rst/source/image-service/image_service_api.rst rename to doc/config-reference/source/image-service/image_service_api.rst diff --git a/doc/config-ref-rst/source/image-service/image_service_backends.rst b/doc/config-reference/source/image-service/image_service_backends.rst similarity index 100% rename from doc/config-ref-rst/source/image-service/image_service_backends.rst rename to doc/config-reference/source/image-service/image_service_backends.rst diff --git a/doc/config-ref-rst/source/image-service/image_service_rpc.rst b/doc/config-reference/source/image-service/image_service_rpc.rst similarity index 100% rename from doc/config-ref-rst/source/image-service/image_service_rpc.rst rename to doc/config-reference/source/image-service/image_service_rpc.rst diff --git a/doc/config-ref-rst/source/image-service/sample-configuration-files.rst b/doc/config-reference/source/image-service/sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/image-service/sample-configuration-files.rst rename to doc/config-reference/source/image-service/sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/index.rst b/doc/config-reference/source/index.rst similarity index 100% rename from doc/config-ref-rst/source/index.rst rename to doc/config-reference/source/index.rst diff --git a/doc/config-ref-rst/source/networking.rst b/doc/config-reference/source/networking.rst similarity index 100% rename from 
doc/config-ref-rst/source/networking.rst rename to doc/config-reference/source/networking.rst diff --git a/doc/config-ref-rst/source/networking/networking_log.rst b/doc/config-reference/source/networking/networking_log.rst similarity index 100% rename from doc/config-ref-rst/source/networking/networking_log.rst rename to doc/config-reference/source/networking/networking_log.rst diff --git a/doc/config-ref-rst/source/networking/networking_options_reference.rst b/doc/config-reference/source/networking/networking_options_reference.rst similarity index 100% rename from doc/config-ref-rst/source/networking/networking_options_reference.rst rename to doc/config-reference/source/networking/networking_options_reference.rst diff --git a/doc/config-ref-rst/source/networking/sample-configuration-files.rst b/doc/config-reference/source/networking/sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/networking/sample-configuration-files.rst rename to doc/config-reference/source/networking/sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/object-storage.rst b/doc/config-reference/source/object-storage.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage.rst rename to doc/config-reference/source/object-storage.rst diff --git a/doc/config-ref-rst/source/object-storage/about.rst b/doc/config-reference/source/object-storage/about.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/about.rst rename to doc/config-reference/source/object-storage/about.rst diff --git a/doc/config-ref-rst/source/object-storage/configure-s3.rst b/doc/config-reference/source/object-storage/configure-s3.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/configure-s3.rst rename to doc/config-reference/source/object-storage/configure-s3.rst diff --git a/doc/config-ref-rst/source/object-storage/configure.rst b/doc/config-reference/source/object-storage/configure.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/configure.rst rename to doc/config-reference/source/object-storage/configure.rst diff --git a/doc/config-ref-rst/source/object-storage/cors.rst b/doc/config-reference/source/object-storage/cors.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/cors.rst rename to doc/config-reference/source/object-storage/cors.rst diff --git a/doc/config-ref-rst/source/object-storage/features.rst b/doc/config-reference/source/object-storage/features.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/features.rst rename to doc/config-reference/source/object-storage/features.rst diff --git a/doc/config-ref-rst/source/object-storage/general-service-conf.rst b/doc/config-reference/source/object-storage/general-service-conf.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/general-service-conf.rst rename to doc/config-reference/source/object-storage/general-service-conf.rst diff --git a/doc/config-ref-rst/source/object-storage/listendpoints.rst b/doc/config-reference/source/object-storage/listendpoints.rst similarity index 100% rename from doc/config-ref-rst/source/object-storage/listendpoints.rst rename to doc/config-reference/source/object-storage/listendpoints.rst diff --git a/doc/config-ref-rst/source/orchestration.rst b/doc/config-reference/source/orchestration.rst similarity index 100% rename from doc/config-ref-rst/source/orchestration.rst rename to doc/config-reference/source/orchestration.rst 
diff --git a/doc/config-ref-rst/source/orchestration/orchestration_api.rst b/doc/config-reference/source/orchestration/orchestration_api.rst similarity index 100% rename from doc/config-ref-rst/source/orchestration/orchestration_api.rst rename to doc/config-reference/source/orchestration/orchestration_api.rst diff --git a/doc/config-ref-rst/source/orchestration/orchestration_clients.rst b/doc/config-reference/source/orchestration/orchestration_clients.rst similarity index 100% rename from doc/config-ref-rst/source/orchestration/orchestration_clients.rst rename to doc/config-reference/source/orchestration/orchestration_clients.rst diff --git a/doc/config-ref-rst/source/orchestration/orchestration_rpc.rst b/doc/config-reference/source/orchestration/orchestration_rpc.rst similarity index 100% rename from doc/config-ref-rst/source/orchestration/orchestration_rpc.rst rename to doc/config-reference/source/orchestration/orchestration_rpc.rst diff --git a/doc/config-ref-rst/source/policy-json-file.rst b/doc/config-reference/source/policy-json-file.rst similarity index 100% rename from doc/config-ref-rst/source/policy-json-file.rst rename to doc/config-reference/source/policy-json-file.rst diff --git a/doc/config-ref-rst/source/samples/neutron/dhcp_agent.ini.sample b/doc/config-reference/source/samples/neutron/dhcp_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/dhcp_agent.ini.sample rename to doc/config-reference/source/samples/neutron/dhcp_agent.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/l3_agent.ini.sample b/doc/config-reference/source/samples/neutron/l3_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/l3_agent.ini.sample rename to doc/config-reference/source/samples/neutron/l3_agent.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/linuxbridge_agent.ini.sample b/doc/config-reference/source/samples/neutron/linuxbridge_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/linuxbridge_agent.ini.sample rename to doc/config-reference/source/samples/neutron/linuxbridge_agent.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/metadata_agent.ini.sample b/doc/config-reference/source/samples/neutron/metadata_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/metadata_agent.ini.sample rename to doc/config-reference/source/samples/neutron/metadata_agent.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/metering_agent.ini.sample b/doc/config-reference/source/samples/neutron/metering_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/metering_agent.ini.sample rename to doc/config-reference/source/samples/neutron/metering_agent.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/ml2_conf.ini.sample b/doc/config-reference/source/samples/neutron/ml2_conf.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/ml2_conf.ini.sample rename to doc/config-reference/source/samples/neutron/ml2_conf.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/ml2_conf_sriov.ini.sample b/doc/config-reference/source/samples/neutron/ml2_conf_sriov.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/ml2_conf_sriov.ini.sample rename to doc/config-reference/source/samples/neutron/ml2_conf_sriov.ini.sample diff --git 
a/doc/config-ref-rst/source/samples/neutron/neutron.conf.sample b/doc/config-reference/source/samples/neutron/neutron.conf.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/neutron.conf.sample rename to doc/config-reference/source/samples/neutron/neutron.conf.sample diff --git a/doc/config-ref-rst/source/samples/neutron/openvswitch_agent.ini.sample b/doc/config-reference/source/samples/neutron/openvswitch_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/openvswitch_agent.ini.sample rename to doc/config-reference/source/samples/neutron/openvswitch_agent.ini.sample diff --git a/doc/config-ref-rst/source/samples/neutron/sriov_agent.ini.sample b/doc/config-reference/source/samples/neutron/sriov_agent.ini.sample similarity index 100% rename from doc/config-ref-rst/source/samples/neutron/sriov_agent.ini.sample rename to doc/config-reference/source/samples/neutron/sriov_agent.ini.sample diff --git a/doc/config-ref-rst/source/shared-file-systems.rst b/doc/config-reference/source/shared-file-systems.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems.rst rename to doc/config-reference/source/shared-file-systems.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers.rst b/doc/config-reference/source/shared-file-systems/drivers.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers.rst rename to doc/config-reference/source/shared-file-systems/drivers.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/emc-isilon-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/emc-isilon-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/emc-isilon-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/emc-isilon-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/emc-vnx-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/emc-vnx-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/emc-vnx-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/emc-vnx-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/generic-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/generic-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/generic-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/generic-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/glusterfs-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/glusterfs-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/glusterfs-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/glusterfs-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/glusterfs-native-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/glusterfs-native-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/glusterfs-native-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/glusterfs-native-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/hdfs-native-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/hdfs-native-driver.rst similarity index 100% 
rename from doc/config-ref-rst/source/shared-file-systems/drivers/hdfs-native-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/hdfs-native-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/hitachi-hnas-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/hitachi-hnas-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/hitachi-hnas-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/hitachi-hnas-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/hpe-3par-share-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/hpe-3par-share-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/hpe-3par-share-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/hpe-3par-share-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/huawei-nas-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/huawei-nas-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/huawei-nas-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/huawei-nas-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/ibm-gpfs-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/ibm-gpfs-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/ibm-gpfs-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/ibm-gpfs-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/netapp-cluster-mode-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/netapp-cluster-mode-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/netapp-cluster-mode-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/netapp-cluster-mode-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/drivers/quobyte-driver.rst b/doc/config-reference/source/shared-file-systems/drivers/quobyte-driver.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/drivers/quobyte-driver.rst rename to doc/config-reference/source/shared-file-systems/drivers/quobyte-driver.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/log-files.rst b/doc/config-reference/source/shared-file-systems/log-files.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/log-files.rst rename to doc/config-reference/source/shared-file-systems/log-files.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/misc.rst b/doc/config-reference/source/shared-file-systems/misc.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/misc.rst rename to doc/config-reference/source/shared-file-systems/misc.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/overview.rst b/doc/config-reference/source/shared-file-systems/overview.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/overview.rst rename to doc/config-reference/source/shared-file-systems/overview.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/rpc.rst b/doc/config-reference/source/shared-file-systems/rpc.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/rpc.rst rename to 
doc/config-reference/source/shared-file-systems/rpc.rst diff --git a/doc/config-ref-rst/source/shared-file-systems/sample-configuration-files.rst b/doc/config-reference/source/shared-file-systems/sample-configuration-files.rst similarity index 100% rename from doc/config-ref-rst/source/shared-file-systems/sample-configuration-files.rst rename to doc/config-reference/source/shared-file-systems/sample-configuration-files.rst diff --git a/doc/config-ref-rst/source/tables/aodh-amqp.rst b/doc/config-reference/source/tables/aodh-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-amqp.rst rename to doc/config-reference/source/tables/aodh-amqp.rst diff --git a/doc/config-ref-rst/source/tables/aodh-api.rst b/doc/config-reference/source/tables/aodh-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-api.rst rename to doc/config-reference/source/tables/aodh-api.rst diff --git a/doc/config-ref-rst/source/tables/aodh-auth.rst b/doc/config-reference/source/tables/aodh-auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-auth.rst rename to doc/config-reference/source/tables/aodh-auth.rst diff --git a/doc/config-ref-rst/source/tables/aodh-auth_token.rst b/doc/config-reference/source/tables/aodh-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-auth_token.rst rename to doc/config-reference/source/tables/aodh-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/aodh-common.rst b/doc/config-reference/source/tables/aodh-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-common.rst rename to doc/config-reference/source/tables/aodh-common.rst diff --git a/doc/config-ref-rst/source/tables/aodh-coordination.rst b/doc/config-reference/source/tables/aodh-coordination.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-coordination.rst rename to doc/config-reference/source/tables/aodh-coordination.rst diff --git a/doc/config-ref-rst/source/tables/aodh-cors.rst b/doc/config-reference/source/tables/aodh-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-cors.rst rename to doc/config-reference/source/tables/aodh-cors.rst diff --git a/doc/config-ref-rst/source/tables/aodh-database.rst b/doc/config-reference/source/tables/aodh-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-database.rst rename to doc/config-reference/source/tables/aodh-database.rst diff --git a/doc/config-ref-rst/source/tables/aodh-logging.rst b/doc/config-reference/source/tables/aodh-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-logging.rst rename to doc/config-reference/source/tables/aodh-logging.rst diff --git a/doc/config-ref-rst/source/tables/aodh-policy.rst b/doc/config-reference/source/tables/aodh-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-policy.rst rename to doc/config-reference/source/tables/aodh-policy.rst diff --git a/doc/config-ref-rst/source/tables/aodh-qpid.rst b/doc/config-reference/source/tables/aodh-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-qpid.rst rename to doc/config-reference/source/tables/aodh-qpid.rst diff --git a/doc/config-ref-rst/source/tables/aodh-rabbitmq.rst b/doc/config-reference/source/tables/aodh-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-rabbitmq.rst rename to doc/config-reference/source/tables/aodh-rabbitmq.rst 
diff --git a/doc/config-ref-rst/source/tables/aodh-redis.rst b/doc/config-reference/source/tables/aodh-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-redis.rst rename to doc/config-reference/source/tables/aodh-redis.rst diff --git a/doc/config-ref-rst/source/tables/aodh-rpc.rst b/doc/config-reference/source/tables/aodh-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-rpc.rst rename to doc/config-reference/source/tables/aodh-rpc.rst diff --git a/doc/config-ref-rst/source/tables/aodh-zeromq.rst b/doc/config-reference/source/tables/aodh-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/aodh-zeromq.rst rename to doc/config-reference/source/tables/aodh-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-amqp.rst b/doc/config-reference/source/tables/ceilometer-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-amqp.rst rename to doc/config-reference/source/tables/ceilometer-amqp.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-api.rst b/doc/config-reference/source/tables/ceilometer-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-api.rst rename to doc/config-reference/source/tables/ceilometer-api.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-auth.rst b/doc/config-reference/source/tables/ceilometer-auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-auth.rst rename to doc/config-reference/source/tables/ceilometer-auth.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-auth_token.rst b/doc/config-reference/source/tables/ceilometer-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-auth_token.rst rename to doc/config-reference/source/tables/ceilometer-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-collector.rst b/doc/config-reference/source/tables/ceilometer-collector.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-collector.rst rename to doc/config-reference/source/tables/ceilometer-collector.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-common.rst b/doc/config-reference/source/tables/ceilometer-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-common.rst rename to doc/config-reference/source/tables/ceilometer-common.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-cors.rst b/doc/config-reference/source/tables/ceilometer-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-cors.rst rename to doc/config-reference/source/tables/ceilometer-cors.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-database.rst b/doc/config-reference/source/tables/ceilometer-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-database.rst rename to doc/config-reference/source/tables/ceilometer-database.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-debug.rst b/doc/config-reference/source/tables/ceilometer-debug.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-debug.rst rename to doc/config-reference/source/tables/ceilometer-debug.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-dispatcher_gnocchi.rst b/doc/config-reference/source/tables/ceilometer-dispatcher_gnocchi.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/ceilometer-dispatcher_gnocchi.rst rename to doc/config-reference/source/tables/ceilometer-dispatcher_gnocchi.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-dispatcher_http.rst b/doc/config-reference/source/tables/ceilometer-dispatcher_http.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-dispatcher_http.rst rename to doc/config-reference/source/tables/ceilometer-dispatcher_http.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-dispatchers.rst b/doc/config-reference/source/tables/ceilometer-dispatchers.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-dispatchers.rst rename to doc/config-reference/source/tables/ceilometer-dispatchers.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-events.rst b/doc/config-reference/source/tables/ceilometer-events.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-events.rst rename to doc/config-reference/source/tables/ceilometer-events.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-exchange.rst b/doc/config-reference/source/tables/ceilometer-exchange.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-exchange.rst rename to doc/config-reference/source/tables/ceilometer-exchange.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-glance.rst b/doc/config-reference/source/tables/ceilometer-glance.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-glance.rst rename to doc/config-reference/source/tables/ceilometer-glance.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-inspector.rst b/doc/config-reference/source/tables/ceilometer-inspector.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-inspector.rst rename to doc/config-reference/source/tables/ceilometer-inspector.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-ipmi.rst b/doc/config-reference/source/tables/ceilometer-ipmi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-ipmi.rst rename to doc/config-reference/source/tables/ceilometer-ipmi.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-logging.rst b/doc/config-reference/source/tables/ceilometer-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-logging.rst rename to doc/config-reference/source/tables/ceilometer-logging.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-magnetodb.rst b/doc/config-reference/source/tables/ceilometer-magnetodb.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-magnetodb.rst rename to doc/config-reference/source/tables/ceilometer-magnetodb.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-notification.rst b/doc/config-reference/source/tables/ceilometer-notification.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-notification.rst rename to doc/config-reference/source/tables/ceilometer-notification.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-policy.rst b/doc/config-reference/source/tables/ceilometer-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-policy.rst rename to doc/config-reference/source/tables/ceilometer-policy.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-qpid.rst b/doc/config-reference/source/tables/ceilometer-qpid.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/ceilometer-qpid.rst rename to doc/config-reference/source/tables/ceilometer-qpid.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-rabbitmq.rst b/doc/config-reference/source/tables/ceilometer-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-rabbitmq.rst rename to doc/config-reference/source/tables/ceilometer-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-redis.rst b/doc/config-reference/source/tables/ceilometer-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-redis.rst rename to doc/config-reference/source/tables/ceilometer-redis.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-rgw.rst b/doc/config-reference/source/tables/ceilometer-rgw.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-rgw.rst rename to doc/config-reference/source/tables/ceilometer-rgw.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-rpc.rst b/doc/config-reference/source/tables/ceilometer-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-rpc.rst rename to doc/config-reference/source/tables/ceilometer-rpc.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-service_types.rst b/doc/config-reference/source/tables/ceilometer-service_types.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-service_types.rst rename to doc/config-reference/source/tables/ceilometer-service_types.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-storage.rst b/doc/config-reference/source/tables/ceilometer-storage.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-storage.rst rename to doc/config-reference/source/tables/ceilometer-storage.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-swift.rst b/doc/config-reference/source/tables/ceilometer-swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-swift.rst rename to doc/config-reference/source/tables/ceilometer-swift.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-tripleo.rst b/doc/config-reference/source/tables/ceilometer-tripleo.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-tripleo.rst rename to doc/config-reference/source/tables/ceilometer-tripleo.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-vmware.rst b/doc/config-reference/source/tables/ceilometer-vmware.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-vmware.rst rename to doc/config-reference/source/tables/ceilometer-vmware.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-xenapi.rst b/doc/config-reference/source/tables/ceilometer-xenapi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-xenapi.rst rename to doc/config-reference/source/tables/ceilometer-xenapi.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-zaqar.rst b/doc/config-reference/source/tables/ceilometer-zaqar.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-zaqar.rst rename to doc/config-reference/source/tables/ceilometer-zaqar.rst diff --git a/doc/config-ref-rst/source/tables/ceilometer-zeromq.rst b/doc/config-reference/source/tables/ceilometer-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ceilometer-zeromq.rst rename to doc/config-reference/source/tables/ceilometer-zeromq.rst diff --git 
a/doc/config-ref-rst/source/tables/cinder-amqp.rst b/doc/config-reference/source/tables/cinder-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-amqp.rst rename to doc/config-reference/source/tables/cinder-amqp.rst diff --git a/doc/config-ref-rst/source/tables/cinder-api.rst b/doc/config-reference/source/tables/cinder-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-api.rst rename to doc/config-reference/source/tables/cinder-api.rst diff --git a/doc/config-ref-rst/source/tables/cinder-auth.rst b/doc/config-reference/source/tables/cinder-auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-auth.rst rename to doc/config-reference/source/tables/cinder-auth.rst diff --git a/doc/config-ref-rst/source/tables/cinder-auth_token.rst b/doc/config-reference/source/tables/cinder-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-auth_token.rst rename to doc/config-reference/source/tables/cinder-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/cinder-backups.rst b/doc/config-reference/source/tables/cinder-backups.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-backups.rst rename to doc/config-reference/source/tables/cinder-backups.rst diff --git a/doc/config-ref-rst/source/tables/cinder-backups_ceph.rst b/doc/config-reference/source/tables/cinder-backups_ceph.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-backups_ceph.rst rename to doc/config-reference/source/tables/cinder-backups_ceph.rst diff --git a/doc/config-ref-rst/source/tables/cinder-backups_nfs.rst b/doc/config-reference/source/tables/cinder-backups_nfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-backups_nfs.rst rename to doc/config-reference/source/tables/cinder-backups_nfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-backups_swift.rst b/doc/config-reference/source/tables/cinder-backups_swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-backups_swift.rst rename to doc/config-reference/source/tables/cinder-backups_swift.rst diff --git a/doc/config-ref-rst/source/tables/cinder-backups_tsm.rst b/doc/config-reference/source/tables/cinder-backups_tsm.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-backups_tsm.rst rename to doc/config-reference/source/tables/cinder-backups_tsm.rst diff --git a/doc/config-ref-rst/source/tables/cinder-block-device.rst b/doc/config-reference/source/tables/cinder-block-device.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-block-device.rst rename to doc/config-reference/source/tables/cinder-block-device.rst diff --git a/doc/config-ref-rst/source/tables/cinder-blockbridge.rst b/doc/config-reference/source/tables/cinder-blockbridge.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-blockbridge.rst rename to doc/config-reference/source/tables/cinder-blockbridge.rst diff --git a/doc/config-ref-rst/source/tables/cinder-ca.rst b/doc/config-reference/source/tables/cinder-ca.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-ca.rst rename to doc/config-reference/source/tables/cinder-ca.rst diff --git a/doc/config-ref-rst/source/tables/cinder-cloudbyte.rst b/doc/config-reference/source/tables/cinder-cloudbyte.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-cloudbyte.rst rename to 
doc/config-reference/source/tables/cinder-cloudbyte.rst diff --git a/doc/config-ref-rst/source/tables/cinder-common.rst b/doc/config-reference/source/tables/cinder-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-common.rst rename to doc/config-reference/source/tables/cinder-common.rst diff --git a/doc/config-ref-rst/source/tables/cinder-compute.rst b/doc/config-reference/source/tables/cinder-compute.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-compute.rst rename to doc/config-reference/source/tables/cinder-compute.rst diff --git a/doc/config-ref-rst/source/tables/cinder-cors.rst b/doc/config-reference/source/tables/cinder-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-cors.rst rename to doc/config-reference/source/tables/cinder-cors.rst diff --git a/doc/config-ref-rst/source/tables/cinder-database.rst b/doc/config-reference/source/tables/cinder-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-database.rst rename to doc/config-reference/source/tables/cinder-database.rst diff --git a/doc/config-ref-rst/source/tables/cinder-datera.rst b/doc/config-reference/source/tables/cinder-datera.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-datera.rst rename to doc/config-reference/source/tables/cinder-datera.rst diff --git a/doc/config-ref-rst/source/tables/cinder-debug.rst b/doc/config-reference/source/tables/cinder-debug.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-debug.rst rename to doc/config-reference/source/tables/cinder-debug.rst diff --git a/doc/config-ref-rst/source/tables/cinder-dellsc.rst b/doc/config-reference/source/tables/cinder-dellsc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-dellsc.rst rename to doc/config-reference/source/tables/cinder-dellsc.rst diff --git a/doc/config-ref-rst/source/tables/cinder-dothill.rst b/doc/config-reference/source/tables/cinder-dothill.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-dothill.rst rename to doc/config-reference/source/tables/cinder-dothill.rst diff --git a/doc/config-ref-rst/source/tables/cinder-drbd.rst b/doc/config-reference/source/tables/cinder-drbd.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-drbd.rst rename to doc/config-reference/source/tables/cinder-drbd.rst diff --git a/doc/config-ref-rst/source/tables/cinder-emc.rst b/doc/config-reference/source/tables/cinder-emc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-emc.rst rename to doc/config-reference/source/tables/cinder-emc.rst diff --git a/doc/config-ref-rst/source/tables/cinder-emc_sio.rst b/doc/config-reference/source/tables/cinder-emc_sio.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-emc_sio.rst rename to doc/config-reference/source/tables/cinder-emc_sio.rst diff --git a/doc/config-ref-rst/source/tables/cinder-eqlx.rst b/doc/config-reference/source/tables/cinder-eqlx.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-eqlx.rst rename to doc/config-reference/source/tables/cinder-eqlx.rst diff --git a/doc/config-ref-rst/source/tables/cinder-flashsystem.rst b/doc/config-reference/source/tables/cinder-flashsystem.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-flashsystem.rst rename to doc/config-reference/source/tables/cinder-flashsystem.rst diff --git 
a/doc/config-ref-rst/source/tables/cinder-hds-hnas.rst b/doc/config-reference/source/tables/cinder-hds-hnas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hds-hnas.rst rename to doc/config-reference/source/tables/cinder-hds-hnas.rst diff --git a/doc/config-ref-rst/source/tables/cinder-hgst.rst b/doc/config-reference/source/tables/cinder-hgst.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hgst.rst rename to doc/config-reference/source/tables/cinder-hgst.rst diff --git a/doc/config-ref-rst/source/tables/cinder-hitachi-hbsd.rst b/doc/config-reference/source/tables/cinder-hitachi-hbsd.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hitachi-hbsd.rst rename to doc/config-reference/source/tables/cinder-hitachi-hbsd.rst diff --git a/doc/config-ref-rst/source/tables/cinder-hpe3par.rst b/doc/config-reference/source/tables/cinder-hpe3par.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hpe3par.rst rename to doc/config-reference/source/tables/cinder-hpe3par.rst diff --git a/doc/config-ref-rst/source/tables/cinder-hpelefthand.rst b/doc/config-reference/source/tables/cinder-hpelefthand.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hpelefthand.rst rename to doc/config-reference/source/tables/cinder-hpelefthand.rst diff --git a/doc/config-ref-rst/source/tables/cinder-hpmsa.rst b/doc/config-reference/source/tables/cinder-hpmsa.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hpmsa.rst rename to doc/config-reference/source/tables/cinder-hpmsa.rst diff --git a/doc/config-ref-rst/source/tables/cinder-hpxp.rst b/doc/config-reference/source/tables/cinder-hpxp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-hpxp.rst rename to doc/config-reference/source/tables/cinder-hpxp.rst diff --git a/doc/config-ref-rst/source/tables/cinder-huawei.rst b/doc/config-reference/source/tables/cinder-huawei.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-huawei.rst rename to doc/config-reference/source/tables/cinder-huawei.rst diff --git a/doc/config-ref-rst/source/tables/cinder-ibmnas.rst b/doc/config-reference/source/tables/cinder-ibmnas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-ibmnas.rst rename to doc/config-reference/source/tables/cinder-ibmnas.rst diff --git a/doc/config-ref-rst/source/tables/cinder-images.rst b/doc/config-reference/source/tables/cinder-images.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-images.rst rename to doc/config-reference/source/tables/cinder-images.rst diff --git a/doc/config-ref-rst/source/tables/cinder-infortrend.rst b/doc/config-reference/source/tables/cinder-infortrend.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-infortrend.rst rename to doc/config-reference/source/tables/cinder-infortrend.rst diff --git a/doc/config-ref-rst/source/tables/cinder-keymgr.rst b/doc/config-reference/source/tables/cinder-keymgr.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-keymgr.rst rename to doc/config-reference/source/tables/cinder-keymgr.rst diff --git a/doc/config-ref-rst/source/tables/cinder-lenovo.rst b/doc/config-reference/source/tables/cinder-lenovo.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-lenovo.rst rename to doc/config-reference/source/tables/cinder-lenovo.rst diff --git 
a/doc/config-ref-rst/source/tables/cinder-logging.rst b/doc/config-reference/source/tables/cinder-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-logging.rst rename to doc/config-reference/source/tables/cinder-logging.rst diff --git a/doc/config-ref-rst/source/tables/cinder-lvm.rst b/doc/config-reference/source/tables/cinder-lvm.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-lvm.rst rename to doc/config-reference/source/tables/cinder-lvm.rst diff --git a/doc/config-ref-rst/source/tables/cinder-netapp_7mode_iscsi.rst b/doc/config-reference/source/tables/cinder-netapp_7mode_iscsi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-netapp_7mode_iscsi.rst rename to doc/config-reference/source/tables/cinder-netapp_7mode_iscsi.rst diff --git a/doc/config-ref-rst/source/tables/cinder-netapp_7mode_nfs.rst b/doc/config-reference/source/tables/cinder-netapp_7mode_nfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-netapp_7mode_nfs.rst rename to doc/config-reference/source/tables/cinder-netapp_7mode_nfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-netapp_cdot_iscsi.rst b/doc/config-reference/source/tables/cinder-netapp_cdot_iscsi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-netapp_cdot_iscsi.rst rename to doc/config-reference/source/tables/cinder-netapp_cdot_iscsi.rst diff --git a/doc/config-ref-rst/source/tables/cinder-netapp_cdot_nfs.rst b/doc/config-reference/source/tables/cinder-netapp_cdot_nfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-netapp_cdot_nfs.rst rename to doc/config-reference/source/tables/cinder-netapp_cdot_nfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-netapp_eseries_iscsi.rst b/doc/config-reference/source/tables/cinder-netapp_eseries_iscsi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-netapp_eseries_iscsi.rst rename to doc/config-reference/source/tables/cinder-netapp_eseries_iscsi.rst diff --git a/doc/config-ref-rst/source/tables/cinder-nimble.rst b/doc/config-reference/source/tables/cinder-nimble.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-nimble.rst rename to doc/config-reference/source/tables/cinder-nimble.rst diff --git a/doc/config-ref-rst/source/tables/cinder-profiler.rst b/doc/config-reference/source/tables/cinder-profiler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-profiler.rst rename to doc/config-reference/source/tables/cinder-profiler.rst diff --git a/doc/config-ref-rst/source/tables/cinder-prophetstor_dpl.rst b/doc/config-reference/source/tables/cinder-prophetstor_dpl.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-prophetstor_dpl.rst rename to doc/config-reference/source/tables/cinder-prophetstor_dpl.rst diff --git a/doc/config-ref-rst/source/tables/cinder-pure.rst b/doc/config-reference/source/tables/cinder-pure.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-pure.rst rename to doc/config-reference/source/tables/cinder-pure.rst diff --git a/doc/config-ref-rst/source/tables/cinder-qpid.rst b/doc/config-reference/source/tables/cinder-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-qpid.rst rename to doc/config-reference/source/tables/cinder-qpid.rst diff --git a/doc/config-ref-rst/source/tables/cinder-quobyte.rst 
b/doc/config-reference/source/tables/cinder-quobyte.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-quobyte.rst rename to doc/config-reference/source/tables/cinder-quobyte.rst diff --git a/doc/config-ref-rst/source/tables/cinder-quota.rst b/doc/config-reference/source/tables/cinder-quota.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-quota.rst rename to doc/config-reference/source/tables/cinder-quota.rst diff --git a/doc/config-ref-rst/source/tables/cinder-rabbitmq.rst b/doc/config-reference/source/tables/cinder-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-rabbitmq.rst rename to doc/config-reference/source/tables/cinder-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/cinder-redis.rst b/doc/config-reference/source/tables/cinder-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-redis.rst rename to doc/config-reference/source/tables/cinder-redis.rst diff --git a/doc/config-ref-rst/source/tables/cinder-rpc.rst b/doc/config-reference/source/tables/cinder-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-rpc.rst rename to doc/config-reference/source/tables/cinder-rpc.rst diff --git a/doc/config-ref-rst/source/tables/cinder-san.rst b/doc/config-reference/source/tables/cinder-san.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-san.rst rename to doc/config-reference/source/tables/cinder-san.rst diff --git a/doc/config-ref-rst/source/tables/cinder-scality.rst b/doc/config-reference/source/tables/cinder-scality.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-scality.rst rename to doc/config-reference/source/tables/cinder-scality.rst diff --git a/doc/config-ref-rst/source/tables/cinder-scheduler.rst b/doc/config-reference/source/tables/cinder-scheduler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-scheduler.rst rename to doc/config-reference/source/tables/cinder-scheduler.rst diff --git a/doc/config-ref-rst/source/tables/cinder-scst.rst b/doc/config-reference/source/tables/cinder-scst.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-scst.rst rename to doc/config-reference/source/tables/cinder-scst.rst diff --git a/doc/config-ref-rst/source/tables/cinder-sheepdog.rst b/doc/config-reference/source/tables/cinder-sheepdog.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-sheepdog.rst rename to doc/config-reference/source/tables/cinder-sheepdog.rst diff --git a/doc/config-ref-rst/source/tables/cinder-smbfs.rst b/doc/config-reference/source/tables/cinder-smbfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-smbfs.rst rename to doc/config-reference/source/tables/cinder-smbfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-solidfire.rst b/doc/config-reference/source/tables/cinder-solidfire.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-solidfire.rst rename to doc/config-reference/source/tables/cinder-solidfire.rst diff --git a/doc/config-ref-rst/source/tables/cinder-srb.rst b/doc/config-reference/source/tables/cinder-srb.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-srb.rst rename to doc/config-reference/source/tables/cinder-srb.rst diff --git a/doc/config-ref-rst/source/tables/cinder-storage.rst b/doc/config-reference/source/tables/cinder-storage.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/cinder-storage.rst rename to doc/config-reference/source/tables/cinder-storage.rst diff --git a/doc/config-ref-rst/source/tables/cinder-storage_ceph.rst b/doc/config-reference/source/tables/cinder-storage_ceph.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-storage_ceph.rst rename to doc/config-reference/source/tables/cinder-storage_ceph.rst diff --git a/doc/config-ref-rst/source/tables/cinder-storage_glusterfs.rst b/doc/config-reference/source/tables/cinder-storage_glusterfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-storage_glusterfs.rst rename to doc/config-reference/source/tables/cinder-storage_glusterfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-storage_gpfs.rst b/doc/config-reference/source/tables/cinder-storage_gpfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-storage_gpfs.rst rename to doc/config-reference/source/tables/cinder-storage_gpfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-storage_nfs.rst b/doc/config-reference/source/tables/cinder-storage_nfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-storage_nfs.rst rename to doc/config-reference/source/tables/cinder-storage_nfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-storwize.rst b/doc/config-reference/source/tables/cinder-storwize.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-storwize.rst rename to doc/config-reference/source/tables/cinder-storwize.rst diff --git a/doc/config-ref-rst/source/tables/cinder-swift.rst b/doc/config-reference/source/tables/cinder-swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-swift.rst rename to doc/config-reference/source/tables/cinder-swift.rst diff --git a/doc/config-ref-rst/source/tables/cinder-tintri.rst b/doc/config-reference/source/tables/cinder-tintri.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-tintri.rst rename to doc/config-reference/source/tables/cinder-tintri.rst diff --git a/doc/config-ref-rst/source/tables/cinder-violin.rst b/doc/config-reference/source/tables/cinder-violin.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-violin.rst rename to doc/config-reference/source/tables/cinder-violin.rst diff --git a/doc/config-ref-rst/source/tables/cinder-vmware.rst b/doc/config-reference/source/tables/cinder-vmware.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-vmware.rst rename to doc/config-reference/source/tables/cinder-vmware.rst diff --git a/doc/config-ref-rst/source/tables/cinder-vzstorage.rst b/doc/config-reference/source/tables/cinder-vzstorage.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-vzstorage.rst rename to doc/config-reference/source/tables/cinder-vzstorage.rst diff --git a/doc/config-ref-rst/source/tables/cinder-windows.rst b/doc/config-reference/source/tables/cinder-windows.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-windows.rst rename to doc/config-reference/source/tables/cinder-windows.rst diff --git a/doc/config-ref-rst/source/tables/cinder-xio.rst b/doc/config-reference/source/tables/cinder-xio.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-xio.rst rename to doc/config-reference/source/tables/cinder-xio.rst diff --git a/doc/config-ref-rst/source/tables/cinder-xiv.rst b/doc/config-reference/source/tables/cinder-xiv.rst 
similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-xiv.rst rename to doc/config-reference/source/tables/cinder-xiv.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zeromq.rst b/doc/config-reference/source/tables/cinder-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zeromq.rst rename to doc/config-reference/source/tables/cinder-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zfssa-iscsi.rst b/doc/config-reference/source/tables/cinder-zfssa-iscsi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zfssa-iscsi.rst rename to doc/config-reference/source/tables/cinder-zfssa-iscsi.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zfssa-nfs.rst b/doc/config-reference/source/tables/cinder-zfssa-nfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zfssa-nfs.rst rename to doc/config-reference/source/tables/cinder-zfssa-nfs.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zones.rst b/doc/config-reference/source/tables/cinder-zones.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zones.rst rename to doc/config-reference/source/tables/cinder-zones.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zoning.rst b/doc/config-reference/source/tables/cinder-zoning.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zoning.rst rename to doc/config-reference/source/tables/cinder-zoning.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zoning_fabric.rst b/doc/config-reference/source/tables/cinder-zoning_fabric.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zoning_fabric.rst rename to doc/config-reference/source/tables/cinder-zoning_fabric.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zoning_fabric_cisco.rst b/doc/config-reference/source/tables/cinder-zoning_fabric_cisco.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zoning_fabric_cisco.rst rename to doc/config-reference/source/tables/cinder-zoning_fabric_cisco.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zoning_manager.rst b/doc/config-reference/source/tables/cinder-zoning_manager.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zoning_manager.rst rename to doc/config-reference/source/tables/cinder-zoning_manager.rst diff --git a/doc/config-ref-rst/source/tables/cinder-zoning_manager_cisco.rst b/doc/config-reference/source/tables/cinder-zoning_manager_cisco.rst similarity index 100% rename from doc/config-ref-rst/source/tables/cinder-zoning_manager_cisco.rst rename to doc/config-reference/source/tables/cinder-zoning_manager_cisco.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/README.txt b/doc/config-reference/source/tables/conf-changes/README.txt similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/README.txt rename to doc/config-reference/source/tables/conf-changes/README.txt diff --git a/doc/config-ref-rst/source/tables/conf-changes/ceilometer.rst b/doc/config-reference/source/tables/conf-changes/ceilometer.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/ceilometer.rst rename to doc/config-reference/source/tables/conf-changes/ceilometer.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/cinder.rst b/doc/config-reference/source/tables/conf-changes/cinder.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/conf-changes/cinder.rst rename to doc/config-reference/source/tables/conf-changes/cinder.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/glance.rst b/doc/config-reference/source/tables/conf-changes/glance.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/glance.rst rename to doc/config-reference/source/tables/conf-changes/glance.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/heat.rst b/doc/config-reference/source/tables/conf-changes/heat.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/heat.rst rename to doc/config-reference/source/tables/conf-changes/heat.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/ironic.rst b/doc/config-reference/source/tables/conf-changes/ironic.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/ironic.rst rename to doc/config-reference/source/tables/conf-changes/ironic.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/keystone.rst b/doc/config-reference/source/tables/conf-changes/keystone.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/keystone.rst rename to doc/config-reference/source/tables/conf-changes/keystone.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/manila.rst b/doc/config-reference/source/tables/conf-changes/manila.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/manila.rst rename to doc/config-reference/source/tables/conf-changes/manila.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/neutron.rst b/doc/config-reference/source/tables/conf-changes/neutron.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/neutron.rst rename to doc/config-reference/source/tables/conf-changes/neutron.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/nova.rst b/doc/config-reference/source/tables/conf-changes/nova.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/nova.rst rename to doc/config-reference/source/tables/conf-changes/nova.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/sahara.rst b/doc/config-reference/source/tables/conf-changes/sahara.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/sahara.rst rename to doc/config-reference/source/tables/conf-changes/sahara.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/swift.rst b/doc/config-reference/source/tables/conf-changes/swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/swift.rst rename to doc/config-reference/source/tables/conf-changes/swift.rst diff --git a/doc/config-ref-rst/source/tables/conf-changes/trove.rst b/doc/config-reference/source/tables/conf-changes/trove.rst similarity index 100% rename from doc/config-ref-rst/source/tables/conf-changes/trove.rst rename to doc/config-reference/source/tables/conf-changes/trove.rst diff --git a/doc/config-ref-rst/source/tables/glance-amqp.rst b/doc/config-reference/source/tables/glance-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-amqp.rst rename to doc/config-reference/source/tables/glance-amqp.rst diff --git a/doc/config-ref-rst/source/tables/glance-api.rst b/doc/config-reference/source/tables/glance-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-api.rst rename to doc/config-reference/source/tables/glance-api.rst diff --git 
a/doc/config-ref-rst/source/tables/glance-auth_token.rst b/doc/config-reference/source/tables/glance-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-auth_token.rst rename to doc/config-reference/source/tables/glance-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/glance-ca.rst b/doc/config-reference/source/tables/glance-ca.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-ca.rst rename to doc/config-reference/source/tables/glance-ca.rst diff --git a/doc/config-ref-rst/source/tables/glance-cinder.rst b/doc/config-reference/source/tables/glance-cinder.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-cinder.rst rename to doc/config-reference/source/tables/glance-cinder.rst diff --git a/doc/config-ref-rst/source/tables/glance-common.rst b/doc/config-reference/source/tables/glance-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-common.rst rename to doc/config-reference/source/tables/glance-common.rst diff --git a/doc/config-ref-rst/source/tables/glance-cors.rst b/doc/config-reference/source/tables/glance-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-cors.rst rename to doc/config-reference/source/tables/glance-cors.rst diff --git a/doc/config-ref-rst/source/tables/glance-database.rst b/doc/config-reference/source/tables/glance-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-database.rst rename to doc/config-reference/source/tables/glance-database.rst diff --git a/doc/config-ref-rst/source/tables/glance-filesystem.rst b/doc/config-reference/source/tables/glance-filesystem.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-filesystem.rst rename to doc/config-reference/source/tables/glance-filesystem.rst diff --git a/doc/config-ref-rst/source/tables/glance-gridfs.rst b/doc/config-reference/source/tables/glance-gridfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-gridfs.rst rename to doc/config-reference/source/tables/glance-gridfs.rst diff --git a/doc/config-ref-rst/source/tables/glance-imagecache.rst b/doc/config-reference/source/tables/glance-imagecache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-imagecache.rst rename to doc/config-reference/source/tables/glance-imagecache.rst diff --git a/doc/config-ref-rst/source/tables/glance-logging.rst b/doc/config-reference/source/tables/glance-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-logging.rst rename to doc/config-reference/source/tables/glance-logging.rst diff --git a/doc/config-ref-rst/source/tables/glance-policy.rst b/doc/config-reference/source/tables/glance-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-policy.rst rename to doc/config-reference/source/tables/glance-policy.rst diff --git a/doc/config-ref-rst/source/tables/glance-profiler.rst b/doc/config-reference/source/tables/glance-profiler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-profiler.rst rename to doc/config-reference/source/tables/glance-profiler.rst diff --git a/doc/config-ref-rst/source/tables/glance-qpid.rst b/doc/config-reference/source/tables/glance-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-qpid.rst rename to doc/config-reference/source/tables/glance-qpid.rst diff --git 
a/doc/config-ref-rst/source/tables/glance-rabbitmq.rst b/doc/config-reference/source/tables/glance-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-rabbitmq.rst rename to doc/config-reference/source/tables/glance-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/glance-rbd.rst b/doc/config-reference/source/tables/glance-rbd.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-rbd.rst rename to doc/config-reference/source/tables/glance-rbd.rst diff --git a/doc/config-ref-rst/source/tables/glance-redis.rst b/doc/config-reference/source/tables/glance-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-redis.rst rename to doc/config-reference/source/tables/glance-redis.rst diff --git a/doc/config-ref-rst/source/tables/glance-registry.rst b/doc/config-reference/source/tables/glance-registry.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-registry.rst rename to doc/config-reference/source/tables/glance-registry.rst diff --git a/doc/config-ref-rst/source/tables/glance-replicator.rst b/doc/config-reference/source/tables/glance-replicator.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-replicator.rst rename to doc/config-reference/source/tables/glance-replicator.rst diff --git a/doc/config-ref-rst/source/tables/glance-rpc.rst b/doc/config-reference/source/tables/glance-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-rpc.rst rename to doc/config-reference/source/tables/glance-rpc.rst diff --git a/doc/config-ref-rst/source/tables/glance-s3.rst b/doc/config-reference/source/tables/glance-s3.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-s3.rst rename to doc/config-reference/source/tables/glance-s3.rst diff --git a/doc/config-ref-rst/source/tables/glance-scrubber.rst b/doc/config-reference/source/tables/glance-scrubber.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-scrubber.rst rename to doc/config-reference/source/tables/glance-scrubber.rst diff --git a/doc/config-ref-rst/source/tables/glance-sheepdog.rst b/doc/config-reference/source/tables/glance-sheepdog.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-sheepdog.rst rename to doc/config-reference/source/tables/glance-sheepdog.rst diff --git a/doc/config-ref-rst/source/tables/glance-swift.rst b/doc/config-reference/source/tables/glance-swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-swift.rst rename to doc/config-reference/source/tables/glance-swift.rst diff --git a/doc/config-ref-rst/source/tables/glance-taskflow.rst b/doc/config-reference/source/tables/glance-taskflow.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-taskflow.rst rename to doc/config-reference/source/tables/glance-taskflow.rst diff --git a/doc/config-ref-rst/source/tables/glance-testing.rst b/doc/config-reference/source/tables/glance-testing.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-testing.rst rename to doc/config-reference/source/tables/glance-testing.rst diff --git a/doc/config-ref-rst/source/tables/glance-vmware.rst b/doc/config-reference/source/tables/glance-vmware.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-vmware.rst rename to doc/config-reference/source/tables/glance-vmware.rst diff --git a/doc/config-ref-rst/source/tables/glance-zeromq.rst 
b/doc/config-reference/source/tables/glance-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/glance-zeromq.rst rename to doc/config-reference/source/tables/glance-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/heat-amqp.rst b/doc/config-reference/source/tables/heat-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-amqp.rst rename to doc/config-reference/source/tables/heat-amqp.rst diff --git a/doc/config-ref-rst/source/tables/heat-api.rst b/doc/config-reference/source/tables/heat-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-api.rst rename to doc/config-reference/source/tables/heat-api.rst diff --git a/doc/config-ref-rst/source/tables/heat-auth_token.rst b/doc/config-reference/source/tables/heat-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-auth_token.rst rename to doc/config-reference/source/tables/heat-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/heat-cfn_api.rst b/doc/config-reference/source/tables/heat-cfn_api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-cfn_api.rst rename to doc/config-reference/source/tables/heat-cfn_api.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients.rst b/doc/config-reference/source/tables/heat-clients.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients.rst rename to doc/config-reference/source/tables/heat-clients.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_backends.rst b/doc/config-reference/source/tables/heat-clients_backends.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_backends.rst rename to doc/config-reference/source/tables/heat-clients_backends.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_ceilometer.rst b/doc/config-reference/source/tables/heat-clients_ceilometer.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_ceilometer.rst rename to doc/config-reference/source/tables/heat-clients_ceilometer.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_cinder.rst b/doc/config-reference/source/tables/heat-clients_cinder.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_cinder.rst rename to doc/config-reference/source/tables/heat-clients_cinder.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_glance.rst b/doc/config-reference/source/tables/heat-clients_glance.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_glance.rst rename to doc/config-reference/source/tables/heat-clients_glance.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_heat.rst b/doc/config-reference/source/tables/heat-clients_heat.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_heat.rst rename to doc/config-reference/source/tables/heat-clients_heat.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_keystone.rst b/doc/config-reference/source/tables/heat-clients_keystone.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_keystone.rst rename to doc/config-reference/source/tables/heat-clients_keystone.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_neutron.rst b/doc/config-reference/source/tables/heat-clients_neutron.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_neutron.rst rename to 
doc/config-reference/source/tables/heat-clients_neutron.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_nova.rst b/doc/config-reference/source/tables/heat-clients_nova.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_nova.rst rename to doc/config-reference/source/tables/heat-clients_nova.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_sahara.rst b/doc/config-reference/source/tables/heat-clients_sahara.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_sahara.rst rename to doc/config-reference/source/tables/heat-clients_sahara.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_swift.rst b/doc/config-reference/source/tables/heat-clients_swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_swift.rst rename to doc/config-reference/source/tables/heat-clients_swift.rst diff --git a/doc/config-ref-rst/source/tables/heat-clients_trove.rst b/doc/config-reference/source/tables/heat-clients_trove.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-clients_trove.rst rename to doc/config-reference/source/tables/heat-clients_trove.rst diff --git a/doc/config-ref-rst/source/tables/heat-cloudwatch_api.rst b/doc/config-reference/source/tables/heat-cloudwatch_api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-cloudwatch_api.rst rename to doc/config-reference/source/tables/heat-cloudwatch_api.rst diff --git a/doc/config-ref-rst/source/tables/heat-common.rst b/doc/config-reference/source/tables/heat-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-common.rst rename to doc/config-reference/source/tables/heat-common.rst diff --git a/doc/config-ref-rst/source/tables/heat-cors.rst b/doc/config-reference/source/tables/heat-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-cors.rst rename to doc/config-reference/source/tables/heat-cors.rst diff --git a/doc/config-ref-rst/source/tables/heat-crypt.rst b/doc/config-reference/source/tables/heat-crypt.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-crypt.rst rename to doc/config-reference/source/tables/heat-crypt.rst diff --git a/doc/config-ref-rst/source/tables/heat-database.rst b/doc/config-reference/source/tables/heat-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-database.rst rename to doc/config-reference/source/tables/heat-database.rst diff --git a/doc/config-ref-rst/source/tables/heat-loadbalancer.rst b/doc/config-reference/source/tables/heat-loadbalancer.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-loadbalancer.rst rename to doc/config-reference/source/tables/heat-loadbalancer.rst diff --git a/doc/config-ref-rst/source/tables/heat-logging.rst b/doc/config-reference/source/tables/heat-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-logging.rst rename to doc/config-reference/source/tables/heat-logging.rst diff --git a/doc/config-ref-rst/source/tables/heat-metadata_api.rst b/doc/config-reference/source/tables/heat-metadata_api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-metadata_api.rst rename to doc/config-reference/source/tables/heat-metadata_api.rst diff --git a/doc/config-ref-rst/source/tables/heat-notification.rst b/doc/config-reference/source/tables/heat-notification.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/heat-notification.rst rename to doc/config-reference/source/tables/heat-notification.rst diff --git a/doc/config-ref-rst/source/tables/heat-qpid.rst b/doc/config-reference/source/tables/heat-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-qpid.rst rename to doc/config-reference/source/tables/heat-qpid.rst diff --git a/doc/config-ref-rst/source/tables/heat-quota.rst b/doc/config-reference/source/tables/heat-quota.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-quota.rst rename to doc/config-reference/source/tables/heat-quota.rst diff --git a/doc/config-ref-rst/source/tables/heat-rabbitmq.rst b/doc/config-reference/source/tables/heat-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-rabbitmq.rst rename to doc/config-reference/source/tables/heat-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/heat-redis.rst b/doc/config-reference/source/tables/heat-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-redis.rst rename to doc/config-reference/source/tables/heat-redis.rst diff --git a/doc/config-ref-rst/source/tables/heat-rpc.rst b/doc/config-reference/source/tables/heat-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-rpc.rst rename to doc/config-reference/source/tables/heat-rpc.rst diff --git a/doc/config-ref-rst/source/tables/heat-testing.rst b/doc/config-reference/source/tables/heat-testing.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-testing.rst rename to doc/config-reference/source/tables/heat-testing.rst diff --git a/doc/config-ref-rst/source/tables/heat-trustee.rst b/doc/config-reference/source/tables/heat-trustee.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-trustee.rst rename to doc/config-reference/source/tables/heat-trustee.rst diff --git a/doc/config-ref-rst/source/tables/heat-waitcondition_api.rst b/doc/config-reference/source/tables/heat-waitcondition_api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-waitcondition_api.rst rename to doc/config-reference/source/tables/heat-waitcondition_api.rst diff --git a/doc/config-ref-rst/source/tables/heat-zeromq.rst b/doc/config-reference/source/tables/heat-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/heat-zeromq.rst rename to doc/config-reference/source/tables/heat-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/ironic-agent.rst b/doc/config-reference/source/tables/ironic-agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-agent.rst rename to doc/config-reference/source/tables/ironic-agent.rst diff --git a/doc/config-ref-rst/source/tables/ironic-amqp.rst b/doc/config-reference/source/tables/ironic-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-amqp.rst rename to doc/config-reference/source/tables/ironic-amqp.rst diff --git a/doc/config-ref-rst/source/tables/ironic-amt.rst b/doc/config-reference/source/tables/ironic-amt.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-amt.rst rename to doc/config-reference/source/tables/ironic-amt.rst diff --git a/doc/config-ref-rst/source/tables/ironic-api.rst b/doc/config-reference/source/tables/ironic-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-api.rst rename to doc/config-reference/source/tables/ironic-api.rst diff --git 
a/doc/config-ref-rst/source/tables/ironic-auth.rst b/doc/config-reference/source/tables/ironic-auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-auth.rst rename to doc/config-reference/source/tables/ironic-auth.rst diff --git a/doc/config-ref-rst/source/tables/ironic-auth_token.rst b/doc/config-reference/source/tables/ironic-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-auth_token.rst rename to doc/config-reference/source/tables/ironic-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/ironic-cisco_ucs.rst b/doc/config-reference/source/tables/ironic-cisco_ucs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-cisco_ucs.rst rename to doc/config-reference/source/tables/ironic-cisco_ucs.rst diff --git a/doc/config-ref-rst/source/tables/ironic-common.rst b/doc/config-reference/source/tables/ironic-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-common.rst rename to doc/config-reference/source/tables/ironic-common.rst diff --git a/doc/config-ref-rst/source/tables/ironic-conductor.rst b/doc/config-reference/source/tables/ironic-conductor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-conductor.rst rename to doc/config-reference/source/tables/ironic-conductor.rst diff --git a/doc/config-ref-rst/source/tables/ironic-console.rst b/doc/config-reference/source/tables/ironic-console.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-console.rst rename to doc/config-reference/source/tables/ironic-console.rst diff --git a/doc/config-ref-rst/source/tables/ironic-database.rst b/doc/config-reference/source/tables/ironic-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-database.rst rename to doc/config-reference/source/tables/ironic-database.rst diff --git a/doc/config-ref-rst/source/tables/ironic-debug.rst b/doc/config-reference/source/tables/ironic-debug.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-debug.rst rename to doc/config-reference/source/tables/ironic-debug.rst diff --git a/doc/config-ref-rst/source/tables/ironic-deploy.rst b/doc/config-reference/source/tables/ironic-deploy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-deploy.rst rename to doc/config-reference/source/tables/ironic-deploy.rst diff --git a/doc/config-ref-rst/source/tables/ironic-dhcp.rst b/doc/config-reference/source/tables/ironic-dhcp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-dhcp.rst rename to doc/config-reference/source/tables/ironic-dhcp.rst diff --git a/doc/config-ref-rst/source/tables/ironic-disk_partitioner.rst b/doc/config-reference/source/tables/ironic-disk_partitioner.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-disk_partitioner.rst rename to doc/config-reference/source/tables/ironic-disk_partitioner.rst diff --git a/doc/config-ref-rst/source/tables/ironic-drac.rst b/doc/config-reference/source/tables/ironic-drac.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-drac.rst rename to doc/config-reference/source/tables/ironic-drac.rst diff --git a/doc/config-ref-rst/source/tables/ironic-glance.rst b/doc/config-reference/source/tables/ironic-glance.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-glance.rst rename to doc/config-reference/source/tables/ironic-glance.rst diff --git 
a/doc/config-ref-rst/source/tables/ironic-iboot.rst b/doc/config-reference/source/tables/ironic-iboot.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-iboot.rst rename to doc/config-reference/source/tables/ironic-iboot.rst diff --git a/doc/config-ref-rst/source/tables/ironic-ilo.rst b/doc/config-reference/source/tables/ironic-ilo.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-ilo.rst rename to doc/config-reference/source/tables/ironic-ilo.rst diff --git a/doc/config-ref-rst/source/tables/ironic-inspector.rst b/doc/config-reference/source/tables/ironic-inspector.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-inspector.rst rename to doc/config-reference/source/tables/ironic-inspector.rst diff --git a/doc/config-ref-rst/source/tables/ironic-ipmi.rst b/doc/config-reference/source/tables/ironic-ipmi.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-ipmi.rst rename to doc/config-reference/source/tables/ironic-ipmi.rst diff --git a/doc/config-ref-rst/source/tables/ironic-irmc.rst b/doc/config-reference/source/tables/ironic-irmc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-irmc.rst rename to doc/config-reference/source/tables/ironic-irmc.rst diff --git a/doc/config-ref-rst/source/tables/ironic-keystone.rst b/doc/config-reference/source/tables/ironic-keystone.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-keystone.rst rename to doc/config-reference/source/tables/ironic-keystone.rst diff --git a/doc/config-ref-rst/source/tables/ironic-logging.rst b/doc/config-reference/source/tables/ironic-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-logging.rst rename to doc/config-reference/source/tables/ironic-logging.rst diff --git a/doc/config-ref-rst/source/tables/ironic-neutron.rst b/doc/config-reference/source/tables/ironic-neutron.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-neutron.rst rename to doc/config-reference/source/tables/ironic-neutron.rst diff --git a/doc/config-ref-rst/source/tables/ironic-oneview.rst b/doc/config-reference/source/tables/ironic-oneview.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-oneview.rst rename to doc/config-reference/source/tables/ironic-oneview.rst diff --git a/doc/config-ref-rst/source/tables/ironic-policy.rst b/doc/config-reference/source/tables/ironic-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-policy.rst rename to doc/config-reference/source/tables/ironic-policy.rst diff --git a/doc/config-ref-rst/source/tables/ironic-pxe.rst b/doc/config-reference/source/tables/ironic-pxe.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-pxe.rst rename to doc/config-reference/source/tables/ironic-pxe.rst diff --git a/doc/config-ref-rst/source/tables/ironic-qpid.rst b/doc/config-reference/source/tables/ironic-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-qpid.rst rename to doc/config-reference/source/tables/ironic-qpid.rst diff --git a/doc/config-ref-rst/source/tables/ironic-rabbitmq.rst b/doc/config-reference/source/tables/ironic-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-rabbitmq.rst rename to doc/config-reference/source/tables/ironic-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/ironic-redis.rst 
b/doc/config-reference/source/tables/ironic-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-redis.rst rename to doc/config-reference/source/tables/ironic-redis.rst diff --git a/doc/config-ref-rst/source/tables/ironic-rpc.rst b/doc/config-reference/source/tables/ironic-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-rpc.rst rename to doc/config-reference/source/tables/ironic-rpc.rst diff --git a/doc/config-ref-rst/source/tables/ironic-seamicro.rst b/doc/config-reference/source/tables/ironic-seamicro.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-seamicro.rst rename to doc/config-reference/source/tables/ironic-seamicro.rst diff --git a/doc/config-ref-rst/source/tables/ironic-snmp.rst b/doc/config-reference/source/tables/ironic-snmp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-snmp.rst rename to doc/config-reference/source/tables/ironic-snmp.rst diff --git a/doc/config-ref-rst/source/tables/ironic-ssh.rst b/doc/config-reference/source/tables/ironic-ssh.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-ssh.rst rename to doc/config-reference/source/tables/ironic-ssh.rst diff --git a/doc/config-ref-rst/source/tables/ironic-swift.rst b/doc/config-reference/source/tables/ironic-swift.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-swift.rst rename to doc/config-reference/source/tables/ironic-swift.rst diff --git a/doc/config-ref-rst/source/tables/ironic-virtualbox.rst b/doc/config-reference/source/tables/ironic-virtualbox.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-virtualbox.rst rename to doc/config-reference/source/tables/ironic-virtualbox.rst diff --git a/doc/config-ref-rst/source/tables/ironic-zeromq.rst b/doc/config-reference/source/tables/ironic-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/ironic-zeromq.rst rename to doc/config-reference/source/tables/ironic-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/keystone-amqp.rst b/doc/config-reference/source/tables/keystone-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-amqp.rst rename to doc/config-reference/source/tables/keystone-amqp.rst diff --git a/doc/config-ref-rst/source/tables/keystone-api.rst b/doc/config-reference/source/tables/keystone-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-api.rst rename to doc/config-reference/source/tables/keystone-api.rst diff --git a/doc/config-ref-rst/source/tables/keystone-assignment.rst b/doc/config-reference/source/tables/keystone-assignment.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-assignment.rst rename to doc/config-reference/source/tables/keystone-assignment.rst diff --git a/doc/config-ref-rst/source/tables/keystone-auth.rst b/doc/config-reference/source/tables/keystone-auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-auth.rst rename to doc/config-reference/source/tables/keystone-auth.rst diff --git a/doc/config-ref-rst/source/tables/keystone-auth_token.rst b/doc/config-reference/source/tables/keystone-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-auth_token.rst rename to doc/config-reference/source/tables/keystone-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/keystone-ca.rst b/doc/config-reference/source/tables/keystone-ca.rst 
similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-ca.rst rename to doc/config-reference/source/tables/keystone-ca.rst diff --git a/doc/config-ref-rst/source/tables/keystone-cache.rst b/doc/config-reference/source/tables/keystone-cache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-cache.rst rename to doc/config-reference/source/tables/keystone-cache.rst diff --git a/doc/config-ref-rst/source/tables/keystone-catalog.rst b/doc/config-reference/source/tables/keystone-catalog.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-catalog.rst rename to doc/config-reference/source/tables/keystone-catalog.rst diff --git a/doc/config-ref-rst/source/tables/keystone-common.rst b/doc/config-reference/source/tables/keystone-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-common.rst rename to doc/config-reference/source/tables/keystone-common.rst diff --git a/doc/config-ref-rst/source/tables/keystone-cors.rst b/doc/config-reference/source/tables/keystone-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-cors.rst rename to doc/config-reference/source/tables/keystone-cors.rst diff --git a/doc/config-ref-rst/source/tables/keystone-credential.rst b/doc/config-reference/source/tables/keystone-credential.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-credential.rst rename to doc/config-reference/source/tables/keystone-credential.rst diff --git a/doc/config-ref-rst/source/tables/keystone-database.rst b/doc/config-reference/source/tables/keystone-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-database.rst rename to doc/config-reference/source/tables/keystone-database.rst diff --git a/doc/config-ref-rst/source/tables/keystone-debug.rst b/doc/config-reference/source/tables/keystone-debug.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-debug.rst rename to doc/config-reference/source/tables/keystone-debug.rst diff --git a/doc/config-ref-rst/source/tables/keystone-domain.rst b/doc/config-reference/source/tables/keystone-domain.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-domain.rst rename to doc/config-reference/source/tables/keystone-domain.rst diff --git a/doc/config-ref-rst/source/tables/keystone-federation.rst b/doc/config-reference/source/tables/keystone-federation.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-federation.rst rename to doc/config-reference/source/tables/keystone-federation.rst diff --git a/doc/config-ref-rst/source/tables/keystone-fernet_tokens.rst b/doc/config-reference/source/tables/keystone-fernet_tokens.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-fernet_tokens.rst rename to doc/config-reference/source/tables/keystone-fernet_tokens.rst diff --git a/doc/config-ref-rst/source/tables/keystone-identity.rst b/doc/config-reference/source/tables/keystone-identity.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-identity.rst rename to doc/config-reference/source/tables/keystone-identity.rst diff --git a/doc/config-ref-rst/source/tables/keystone-kvs.rst b/doc/config-reference/source/tables/keystone-kvs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-kvs.rst rename to doc/config-reference/source/tables/keystone-kvs.rst diff --git 
a/doc/config-ref-rst/source/tables/keystone-ldap.rst b/doc/config-reference/source/tables/keystone-ldap.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-ldap.rst rename to doc/config-reference/source/tables/keystone-ldap.rst diff --git a/doc/config-ref-rst/source/tables/keystone-logging.rst b/doc/config-reference/source/tables/keystone-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-logging.rst rename to doc/config-reference/source/tables/keystone-logging.rst diff --git a/doc/config-ref-rst/source/tables/keystone-mapping.rst b/doc/config-reference/source/tables/keystone-mapping.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-mapping.rst rename to doc/config-reference/source/tables/keystone-mapping.rst diff --git a/doc/config-ref-rst/source/tables/keystone-memcache.rst b/doc/config-reference/source/tables/keystone-memcache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-memcache.rst rename to doc/config-reference/source/tables/keystone-memcache.rst diff --git a/doc/config-ref-rst/source/tables/keystone-oauth.rst b/doc/config-reference/source/tables/keystone-oauth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-oauth.rst rename to doc/config-reference/source/tables/keystone-oauth.rst diff --git a/doc/config-ref-rst/source/tables/keystone-os_inherit.rst b/doc/config-reference/source/tables/keystone-os_inherit.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-os_inherit.rst rename to doc/config-reference/source/tables/keystone-os_inherit.rst diff --git a/doc/config-ref-rst/source/tables/keystone-policy.rst b/doc/config-reference/source/tables/keystone-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-policy.rst rename to doc/config-reference/source/tables/keystone-policy.rst diff --git a/doc/config-ref-rst/source/tables/keystone-qpid.rst b/doc/config-reference/source/tables/keystone-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-qpid.rst rename to doc/config-reference/source/tables/keystone-qpid.rst diff --git a/doc/config-ref-rst/source/tables/keystone-rabbitmq.rst b/doc/config-reference/source/tables/keystone-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-rabbitmq.rst rename to doc/config-reference/source/tables/keystone-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/keystone-redis.rst b/doc/config-reference/source/tables/keystone-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-redis.rst rename to doc/config-reference/source/tables/keystone-redis.rst diff --git a/doc/config-ref-rst/source/tables/keystone-revoke.rst b/doc/config-reference/source/tables/keystone-revoke.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-revoke.rst rename to doc/config-reference/source/tables/keystone-revoke.rst diff --git a/doc/config-ref-rst/source/tables/keystone-role.rst b/doc/config-reference/source/tables/keystone-role.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-role.rst rename to doc/config-reference/source/tables/keystone-role.rst diff --git a/doc/config-ref-rst/source/tables/keystone-rpc.rst b/doc/config-reference/source/tables/keystone-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-rpc.rst rename to 
doc/config-reference/source/tables/keystone-rpc.rst diff --git a/doc/config-ref-rst/source/tables/keystone-saml.rst b/doc/config-reference/source/tables/keystone-saml.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-saml.rst rename to doc/config-reference/source/tables/keystone-saml.rst diff --git a/doc/config-ref-rst/source/tables/keystone-security.rst b/doc/config-reference/source/tables/keystone-security.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-security.rst rename to doc/config-reference/source/tables/keystone-security.rst diff --git a/doc/config-ref-rst/source/tables/keystone-token.rst b/doc/config-reference/source/tables/keystone-token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-token.rst rename to doc/config-reference/source/tables/keystone-token.rst diff --git a/doc/config-ref-rst/source/tables/keystone-tokenless.rst b/doc/config-reference/source/tables/keystone-tokenless.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-tokenless.rst rename to doc/config-reference/source/tables/keystone-tokenless.rst diff --git a/doc/config-ref-rst/source/tables/keystone-trust.rst b/doc/config-reference/source/tables/keystone-trust.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-trust.rst rename to doc/config-reference/source/tables/keystone-trust.rst diff --git a/doc/config-ref-rst/source/tables/keystone-zeromq.rst b/doc/config-reference/source/tables/keystone-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/keystone-zeromq.rst rename to doc/config-reference/source/tables/keystone-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/manila-amqp.rst b/doc/config-reference/source/tables/manila-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-amqp.rst rename to doc/config-reference/source/tables/manila-amqp.rst diff --git a/doc/config-ref-rst/source/tables/manila-api.rst b/doc/config-reference/source/tables/manila-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-api.rst rename to doc/config-reference/source/tables/manila-api.rst diff --git a/doc/config-ref-rst/source/tables/manila-auth.rst b/doc/config-reference/source/tables/manila-auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-auth.rst rename to doc/config-reference/source/tables/manila-auth.rst diff --git a/doc/config-ref-rst/source/tables/manila-auth_token.rst b/doc/config-reference/source/tables/manila-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-auth_token.rst rename to doc/config-reference/source/tables/manila-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/manila-ca.rst b/doc/config-reference/source/tables/manila-ca.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-ca.rst rename to doc/config-reference/source/tables/manila-ca.rst diff --git a/doc/config-ref-rst/source/tables/manila-common.rst b/doc/config-reference/source/tables/manila-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-common.rst rename to doc/config-reference/source/tables/manila-common.rst diff --git a/doc/config-ref-rst/source/tables/manila-compute.rst b/doc/config-reference/source/tables/manila-compute.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-compute.rst rename to doc/config-reference/source/tables/manila-compute.rst 
diff --git a/doc/config-ref-rst/source/tables/manila-cors.rst b/doc/config-reference/source/tables/manila-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-cors.rst rename to doc/config-reference/source/tables/manila-cors.rst diff --git a/doc/config-ref-rst/source/tables/manila-database.rst b/doc/config-reference/source/tables/manila-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-database.rst rename to doc/config-reference/source/tables/manila-database.rst diff --git a/doc/config-ref-rst/source/tables/manila-emc.rst b/doc/config-reference/source/tables/manila-emc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-emc.rst rename to doc/config-reference/source/tables/manila-emc.rst diff --git a/doc/config-ref-rst/source/tables/manila-ganesha.rst b/doc/config-reference/source/tables/manila-ganesha.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-ganesha.rst rename to doc/config-reference/source/tables/manila-ganesha.rst diff --git a/doc/config-ref-rst/source/tables/manila-generic.rst b/doc/config-reference/source/tables/manila-generic.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-generic.rst rename to doc/config-reference/source/tables/manila-generic.rst diff --git a/doc/config-ref-rst/source/tables/manila-glusterfs.rst b/doc/config-reference/source/tables/manila-glusterfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-glusterfs.rst rename to doc/config-reference/source/tables/manila-glusterfs.rst diff --git a/doc/config-ref-rst/source/tables/manila-gpfs.rst b/doc/config-reference/source/tables/manila-gpfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-gpfs.rst rename to doc/config-reference/source/tables/manila-gpfs.rst diff --git a/doc/config-ref-rst/source/tables/manila-hdfs.rst b/doc/config-reference/source/tables/manila-hdfs.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-hdfs.rst rename to doc/config-reference/source/tables/manila-hdfs.rst diff --git a/doc/config-ref-rst/source/tables/manila-hds_hnas.rst b/doc/config-reference/source/tables/manila-hds_hnas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-hds_hnas.rst rename to doc/config-reference/source/tables/manila-hds_hnas.rst diff --git a/doc/config-ref-rst/source/tables/manila-hp3par.rst b/doc/config-reference/source/tables/manila-hp3par.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-hp3par.rst rename to doc/config-reference/source/tables/manila-hp3par.rst diff --git a/doc/config-ref-rst/source/tables/manila-hpe3par.rst b/doc/config-reference/source/tables/manila-hpe3par.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-hpe3par.rst rename to doc/config-reference/source/tables/manila-hpe3par.rst diff --git a/doc/config-ref-rst/source/tables/manila-huawei.rst b/doc/config-reference/source/tables/manila-huawei.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-huawei.rst rename to doc/config-reference/source/tables/manila-huawei.rst diff --git a/doc/config-ref-rst/source/tables/manila-logging.rst b/doc/config-reference/source/tables/manila-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-logging.rst rename to doc/config-reference/source/tables/manila-logging.rst diff --git a/doc/config-ref-rst/source/tables/manila-netapp.rst 
b/doc/config-reference/source/tables/manila-netapp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-netapp.rst rename to doc/config-reference/source/tables/manila-netapp.rst diff --git a/doc/config-ref-rst/source/tables/manila-qpid.rst b/doc/config-reference/source/tables/manila-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-qpid.rst rename to doc/config-reference/source/tables/manila-qpid.rst diff --git a/doc/config-ref-rst/source/tables/manila-quobyte.rst b/doc/config-reference/source/tables/manila-quobyte.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-quobyte.rst rename to doc/config-reference/source/tables/manila-quobyte.rst diff --git a/doc/config-ref-rst/source/tables/manila-quota.rst b/doc/config-reference/source/tables/manila-quota.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-quota.rst rename to doc/config-reference/source/tables/manila-quota.rst diff --git a/doc/config-ref-rst/source/tables/manila-rabbitmq.rst b/doc/config-reference/source/tables/manila-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-rabbitmq.rst rename to doc/config-reference/source/tables/manila-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/manila-redis.rst b/doc/config-reference/source/tables/manila-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-redis.rst rename to doc/config-reference/source/tables/manila-redis.rst diff --git a/doc/config-ref-rst/source/tables/manila-rpc.rst b/doc/config-reference/source/tables/manila-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-rpc.rst rename to doc/config-reference/source/tables/manila-rpc.rst diff --git a/doc/config-ref-rst/source/tables/manila-san.rst b/doc/config-reference/source/tables/manila-san.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-san.rst rename to doc/config-reference/source/tables/manila-san.rst diff --git a/doc/config-ref-rst/source/tables/manila-scheduler.rst b/doc/config-reference/source/tables/manila-scheduler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-scheduler.rst rename to doc/config-reference/source/tables/manila-scheduler.rst diff --git a/doc/config-ref-rst/source/tables/manila-share.rst b/doc/config-reference/source/tables/manila-share.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-share.rst rename to doc/config-reference/source/tables/manila-share.rst diff --git a/doc/config-ref-rst/source/tables/manila-winrm.rst b/doc/config-reference/source/tables/manila-winrm.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-winrm.rst rename to doc/config-reference/source/tables/manila-winrm.rst diff --git a/doc/config-ref-rst/source/tables/manila-zeromq.rst b/doc/config-reference/source/tables/manila-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-zeromq.rst rename to doc/config-reference/source/tables/manila-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/manila-zfssa.rst b/doc/config-reference/source/tables/manila-zfssa.rst similarity index 100% rename from doc/config-ref-rst/source/tables/manila-zfssa.rst rename to doc/config-reference/source/tables/manila-zfssa.rst diff --git a/doc/config-ref-rst/source/tables/manual/cinder-netapp_cdot_extraspecs.rst b/doc/config-reference/source/tables/manual/cinder-netapp_cdot_extraspecs.rst similarity 
index 100% rename from doc/config-ref-rst/source/tables/manual/cinder-netapp_cdot_extraspecs.rst rename to doc/config-reference/source/tables/manual/cinder-netapp_cdot_extraspecs.rst diff --git a/doc/config-ref-rst/source/tables/neutron-agent.rst b/doc/config-reference/source/tables/neutron-agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-agent.rst rename to doc/config-reference/source/tables/neutron-agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-amqp.rst b/doc/config-reference/source/tables/neutron-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-amqp.rst rename to doc/config-reference/source/tables/neutron-amqp.rst diff --git a/doc/config-ref-rst/source/tables/neutron-api.rst b/doc/config-reference/source/tables/neutron-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-api.rst rename to doc/config-reference/source/tables/neutron-api.rst diff --git a/doc/config-ref-rst/source/tables/neutron-auth_token.rst b/doc/config-reference/source/tables/neutron-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-auth_token.rst rename to doc/config-reference/source/tables/neutron-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/neutron-brocade.rst b/doc/config-reference/source/tables/neutron-brocade.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-brocade.rst rename to doc/config-reference/source/tables/neutron-brocade.rst diff --git a/doc/config-ref-rst/source/tables/neutron-cisco.rst b/doc/config-reference/source/tables/neutron-cisco.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-cisco.rst rename to doc/config-reference/source/tables/neutron-cisco.rst diff --git a/doc/config-ref-rst/source/tables/neutron-common.rst b/doc/config-reference/source/tables/neutron-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-common.rst rename to doc/config-reference/source/tables/neutron-common.rst diff --git a/doc/config-ref-rst/source/tables/neutron-compute.rst b/doc/config-reference/source/tables/neutron-compute.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-compute.rst rename to doc/config-reference/source/tables/neutron-compute.rst diff --git a/doc/config-ref-rst/source/tables/neutron-cors.rst b/doc/config-reference/source/tables/neutron-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-cors.rst rename to doc/config-reference/source/tables/neutron-cors.rst diff --git a/doc/config-ref-rst/source/tables/neutron-database.rst b/doc/config-reference/source/tables/neutron-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-database.rst rename to doc/config-reference/source/tables/neutron-database.rst diff --git a/doc/config-ref-rst/source/tables/neutron-dhcp_agent.rst b/doc/config-reference/source/tables/neutron-dhcp_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-dhcp_agent.rst rename to doc/config-reference/source/tables/neutron-dhcp_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-dvr.rst b/doc/config-reference/source/tables/neutron-dvr.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-dvr.rst rename to doc/config-reference/source/tables/neutron-dvr.rst diff --git a/doc/config-ref-rst/source/tables/neutron-fwaas.rst b/doc/config-reference/source/tables/neutron-fwaas.rst 
similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-fwaas.rst rename to doc/config-reference/source/tables/neutron-fwaas.rst diff --git a/doc/config-ref-rst/source/tables/neutron-fwaas_ngfw.rst b/doc/config-reference/source/tables/neutron-fwaas_ngfw.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-fwaas_ngfw.rst rename to doc/config-reference/source/tables/neutron-fwaas_ngfw.rst diff --git a/doc/config-ref-rst/source/tables/neutron-fwaas_varmour.rst b/doc/config-reference/source/tables/neutron-fwaas_varmour.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-fwaas_varmour.rst rename to doc/config-reference/source/tables/neutron-fwaas_varmour.rst diff --git a/doc/config-ref-rst/source/tables/neutron-hyperv_agent.rst b/doc/config-reference/source/tables/neutron-hyperv_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-hyperv_agent.rst rename to doc/config-reference/source/tables/neutron-hyperv_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ipv6_ra.rst b/doc/config-reference/source/tables/neutron-ipv6_ra.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ipv6_ra.rst rename to doc/config-reference/source/tables/neutron-ipv6_ra.rst diff --git a/doc/config-ref-rst/source/tables/neutron-l2_agent.rst b/doc/config-reference/source/tables/neutron-l2_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-l2_agent.rst rename to doc/config-reference/source/tables/neutron-l2_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-l3_agent.rst b/doc/config-reference/source/tables/neutron-l3_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-l3_agent.rst rename to doc/config-reference/source/tables/neutron-l3_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-lbaas.rst b/doc/config-reference/source/tables/neutron-lbaas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-lbaas.rst rename to doc/config-reference/source/tables/neutron-lbaas.rst diff --git a/doc/config-ref-rst/source/tables/neutron-lbaas_agent.rst b/doc/config-reference/source/tables/neutron-lbaas_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-lbaas_agent.rst rename to doc/config-reference/source/tables/neutron-lbaas_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-lbaas_services.rst b/doc/config-reference/source/tables/neutron-lbaas_services.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-lbaas_services.rst rename to doc/config-reference/source/tables/neutron-lbaas_services.rst diff --git a/doc/config-ref-rst/source/tables/neutron-linuxbridge_agent.rst b/doc/config-reference/source/tables/neutron-linuxbridge_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-linuxbridge_agent.rst rename to doc/config-reference/source/tables/neutron-linuxbridge_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-logging.rst b/doc/config-reference/source/tables/neutron-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-logging.rst rename to doc/config-reference/source/tables/neutron-logging.rst diff --git a/doc/config-ref-rst/source/tables/neutron-metadata.rst b/doc/config-reference/source/tables/neutron-metadata.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-metadata.rst rename to 
doc/config-reference/source/tables/neutron-metadata.rst diff --git a/doc/config-ref-rst/source/tables/neutron-metering_agent.rst b/doc/config-reference/source/tables/neutron-metering_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-metering_agent.rst rename to doc/config-reference/source/tables/neutron-metering_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2.rst b/doc/config-reference/source/tables/neutron-ml2.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2.rst rename to doc/config-reference/source/tables/neutron-ml2.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_brocade.rst b/doc/config-reference/source/tables/neutron-ml2_brocade.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_brocade.rst rename to doc/config-reference/source/tables/neutron-ml2_brocade.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_flat.rst b/doc/config-reference/source/tables/neutron-ml2_flat.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_flat.rst rename to doc/config-reference/source/tables/neutron-ml2_flat.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_geneve.rst b/doc/config-reference/source/tables/neutron-ml2_geneve.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_geneve.rst rename to doc/config-reference/source/tables/neutron-ml2_geneve.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_gre.rst b/doc/config-reference/source/tables/neutron-ml2_gre.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_gre.rst rename to doc/config-reference/source/tables/neutron-ml2_gre.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_l2pop.rst b/doc/config-reference/source/tables/neutron-ml2_l2pop.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_l2pop.rst rename to doc/config-reference/source/tables/neutron-ml2_l2pop.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_ofa.rst b/doc/config-reference/source/tables/neutron-ml2_ofa.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_ofa.rst rename to doc/config-reference/source/tables/neutron-ml2_ofa.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_sriov.rst b/doc/config-reference/source/tables/neutron-ml2_sriov.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_sriov.rst rename to doc/config-reference/source/tables/neutron-ml2_sriov.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_vlan.rst b/doc/config-reference/source/tables/neutron-ml2_vlan.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_vlan.rst rename to doc/config-reference/source/tables/neutron-ml2_vlan.rst diff --git a/doc/config-ref-rst/source/tables/neutron-ml2_vxlan.rst b/doc/config-reference/source/tables/neutron-ml2_vxlan.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-ml2_vxlan.rst rename to doc/config-reference/source/tables/neutron-ml2_vxlan.rst diff --git a/doc/config-ref-rst/source/tables/neutron-nec.rst b/doc/config-reference/source/tables/neutron-nec.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-nec.rst rename to doc/config-reference/source/tables/neutron-nec.rst diff --git a/doc/config-ref-rst/source/tables/neutron-nova.rst b/doc/config-reference/source/tables/neutron-nova.rst similarity index 100% 
rename from doc/config-ref-rst/source/tables/neutron-nova.rst rename to doc/config-reference/source/tables/neutron-nova.rst diff --git a/doc/config-ref-rst/source/tables/neutron-openvswitch_agent.rst b/doc/config-reference/source/tables/neutron-openvswitch_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-openvswitch_agent.rst rename to doc/config-reference/source/tables/neutron-openvswitch_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-pd_linux_agent.rst b/doc/config-reference/source/tables/neutron-pd_linux_agent.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-pd_linux_agent.rst rename to doc/config-reference/source/tables/neutron-pd_linux_agent.rst diff --git a/doc/config-ref-rst/source/tables/neutron-policy.rst b/doc/config-reference/source/tables/neutron-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-policy.rst rename to doc/config-reference/source/tables/neutron-policy.rst diff --git a/doc/config-ref-rst/source/tables/neutron-qpid.rst b/doc/config-reference/source/tables/neutron-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-qpid.rst rename to doc/config-reference/source/tables/neutron-qpid.rst diff --git a/doc/config-ref-rst/source/tables/neutron-quotas.rst b/doc/config-reference/source/tables/neutron-quotas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-quotas.rst rename to doc/config-reference/source/tables/neutron-quotas.rst diff --git a/doc/config-ref-rst/source/tables/neutron-rabbitmq.rst b/doc/config-reference/source/tables/neutron-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-rabbitmq.rst rename to doc/config-reference/source/tables/neutron-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/neutron-redis.rst b/doc/config-reference/source/tables/neutron-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-redis.rst rename to doc/config-reference/source/tables/neutron-redis.rst diff --git a/doc/config-ref-rst/source/tables/neutron-rpc.rst b/doc/config-reference/source/tables/neutron-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-rpc.rst rename to doc/config-reference/source/tables/neutron-rpc.rst diff --git a/doc/config-ref-rst/source/tables/neutron-scheduler.rst b/doc/config-reference/source/tables/neutron-scheduler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-scheduler.rst rename to doc/config-reference/source/tables/neutron-scheduler.rst diff --git a/doc/config-ref-rst/source/tables/neutron-securitygroups.rst b/doc/config-reference/source/tables/neutron-securitygroups.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-securitygroups.rst rename to doc/config-reference/source/tables/neutron-securitygroups.rst diff --git a/doc/config-ref-rst/source/tables/neutron-service_auth.rst b/doc/config-reference/source/tables/neutron-service_auth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-service_auth.rst rename to doc/config-reference/source/tables/neutron-service_auth.rst diff --git a/doc/config-ref-rst/source/tables/neutron-sriov.rst b/doc/config-reference/source/tables/neutron-sriov.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-sriov.rst rename to doc/config-reference/source/tables/neutron-sriov.rst diff --git 
a/doc/config-ref-rst/source/tables/neutron-vpnaas.rst b/doc/config-reference/source/tables/neutron-vpnaas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-vpnaas.rst rename to doc/config-reference/source/tables/neutron-vpnaas.rst diff --git a/doc/config-ref-rst/source/tables/neutron-vpnaas_ipsec.rst b/doc/config-reference/source/tables/neutron-vpnaas_ipsec.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-vpnaas_ipsec.rst rename to doc/config-reference/source/tables/neutron-vpnaas_ipsec.rst diff --git a/doc/config-ref-rst/source/tables/neutron-vpnaas_openswan.rst b/doc/config-reference/source/tables/neutron-vpnaas_openswan.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-vpnaas_openswan.rst rename to doc/config-reference/source/tables/neutron-vpnaas_openswan.rst diff --git a/doc/config-ref-rst/source/tables/neutron-vpnaas_strongswan.rst b/doc/config-reference/source/tables/neutron-vpnaas_strongswan.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-vpnaas_strongswan.rst rename to doc/config-reference/source/tables/neutron-vpnaas_strongswan.rst diff --git a/doc/config-ref-rst/source/tables/neutron-zeromq.rst b/doc/config-reference/source/tables/neutron-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/neutron-zeromq.rst rename to doc/config-reference/source/tables/neutron-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/nova-amqp.rst b/doc/config-reference/source/tables/nova-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-amqp.rst rename to doc/config-reference/source/tables/nova-amqp.rst diff --git a/doc/config-ref-rst/source/tables/nova-api.rst b/doc/config-reference/source/tables/nova-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-api.rst rename to doc/config-reference/source/tables/nova-api.rst diff --git a/doc/config-ref-rst/source/tables/nova-apiv21.rst b/doc/config-reference/source/tables/nova-apiv21.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-apiv21.rst rename to doc/config-reference/source/tables/nova-apiv21.rst diff --git a/doc/config-ref-rst/source/tables/nova-auth_token.rst b/doc/config-reference/source/tables/nova-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-auth_token.rst rename to doc/config-reference/source/tables/nova-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/nova-authentication.rst b/doc/config-reference/source/tables/nova-authentication.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-authentication.rst rename to doc/config-reference/source/tables/nova-authentication.rst diff --git a/doc/config-ref-rst/source/tables/nova-availabilityzones.rst b/doc/config-reference/source/tables/nova-availabilityzones.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-availabilityzones.rst rename to doc/config-reference/source/tables/nova-availabilityzones.rst diff --git a/doc/config-ref-rst/source/tables/nova-barbican.rst b/doc/config-reference/source/tables/nova-barbican.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-barbican.rst rename to doc/config-reference/source/tables/nova-barbican.rst diff --git a/doc/config-ref-rst/source/tables/nova-ca.rst b/doc/config-reference/source/tables/nova-ca.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-ca.rst rename to 
doc/config-reference/source/tables/nova-ca.rst diff --git a/doc/config-ref-rst/source/tables/nova-cells.rst b/doc/config-reference/source/tables/nova-cells.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-cells.rst rename to doc/config-reference/source/tables/nova-cells.rst diff --git a/doc/config-ref-rst/source/tables/nova-common.rst b/doc/config-reference/source/tables/nova-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-common.rst rename to doc/config-reference/source/tables/nova-common.rst diff --git a/doc/config-ref-rst/source/tables/nova-compute.rst b/doc/config-reference/source/tables/nova-compute.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-compute.rst rename to doc/config-reference/source/tables/nova-compute.rst diff --git a/doc/config-ref-rst/source/tables/nova-conductor.rst b/doc/config-reference/source/tables/nova-conductor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-conductor.rst rename to doc/config-reference/source/tables/nova-conductor.rst diff --git a/doc/config-ref-rst/source/tables/nova-configdrive.rst b/doc/config-reference/source/tables/nova-configdrive.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-configdrive.rst rename to doc/config-reference/source/tables/nova-configdrive.rst diff --git a/doc/config-ref-rst/source/tables/nova-console.rst b/doc/config-reference/source/tables/nova-console.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-console.rst rename to doc/config-reference/source/tables/nova-console.rst diff --git a/doc/config-ref-rst/source/tables/nova-cors.rst b/doc/config-reference/source/tables/nova-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-cors.rst rename to doc/config-reference/source/tables/nova-cors.rst diff --git a/doc/config-ref-rst/source/tables/nova-database.rst b/doc/config-reference/source/tables/nova-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-database.rst rename to doc/config-reference/source/tables/nova-database.rst diff --git a/doc/config-ref-rst/source/tables/nova-debug.rst b/doc/config-reference/source/tables/nova-debug.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-debug.rst rename to doc/config-reference/source/tables/nova-debug.rst diff --git a/doc/config-ref-rst/source/tables/nova-ec2.rst b/doc/config-reference/source/tables/nova-ec2.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-ec2.rst rename to doc/config-reference/source/tables/nova-ec2.rst diff --git a/doc/config-ref-rst/source/tables/nova-ephemeral_storage_encryption.rst b/doc/config-reference/source/tables/nova-ephemeral_storage_encryption.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-ephemeral_storage_encryption.rst rename to doc/config-reference/source/tables/nova-ephemeral_storage_encryption.rst diff --git a/doc/config-ref-rst/source/tables/nova-fping.rst b/doc/config-reference/source/tables/nova-fping.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-fping.rst rename to doc/config-reference/source/tables/nova-fping.rst diff --git a/doc/config-ref-rst/source/tables/nova-glance.rst b/doc/config-reference/source/tables/nova-glance.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-glance.rst rename to doc/config-reference/source/tables/nova-glance.rst diff --git 
a/doc/config-ref-rst/source/tables/nova-hyperv.rst b/doc/config-reference/source/tables/nova-hyperv.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-hyperv.rst rename to doc/config-reference/source/tables/nova-hyperv.rst diff --git a/doc/config-ref-rst/source/tables/nova-hypervisor.rst b/doc/config-reference/source/tables/nova-hypervisor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-hypervisor.rst rename to doc/config-reference/source/tables/nova-hypervisor.rst diff --git a/doc/config-ref-rst/source/tables/nova-ipv6.rst b/doc/config-reference/source/tables/nova-ipv6.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-ipv6.rst rename to doc/config-reference/source/tables/nova-ipv6.rst diff --git a/doc/config-ref-rst/source/tables/nova-ironic.rst b/doc/config-reference/source/tables/nova-ironic.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-ironic.rst rename to doc/config-reference/source/tables/nova-ironic.rst diff --git a/doc/config-ref-rst/source/tables/nova-keymgr.rst b/doc/config-reference/source/tables/nova-keymgr.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-keymgr.rst rename to doc/config-reference/source/tables/nova-keymgr.rst diff --git a/doc/config-ref-rst/source/tables/nova-ldap.rst b/doc/config-reference/source/tables/nova-ldap.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-ldap.rst rename to doc/config-reference/source/tables/nova-ldap.rst diff --git a/doc/config-ref-rst/source/tables/nova-libvirt.rst b/doc/config-reference/source/tables/nova-libvirt.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-libvirt.rst rename to doc/config-reference/source/tables/nova-libvirt.rst diff --git a/doc/config-ref-rst/source/tables/nova-livemigration.rst b/doc/config-reference/source/tables/nova-livemigration.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-livemigration.rst rename to doc/config-reference/source/tables/nova-livemigration.rst diff --git a/doc/config-ref-rst/source/tables/nova-logging.rst b/doc/config-reference/source/tables/nova-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-logging.rst rename to doc/config-reference/source/tables/nova-logging.rst diff --git a/doc/config-ref-rst/source/tables/nova-metadata.rst b/doc/config-reference/source/tables/nova-metadata.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-metadata.rst rename to doc/config-reference/source/tables/nova-metadata.rst diff --git a/doc/config-ref-rst/source/tables/nova-network.rst b/doc/config-reference/source/tables/nova-network.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-network.rst rename to doc/config-reference/source/tables/nova-network.rst diff --git a/doc/config-ref-rst/source/tables/nova-neutron.rst b/doc/config-reference/source/tables/nova-neutron.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-neutron.rst rename to doc/config-reference/source/tables/nova-neutron.rst diff --git a/doc/config-ref-rst/source/tables/nova-pci.rst b/doc/config-reference/source/tables/nova-pci.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-pci.rst rename to doc/config-reference/source/tables/nova-pci.rst diff --git a/doc/config-ref-rst/source/tables/nova-periodic.rst b/doc/config-reference/source/tables/nova-periodic.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/nova-periodic.rst rename to doc/config-reference/source/tables/nova-periodic.rst diff --git a/doc/config-ref-rst/source/tables/nova-policy.rst b/doc/config-reference/source/tables/nova-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-policy.rst rename to doc/config-reference/source/tables/nova-policy.rst diff --git a/doc/config-ref-rst/source/tables/nova-qpid.rst b/doc/config-reference/source/tables/nova-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-qpid.rst rename to doc/config-reference/source/tables/nova-qpid.rst diff --git a/doc/config-ref-rst/source/tables/nova-quobyte.rst b/doc/config-reference/source/tables/nova-quobyte.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-quobyte.rst rename to doc/config-reference/source/tables/nova-quobyte.rst diff --git a/doc/config-ref-rst/source/tables/nova-quota.rst b/doc/config-reference/source/tables/nova-quota.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-quota.rst rename to doc/config-reference/source/tables/nova-quota.rst diff --git a/doc/config-ref-rst/source/tables/nova-rabbitmq.rst b/doc/config-reference/source/tables/nova-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-rabbitmq.rst rename to doc/config-reference/source/tables/nova-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/nova-rdp.rst b/doc/config-reference/source/tables/nova-rdp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-rdp.rst rename to doc/config-reference/source/tables/nova-rdp.rst diff --git a/doc/config-ref-rst/source/tables/nova-redis.rst b/doc/config-reference/source/tables/nova-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-redis.rst rename to doc/config-reference/source/tables/nova-redis.rst diff --git a/doc/config-ref-rst/source/tables/nova-rpc.rst b/doc/config-reference/source/tables/nova-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-rpc.rst rename to doc/config-reference/source/tables/nova-rpc.rst diff --git a/doc/config-ref-rst/source/tables/nova-s3.rst b/doc/config-reference/source/tables/nova-s3.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-s3.rst rename to doc/config-reference/source/tables/nova-s3.rst diff --git a/doc/config-ref-rst/source/tables/nova-scheduler.rst b/doc/config-reference/source/tables/nova-scheduler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-scheduler.rst rename to doc/config-reference/source/tables/nova-scheduler.rst diff --git a/doc/config-ref-rst/source/tables/nova-serial_console.rst b/doc/config-reference/source/tables/nova-serial_console.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-serial_console.rst rename to doc/config-reference/source/tables/nova-serial_console.rst diff --git a/doc/config-ref-rst/source/tables/nova-spice.rst b/doc/config-reference/source/tables/nova-spice.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-spice.rst rename to doc/config-reference/source/tables/nova-spice.rst diff --git a/doc/config-ref-rst/source/tables/nova-testing.rst b/doc/config-reference/source/tables/nova-testing.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-testing.rst rename to doc/config-reference/source/tables/nova-testing.rst diff --git a/doc/config-ref-rst/source/tables/nova-trustedcomputing.rst 
b/doc/config-reference/source/tables/nova-trustedcomputing.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-trustedcomputing.rst rename to doc/config-reference/source/tables/nova-trustedcomputing.rst diff --git a/doc/config-ref-rst/source/tables/nova-upgrade_levels.rst b/doc/config-reference/source/tables/nova-upgrade_levels.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-upgrade_levels.rst rename to doc/config-reference/source/tables/nova-upgrade_levels.rst diff --git a/doc/config-ref-rst/source/tables/nova-vmware.rst b/doc/config-reference/source/tables/nova-vmware.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-vmware.rst rename to doc/config-reference/source/tables/nova-vmware.rst diff --git a/doc/config-ref-rst/source/tables/nova-vnc.rst b/doc/config-reference/source/tables/nova-vnc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-vnc.rst rename to doc/config-reference/source/tables/nova-vnc.rst diff --git a/doc/config-ref-rst/source/tables/nova-volumes.rst b/doc/config-reference/source/tables/nova-volumes.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-volumes.rst rename to doc/config-reference/source/tables/nova-volumes.rst diff --git a/doc/config-ref-rst/source/tables/nova-vpn.rst b/doc/config-reference/source/tables/nova-vpn.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-vpn.rst rename to doc/config-reference/source/tables/nova-vpn.rst diff --git a/doc/config-ref-rst/source/tables/nova-xen.rst b/doc/config-reference/source/tables/nova-xen.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-xen.rst rename to doc/config-reference/source/tables/nova-xen.rst diff --git a/doc/config-ref-rst/source/tables/nova-xvpvncproxy.rst b/doc/config-reference/source/tables/nova-xvpvncproxy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-xvpvncproxy.rst rename to doc/config-reference/source/tables/nova-xvpvncproxy.rst diff --git a/doc/config-ref-rst/source/tables/nova-zeromq.rst b/doc/config-reference/source/tables/nova-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-zeromq.rst rename to doc/config-reference/source/tables/nova-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/nova-zookeeper.rst b/doc/config-reference/source/tables/nova-zookeeper.rst similarity index 100% rename from doc/config-ref-rst/source/tables/nova-zookeeper.rst rename to doc/config-reference/source/tables/nova-zookeeper.rst diff --git a/doc/config-ref-rst/source/tables/sahara-amqp.rst b/doc/config-reference/source/tables/sahara-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-amqp.rst rename to doc/config-reference/source/tables/sahara-amqp.rst diff --git a/doc/config-ref-rst/source/tables/sahara-api.rst b/doc/config-reference/source/tables/sahara-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-api.rst rename to doc/config-reference/source/tables/sahara-api.rst diff --git a/doc/config-ref-rst/source/tables/sahara-auth_token.rst b/doc/config-reference/source/tables/sahara-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-auth_token.rst rename to doc/config-reference/source/tables/sahara-auth_token.rst diff --git a/doc/config-ref-rst/source/tables/sahara-clients.rst b/doc/config-reference/source/tables/sahara-clients.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/sahara-clients.rst rename to doc/config-reference/source/tables/sahara-clients.rst diff --git a/doc/config-ref-rst/source/tables/sahara-common.rst b/doc/config-reference/source/tables/sahara-common.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-common.rst rename to doc/config-reference/source/tables/sahara-common.rst diff --git a/doc/config-ref-rst/source/tables/sahara-cors.rst b/doc/config-reference/source/tables/sahara-cors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-cors.rst rename to doc/config-reference/source/tables/sahara-cors.rst diff --git a/doc/config-ref-rst/source/tables/sahara-database.rst b/doc/config-reference/source/tables/sahara-database.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-database.rst rename to doc/config-reference/source/tables/sahara-database.rst diff --git a/doc/config-ref-rst/source/tables/sahara-domain.rst b/doc/config-reference/source/tables/sahara-domain.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-domain.rst rename to doc/config-reference/source/tables/sahara-domain.rst diff --git a/doc/config-ref-rst/source/tables/sahara-logging.rst b/doc/config-reference/source/tables/sahara-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-logging.rst rename to doc/config-reference/source/tables/sahara-logging.rst diff --git a/doc/config-ref-rst/source/tables/sahara-object_store_access.rst b/doc/config-reference/source/tables/sahara-object_store_access.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-object_store_access.rst rename to doc/config-reference/source/tables/sahara-object_store_access.rst diff --git a/doc/config-ref-rst/source/tables/sahara-policy.rst b/doc/config-reference/source/tables/sahara-policy.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-policy.rst rename to doc/config-reference/source/tables/sahara-policy.rst diff --git a/doc/config-ref-rst/source/tables/sahara-qpid.rst b/doc/config-reference/source/tables/sahara-qpid.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-qpid.rst rename to doc/config-reference/source/tables/sahara-qpid.rst diff --git a/doc/config-ref-rst/source/tables/sahara-rabbitmq.rst b/doc/config-reference/source/tables/sahara-rabbitmq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-rabbitmq.rst rename to doc/config-reference/source/tables/sahara-rabbitmq.rst diff --git a/doc/config-ref-rst/source/tables/sahara-redis.rst b/doc/config-reference/source/tables/sahara-redis.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-redis.rst rename to doc/config-reference/source/tables/sahara-redis.rst diff --git a/doc/config-ref-rst/source/tables/sahara-rpc.rst b/doc/config-reference/source/tables/sahara-rpc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-rpc.rst rename to doc/config-reference/source/tables/sahara-rpc.rst diff --git a/doc/config-ref-rst/source/tables/sahara-ssh.rst b/doc/config-reference/source/tables/sahara-ssh.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-ssh.rst rename to doc/config-reference/source/tables/sahara-ssh.rst diff --git a/doc/config-ref-rst/source/tables/sahara-timeouts.rst b/doc/config-reference/source/tables/sahara-timeouts.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/sahara-timeouts.rst rename to doc/config-reference/source/tables/sahara-timeouts.rst diff --git a/doc/config-ref-rst/source/tables/sahara-zeromq.rst b/doc/config-reference/source/tables/sahara-zeromq.rst similarity index 100% rename from doc/config-ref-rst/source/tables/sahara-zeromq.rst rename to doc/config-reference/source/tables/sahara-zeromq.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-DEFAULT.rst b/doc/config-reference/source/tables/swift-account-server-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-DEFAULT.rst rename to doc/config-reference/source/tables/swift-account-server-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-account-auditor.rst b/doc/config-reference/source/tables/swift-account-server-account-auditor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-account-auditor.rst rename to doc/config-reference/source/tables/swift-account-server-account-auditor.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-account-reaper.rst b/doc/config-reference/source/tables/swift-account-server-account-reaper.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-account-reaper.rst rename to doc/config-reference/source/tables/swift-account-server-account-reaper.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-account-replicator.rst b/doc/config-reference/source/tables/swift-account-server-account-replicator.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-account-replicator.rst rename to doc/config-reference/source/tables/swift-account-server-account-replicator.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-app-account-server.rst b/doc/config-reference/source/tables/swift-account-server-app-account-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-app-account-server.rst rename to doc/config-reference/source/tables/swift-account-server-app-account-server.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-filter-healthcheck.rst b/doc/config-reference/source/tables/swift-account-server-filter-healthcheck.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-filter-healthcheck.rst rename to doc/config-reference/source/tables/swift-account-server-filter-healthcheck.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-filter-recon.rst b/doc/config-reference/source/tables/swift-account-server-filter-recon.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-filter-recon.rst rename to doc/config-reference/source/tables/swift-account-server-filter-recon.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-filter-xprofile.rst b/doc/config-reference/source/tables/swift-account-server-filter-xprofile.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-filter-xprofile.rst rename to doc/config-reference/source/tables/swift-account-server-filter-xprofile.rst diff --git a/doc/config-ref-rst/source/tables/swift-account-server-pipeline-main.rst b/doc/config-reference/source/tables/swift-account-server-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-account-server-pipeline-main.rst rename to 
doc/config-reference/source/tables/swift-account-server-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-DEFAULT.rst b/doc/config-reference/source/tables/swift-container-reconciler-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-DEFAULT.rst rename to doc/config-reference/source/tables/swift-container-reconciler-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-app-proxy-server.rst b/doc/config-reference/source/tables/swift-container-reconciler-app-proxy-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-app-proxy-server.rst rename to doc/config-reference/source/tables/swift-container-reconciler-app-proxy-server.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-container-reconciler.rst b/doc/config-reference/source/tables/swift-container-reconciler-container-reconciler.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-container-reconciler.rst rename to doc/config-reference/source/tables/swift-container-reconciler-container-reconciler.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-filter-cache.rst b/doc/config-reference/source/tables/swift-container-reconciler-filter-cache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-filter-cache.rst rename to doc/config-reference/source/tables/swift-container-reconciler-filter-cache.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-filter-catch_errors.rst b/doc/config-reference/source/tables/swift-container-reconciler-filter-catch_errors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-filter-catch_errors.rst rename to doc/config-reference/source/tables/swift-container-reconciler-filter-catch_errors.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-filter-proxy-logging.rst b/doc/config-reference/source/tables/swift-container-reconciler-filter-proxy-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-filter-proxy-logging.rst rename to doc/config-reference/source/tables/swift-container-reconciler-filter-proxy-logging.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-reconciler-pipeline-main.rst b/doc/config-reference/source/tables/swift-container-reconciler-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-reconciler-pipeline-main.rst rename to doc/config-reference/source/tables/swift-container-reconciler-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-DEFAULT.rst b/doc/config-reference/source/tables/swift-container-server-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-DEFAULT.rst rename to doc/config-reference/source/tables/swift-container-server-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-app-container-server.rst b/doc/config-reference/source/tables/swift-container-server-app-container-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-app-container-server.rst rename to doc/config-reference/source/tables/swift-container-server-app-container-server.rst diff --git 
a/doc/config-ref-rst/source/tables/swift-container-server-container-auditor.rst b/doc/config-reference/source/tables/swift-container-server-container-auditor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-container-auditor.rst rename to doc/config-reference/source/tables/swift-container-server-container-auditor.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-container-replicator.rst b/doc/config-reference/source/tables/swift-container-server-container-replicator.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-container-replicator.rst rename to doc/config-reference/source/tables/swift-container-server-container-replicator.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-container-sync.rst b/doc/config-reference/source/tables/swift-container-server-container-sync.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-container-sync.rst rename to doc/config-reference/source/tables/swift-container-server-container-sync.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-container-updater.rst b/doc/config-reference/source/tables/swift-container-server-container-updater.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-container-updater.rst rename to doc/config-reference/source/tables/swift-container-server-container-updater.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-filter-healthcheck.rst b/doc/config-reference/source/tables/swift-container-server-filter-healthcheck.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-filter-healthcheck.rst rename to doc/config-reference/source/tables/swift-container-server-filter-healthcheck.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-filter-recon.rst b/doc/config-reference/source/tables/swift-container-server-filter-recon.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-filter-recon.rst rename to doc/config-reference/source/tables/swift-container-server-filter-recon.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-filter-xprofile.rst b/doc/config-reference/source/tables/swift-container-server-filter-xprofile.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-filter-xprofile.rst rename to doc/config-reference/source/tables/swift-container-server-filter-xprofile.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-server-pipeline-main.rst b/doc/config-reference/source/tables/swift-container-server-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-server-pipeline-main.rst rename to doc/config-reference/source/tables/swift-container-server-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-sync-realms-DEFAULT.rst b/doc/config-reference/source/tables/swift-container-sync-realms-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-sync-realms-DEFAULT.rst rename to doc/config-reference/source/tables/swift-container-sync-realms-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-sync-realms-realm1.rst b/doc/config-reference/source/tables/swift-container-sync-realms-realm1.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/swift-container-sync-realms-realm1.rst rename to doc/config-reference/source/tables/swift-container-sync-realms-realm1.rst diff --git a/doc/config-ref-rst/source/tables/swift-container-sync-realms-realm2.rst b/doc/config-reference/source/tables/swift-container-sync-realms-realm2.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-container-sync-realms-realm2.rst rename to doc/config-reference/source/tables/swift-container-sync-realms-realm2.rst diff --git a/doc/config-ref-rst/source/tables/swift-dispersion-dispersion.rst b/doc/config-reference/source/tables/swift-dispersion-dispersion.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-dispersion-dispersion.rst rename to doc/config-reference/source/tables/swift-dispersion-dispersion.rst diff --git a/doc/config-ref-rst/source/tables/swift-drive-audit-drive-audit.rst b/doc/config-reference/source/tables/swift-drive-audit-drive-audit.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-drive-audit-drive-audit.rst rename to doc/config-reference/source/tables/swift-drive-audit-drive-audit.rst diff --git a/doc/config-ref-rst/source/tables/swift-internal-client-DEFAULT.rst b/doc/config-reference/source/tables/swift-internal-client-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-internal-client-DEFAULT.rst rename to doc/config-reference/source/tables/swift-internal-client-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-internal-client-app-proxy-server.rst b/doc/config-reference/source/tables/swift-internal-client-app-proxy-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-internal-client-app-proxy-server.rst rename to doc/config-reference/source/tables/swift-internal-client-app-proxy-server.rst diff --git a/doc/config-ref-rst/source/tables/swift-internal-client-filter-cache.rst b/doc/config-reference/source/tables/swift-internal-client-filter-cache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-internal-client-filter-cache.rst rename to doc/config-reference/source/tables/swift-internal-client-filter-cache.rst diff --git a/doc/config-ref-rst/source/tables/swift-internal-client-filter-catch_errors.rst b/doc/config-reference/source/tables/swift-internal-client-filter-catch_errors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-internal-client-filter-catch_errors.rst rename to doc/config-reference/source/tables/swift-internal-client-filter-catch_errors.rst diff --git a/doc/config-ref-rst/source/tables/swift-internal-client-filter-proxy-logging.rst b/doc/config-reference/source/tables/swift-internal-client-filter-proxy-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-internal-client-filter-proxy-logging.rst rename to doc/config-reference/source/tables/swift-internal-client-filter-proxy-logging.rst diff --git a/doc/config-ref-rst/source/tables/swift-internal-client-pipeline-main.rst b/doc/config-reference/source/tables/swift-internal-client-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-internal-client-pipeline-main.rst rename to doc/config-reference/source/tables/swift-internal-client-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-memcache-memcache.rst b/doc/config-reference/source/tables/swift-memcache-memcache.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/swift-memcache-memcache.rst rename to doc/config-reference/source/tables/swift-memcache-memcache.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-DEFAULT.rst b/doc/config-reference/source/tables/swift-object-expirer-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-DEFAULT.rst rename to doc/config-reference/source/tables/swift-object-expirer-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-app-proxy-server.rst b/doc/config-reference/source/tables/swift-object-expirer-app-proxy-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-app-proxy-server.rst rename to doc/config-reference/source/tables/swift-object-expirer-app-proxy-server.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-filter-cache.rst b/doc/config-reference/source/tables/swift-object-expirer-filter-cache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-filter-cache.rst rename to doc/config-reference/source/tables/swift-object-expirer-filter-cache.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-filter-catch_errors.rst b/doc/config-reference/source/tables/swift-object-expirer-filter-catch_errors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-filter-catch_errors.rst rename to doc/config-reference/source/tables/swift-object-expirer-filter-catch_errors.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-filter-proxy-logging.rst b/doc/config-reference/source/tables/swift-object-expirer-filter-proxy-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-filter-proxy-logging.rst rename to doc/config-reference/source/tables/swift-object-expirer-filter-proxy-logging.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-object-expirer.rst b/doc/config-reference/source/tables/swift-object-expirer-object-expirer.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-object-expirer.rst rename to doc/config-reference/source/tables/swift-object-expirer-object-expirer.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-expirer-pipeline-main.rst b/doc/config-reference/source/tables/swift-object-expirer-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-expirer-pipeline-main.rst rename to doc/config-reference/source/tables/swift-object-expirer-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-DEFAULT.rst b/doc/config-reference/source/tables/swift-object-server-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-DEFAULT.rst rename to doc/config-reference/source/tables/swift-object-server-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-app-object-server.rst b/doc/config-reference/source/tables/swift-object-server-app-object-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-app-object-server.rst rename to doc/config-reference/source/tables/swift-object-server-app-object-server.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-filter-healthcheck.rst b/doc/config-reference/source/tables/swift-object-server-filter-healthcheck.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/swift-object-server-filter-healthcheck.rst rename to doc/config-reference/source/tables/swift-object-server-filter-healthcheck.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-filter-recon.rst b/doc/config-reference/source/tables/swift-object-server-filter-recon.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-filter-recon.rst rename to doc/config-reference/source/tables/swift-object-server-filter-recon.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-filter-xprofile.rst b/doc/config-reference/source/tables/swift-object-server-filter-xprofile.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-filter-xprofile.rst rename to doc/config-reference/source/tables/swift-object-server-filter-xprofile.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-object-auditor.rst b/doc/config-reference/source/tables/swift-object-server-object-auditor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-object-auditor.rst rename to doc/config-reference/source/tables/swift-object-server-object-auditor.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-object-reconstructor.rst b/doc/config-reference/source/tables/swift-object-server-object-reconstructor.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-object-reconstructor.rst rename to doc/config-reference/source/tables/swift-object-server-object-reconstructor.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-object-replicator.rst b/doc/config-reference/source/tables/swift-object-server-object-replicator.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-object-replicator.rst rename to doc/config-reference/source/tables/swift-object-server-object-replicator.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-object-updater.rst b/doc/config-reference/source/tables/swift-object-server-object-updater.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-object-updater.rst rename to doc/config-reference/source/tables/swift-object-server-object-updater.rst diff --git a/doc/config-ref-rst/source/tables/swift-object-server-pipeline-main.rst b/doc/config-reference/source/tables/swift-object-server-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-object-server-pipeline-main.rst rename to doc/config-reference/source/tables/swift-object-server-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-DEFAULT.rst b/doc/config-reference/source/tables/swift-proxy-server-DEFAULT.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-DEFAULT.rst rename to doc/config-reference/source/tables/swift-proxy-server-DEFAULT.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-app-proxy-server.rst b/doc/config-reference/source/tables/swift-proxy-server-app-proxy-server.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-app-proxy-server.rst rename to doc/config-reference/source/tables/swift-proxy-server-app-proxy-server.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-account-quotas.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-account-quotas.rst similarity index 100% rename from 
doc/config-ref-rst/source/tables/swift-proxy-server-filter-account-quotas.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-account-quotas.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-authtoken.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-authtoken.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-authtoken.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-authtoken.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-bulk.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-bulk.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-bulk.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-bulk.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-cache.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-cache.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-cache.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-cache.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-catch_errors.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-catch_errors.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-catch_errors.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-catch_errors.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-cname_lookup.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-cname_lookup.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-cname_lookup.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-cname_lookup.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-container-quotas.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-container-quotas.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-container-quotas.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-container-quotas.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-container_sync.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-container_sync.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-container_sync.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-container_sync.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-dlo.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-dlo.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-dlo.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-dlo.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-domain_remap.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-domain_remap.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-domain_remap.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-domain_remap.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-formpost.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-formpost.rst 
similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-formpost.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-formpost.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-gatekeeper.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-gatekeeper.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-gatekeeper.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-gatekeeper.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-healthcheck.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-healthcheck.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-healthcheck.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-healthcheck.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-keystoneauth.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-keystoneauth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-keystoneauth.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-keystoneauth.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-list-endpoints.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-list-endpoints.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-list-endpoints.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-list-endpoints.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-name_check.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-name_check.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-name_check.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-name_check.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-proxy-logging.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-proxy-logging.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-proxy-logging.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-proxy-logging.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-ratelimit.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-ratelimit.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-ratelimit.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-ratelimit.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-slo.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-slo.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-slo.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-slo.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-staticweb.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-staticweb.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-staticweb.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-staticweb.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-tempauth.rst 
b/doc/config-reference/source/tables/swift-proxy-server-filter-tempauth.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-tempauth.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-tempauth.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-tempurl.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-tempurl.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-tempurl.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-tempurl.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-versioned_writes.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-versioned_writes.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-versioned_writes.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-versioned_writes.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-filter-xprofile.rst b/doc/config-reference/source/tables/swift-proxy-server-filter-xprofile.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-filter-xprofile.rst rename to doc/config-reference/source/tables/swift-proxy-server-filter-xprofile.rst diff --git a/doc/config-ref-rst/source/tables/swift-proxy-server-pipeline-main.rst b/doc/config-reference/source/tables/swift-proxy-server-pipeline-main.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-proxy-server-pipeline-main.rst rename to doc/config-reference/source/tables/swift-proxy-server-pipeline-main.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-account.rst b/doc/config-reference/source/tables/swift-rsyncd-account.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-account.rst rename to doc/config-reference/source/tables/swift-rsyncd-account.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-container.rst b/doc/config-reference/source/tables/swift-rsyncd-container.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-container.rst rename to doc/config-reference/source/tables/swift-rsyncd-container.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object.rst b/doc/config-reference/source/tables/swift-rsyncd-object.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object.rst rename to doc/config-reference/source/tables/swift-rsyncd-object.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object6010.rst b/doc/config-reference/source/tables/swift-rsyncd-object6010.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object6010.rst rename to doc/config-reference/source/tables/swift-rsyncd-object6010.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object6020.rst b/doc/config-reference/source/tables/swift-rsyncd-object6020.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object6020.rst rename to doc/config-reference/source/tables/swift-rsyncd-object6020.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object6030.rst b/doc/config-reference/source/tables/swift-rsyncd-object6030.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object6030.rst rename to doc/config-reference/source/tables/swift-rsyncd-object6030.rst diff --git 
a/doc/config-ref-rst/source/tables/swift-rsyncd-object6040.rst b/doc/config-reference/source/tables/swift-rsyncd-object6040.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object6040.rst rename to doc/config-reference/source/tables/swift-rsyncd-object6040.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object_sda.rst b/doc/config-reference/source/tables/swift-rsyncd-object_sda.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object_sda.rst rename to doc/config-reference/source/tables/swift-rsyncd-object_sda.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object_sdb.rst b/doc/config-reference/source/tables/swift-rsyncd-object_sdb.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object_sdb.rst rename to doc/config-reference/source/tables/swift-rsyncd-object_sdb.rst diff --git a/doc/config-ref-rst/source/tables/swift-rsyncd-object_sdc.rst b/doc/config-reference/source/tables/swift-rsyncd-object_sdc.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-rsyncd-object_sdc.rst rename to doc/config-reference/source/tables/swift-rsyncd-object_sdc.rst diff --git a/doc/config-ref-rst/source/tables/swift-swift-storage-policy-0.rst b/doc/config-reference/source/tables/swift-swift-storage-policy-0.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-swift-storage-policy-0.rst rename to doc/config-reference/source/tables/swift-swift-storage-policy-0.rst diff --git a/doc/config-ref-rst/source/tables/swift-swift-storage-policy-1.rst b/doc/config-reference/source/tables/swift-swift-storage-policy-1.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-swift-storage-policy-1.rst rename to doc/config-reference/source/tables/swift-swift-storage-policy-1.rst diff --git a/doc/config-ref-rst/source/tables/swift-swift-storage-policy-2.rst b/doc/config-reference/source/tables/swift-swift-storage-policy-2.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-swift-storage-policy-2.rst rename to doc/config-reference/source/tables/swift-swift-storage-policy-2.rst diff --git a/doc/config-ref-rst/source/tables/swift-swift-swift-constraints.rst b/doc/config-reference/source/tables/swift-swift-swift-constraints.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-swift-swift-constraints.rst rename to doc/config-reference/source/tables/swift-swift-swift-constraints.rst diff --git a/doc/config-ref-rst/source/tables/swift-swift-swift-hash.rst b/doc/config-reference/source/tables/swift-swift-swift-hash.rst similarity index 100% rename from doc/config-ref-rst/source/tables/swift-swift-swift-hash.rst rename to doc/config-reference/source/tables/swift-swift-swift-hash.rst diff --git a/doc/config-ref-rst/source/tables/trove-amqp.rst b/doc/config-reference/source/tables/trove-amqp.rst similarity index 100% rename from doc/config-ref-rst/source/tables/trove-amqp.rst rename to doc/config-reference/source/tables/trove-amqp.rst diff --git a/doc/config-ref-rst/source/tables/trove-api.rst b/doc/config-reference/source/tables/trove-api.rst similarity index 100% rename from doc/config-ref-rst/source/tables/trove-api.rst rename to doc/config-reference/source/tables/trove-api.rst diff --git a/doc/config-ref-rst/source/tables/trove-auth_token.rst b/doc/config-reference/source/tables/trove-auth_token.rst similarity index 100% rename from doc/config-ref-rst/source/tables/trove-auth_token.rst 
rename to doc/config-reference/source/tables/trove-auth_token.rst
diff --git a/doc/config-ref-rst/source/tables/trove-backup.rst b/doc/config-reference/source/tables/trove-backup.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-backup.rst
rename to doc/config-reference/source/tables/trove-backup.rst
diff --git a/doc/config-ref-rst/source/tables/trove-clients.rst b/doc/config-reference/source/tables/trove-clients.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-clients.rst
rename to doc/config-reference/source/tables/trove-clients.rst
diff --git a/doc/config-ref-rst/source/tables/trove-cluster.rst b/doc/config-reference/source/tables/trove-cluster.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-cluster.rst
rename to doc/config-reference/source/tables/trove-cluster.rst
diff --git a/doc/config-ref-rst/source/tables/trove-common.rst b/doc/config-reference/source/tables/trove-common.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-common.rst
rename to doc/config-reference/source/tables/trove-common.rst
diff --git a/doc/config-ref-rst/source/tables/trove-compute.rst b/doc/config-reference/source/tables/trove-compute.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-compute.rst
rename to doc/config-reference/source/tables/trove-compute.rst
diff --git a/doc/config-ref-rst/source/tables/trove-cors.rst b/doc/config-reference/source/tables/trove-cors.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-cors.rst
rename to doc/config-reference/source/tables/trove-cors.rst
diff --git a/doc/config-ref-rst/source/tables/trove-database.rst b/doc/config-reference/source/tables/trove-database.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-database.rst
rename to doc/config-reference/source/tables/trove-database.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_cassandra.rst b/doc/config-reference/source/tables/trove-db_cassandra.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_cassandra.rst
rename to doc/config-reference/source/tables/trove-db_cassandra.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_couchbase.rst b/doc/config-reference/source/tables/trove-db_couchbase.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_couchbase.rst
rename to doc/config-reference/source/tables/trove-db_couchbase.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_db2.rst b/doc/config-reference/source/tables/trove-db_db2.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_db2.rst
rename to doc/config-reference/source/tables/trove-db_db2.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_mariadb.rst b/doc/config-reference/source/tables/trove-db_mariadb.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_mariadb.rst
rename to doc/config-reference/source/tables/trove-db_mariadb.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_mongodb.rst b/doc/config-reference/source/tables/trove-db_mongodb.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_mongodb.rst
rename to doc/config-reference/source/tables/trove-db_mongodb.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_mysql.rst b/doc/config-reference/source/tables/trove-db_mysql.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_mysql.rst
rename to doc/config-reference/source/tables/trove-db_mysql.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_percona.rst b/doc/config-reference/source/tables/trove-db_percona.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_percona.rst
rename to doc/config-reference/source/tables/trove-db_percona.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_postgresql.rst b/doc/config-reference/source/tables/trove-db_postgresql.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_postgresql.rst
rename to doc/config-reference/source/tables/trove-db_postgresql.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_pxc.rst b/doc/config-reference/source/tables/trove-db_pxc.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_pxc.rst
rename to doc/config-reference/source/tables/trove-db_pxc.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_redis.rst b/doc/config-reference/source/tables/trove-db_redis.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_redis.rst
rename to doc/config-reference/source/tables/trove-db_redis.rst
diff --git a/doc/config-ref-rst/source/tables/trove-db_vertica.rst b/doc/config-reference/source/tables/trove-db_vertica.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-db_vertica.rst
rename to doc/config-reference/source/tables/trove-db_vertica.rst
diff --git a/doc/config-ref-rst/source/tables/trove-debug.rst b/doc/config-reference/source/tables/trove-debug.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-debug.rst
rename to doc/config-reference/source/tables/trove-debug.rst
diff --git a/doc/config-ref-rst/source/tables/trove-dns.rst b/doc/config-reference/source/tables/trove-dns.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-dns.rst
rename to doc/config-reference/source/tables/trove-dns.rst
diff --git a/doc/config-ref-rst/source/tables/trove-guestagent.rst b/doc/config-reference/source/tables/trove-guestagent.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-guestagent.rst
rename to doc/config-reference/source/tables/trove-guestagent.rst
diff --git a/doc/config-ref-rst/source/tables/trove-heat.rst b/doc/config-reference/source/tables/trove-heat.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-heat.rst
rename to doc/config-reference/source/tables/trove-heat.rst
diff --git a/doc/config-ref-rst/source/tables/trove-logging.rst b/doc/config-reference/source/tables/trove-logging.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-logging.rst
rename to doc/config-reference/source/tables/trove-logging.rst
diff --git a/doc/config-ref-rst/source/tables/trove-network.rst b/doc/config-reference/source/tables/trove-network.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-network.rst
rename to doc/config-reference/source/tables/trove-network.rst
diff --git a/doc/config-ref-rst/source/tables/trove-nova.rst b/doc/config-reference/source/tables/trove-nova.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-nova.rst
rename to doc/config-reference/source/tables/trove-nova.rst
diff --git a/doc/config-ref-rst/source/tables/trove-qpid.rst b/doc/config-reference/source/tables/trove-qpid.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-qpid.rst
rename to doc/config-reference/source/tables/trove-qpid.rst
diff --git a/doc/config-ref-rst/source/tables/trove-quota.rst b/doc/config-reference/source/tables/trove-quota.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-quota.rst
rename to doc/config-reference/source/tables/trove-quota.rst
diff --git a/doc/config-ref-rst/source/tables/trove-rabbitmq.rst b/doc/config-reference/source/tables/trove-rabbitmq.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-rabbitmq.rst
rename to doc/config-reference/source/tables/trove-rabbitmq.rst
diff --git a/doc/config-ref-rst/source/tables/trove-redis.rst b/doc/config-reference/source/tables/trove-redis.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-redis.rst
rename to doc/config-reference/source/tables/trove-redis.rst
diff --git a/doc/config-ref-rst/source/tables/trove-rpc.rst b/doc/config-reference/source/tables/trove-rpc.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-rpc.rst
rename to doc/config-reference/source/tables/trove-rpc.rst
diff --git a/doc/config-ref-rst/source/tables/trove-swift.rst b/doc/config-reference/source/tables/trove-swift.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-swift.rst
rename to doc/config-reference/source/tables/trove-swift.rst
diff --git a/doc/config-ref-rst/source/tables/trove-taskmanager.rst b/doc/config-reference/source/tables/trove-taskmanager.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-taskmanager.rst
rename to doc/config-reference/source/tables/trove-taskmanager.rst
diff --git a/doc/config-ref-rst/source/tables/trove-upgrades.rst b/doc/config-reference/source/tables/trove-upgrades.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-upgrades.rst
rename to doc/config-reference/source/tables/trove-upgrades.rst
diff --git a/doc/config-ref-rst/source/tables/trove-volume.rst b/doc/config-reference/source/tables/trove-volume.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-volume.rst
rename to doc/config-reference/source/tables/trove-volume.rst
diff --git a/doc/config-ref-rst/source/tables/trove-zeromq.rst b/doc/config-reference/source/tables/trove-zeromq.rst
similarity index 100%
rename from doc/config-ref-rst/source/tables/trove-zeromq.rst
rename to doc/config-reference/source/tables/trove-zeromq.rst
diff --git a/doc/config-ref-rst/source/telemetry.rst b/doc/config-reference/source/telemetry.rst
similarity index 100%
rename from doc/config-ref-rst/source/telemetry.rst
rename to doc/config-reference/source/telemetry.rst
diff --git a/doc/config-ref-rst/source/telemetry/alarming_service_config_opts.rst b/doc/config-reference/source/telemetry/alarming_service_config_opts.rst
similarity index 100%
rename from doc/config-ref-rst/source/telemetry/alarming_service_config_opts.rst
rename to doc/config-reference/source/telemetry/alarming_service_config_opts.rst
diff --git a/doc/config-ref-rst/source/telemetry/sample-configuration-files.rst b/doc/config-reference/source/telemetry/sample-configuration-files.rst
similarity index 100%
rename from doc/config-ref-rst/source/telemetry/sample-configuration-files.rst
rename to doc/config-reference/source/telemetry/sample-configuration-files.rst
diff --git a/doc/config-ref-rst/source/telemetry/telemetry_service_config_opts.rst b/doc/config-reference/source/telemetry/telemetry_service_config_opts.rst
similarity index 100%
rename from doc/config-ref-rst/source/telemetry/telemetry_service_config_opts.rst
rename to doc/config-reference/source/telemetry/telemetry_service_config_opts.rst
diff --git a/doc/config-reference/table_default-ports-peripheral-services.xml b/doc/config-reference/table_default-ports-peripheral-services.xml
deleted file mode 100644
index 9d1a3f0148..0000000000
--- a/doc/config-reference/table_default-ports-peripheral-services.xml
+++ /dev/null
@@ -1,61 +0,0 @@
Default ports that secondary services related to OpenStack components use

Service | Default port | Used by
HTTP | 80 | OpenStack dashboard (Horizon) when it is not configured to use secure access.
HTTP alternate | 8080 | OpenStack Object Storage (swift) service.
HTTPS | 443 | Any OpenStack service that is enabled for SSL, especially secure-access dashboard.
rsync | 873 | OpenStack Object Storage. Required.
iSCSI target | 3260 | OpenStack Block Storage. Required.
MySQL database service | 3306 | Most OpenStack components.
Message Broker (AMQP traffic) | 5672 | OpenStack Block Storage, Networking, Orchestration, and Compute.
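As a quick cross-check of the table above, the listed ports can be probed from another machine. The following is a minimal sketch, assuming a host named controller and the nc (netcat) utility; neither the hostname nor the port selection comes from the original table.

.. code-block:: bash

   # Sketch: probe a few of the peripheral-service ports listed above.
   # "controller" is a placeholder hostname; substitute a real host.
   host=controller
   for port in 80 443 873 3260 3306 5672 8080; do
       if nc -z -w 3 "$host" "$port"; then
           echo "port $port is reachable on $host"
       else
           echo "port $port is closed or filtered on $host"
       fi
   done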
diff --git a/doc/config-reference/table_default-ports-primary-services.xml b/doc/config-reference/table_default-ports-primary-services.xml
deleted file mode 100644
index 2d8562613d..0000000000
--- a/doc/config-reference/table_default-ports-primary-services.xml
+++ /dev/null
@@ -1,123 +0,0 @@
Default ports that OpenStack components use

OpenStack service | Default ports | Port type
Block Storage (cinder) | 8776 | publicurl and adminurl
Compute (nova) endpoints | 8774 | publicurl and adminurl
Compute API (nova-api) | 8773, 8775 |
Compute ports for access to virtual machine consoles | 5900-5999 |
Compute VNC proxy for browsers (openstack-nova-novncproxy) | 6080 |
Compute VNC proxy for traditional VNC clients (openstack-nova-xvpvncproxy) | 6081 |
Proxy port for HTML5 console used by Compute service | 6082 |
Data processing service (sahara) endpoint | 8386 | publicurl and adminurl
Identity service (keystone) administrative endpoint | 35357 | adminurl
Identity service public endpoint | 5000 | publicurl
Image service (glance) API | 9292 | publicurl and adminurl
Image service registry | 9191 |
Networking (neutron) | 9696 | publicurl and adminurl
Object Storage (swift) | 6000, 6001, 6002 |
Orchestration (heat) endpoint | 8004 | publicurl and adminurl
Orchestration AWS CloudFormation-compatible API (openstack-heat-api-cfn) | 8000 |
Orchestration AWS CloudWatch-compatible API (openstack-heat-api-cloudwatch) | 8003 |
Telemetry (ceilometer) | 8777 | publicurl and adminurl
Application Catalog (murano) | 8082 |
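Similarly, the public API ports in the table above answer HTTP requests, so a short curl pass can confirm that the endpoints are up. This is only a sketch, assuming a host named controller and plain HTTP; deployments that terminate SSL answer on 443 instead.

.. code-block:: bash

   # Sketch: confirm a few public API endpoints from the table above respond.
   # "controller" is a placeholder hostname.
   for port in 5000 35357 8774 9292 9696 8777; do
       curl -s -o /dev/null -w "port ${port}: HTTP %{http_code}\n" "http://controller:${port}/"
   done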
diff --git a/doc/config-reference/telemetry/section_telemetry-alarming-service-config-opts.xml b/doc/config-reference/telemetry/section_telemetry-alarming-service-config-opts.xml
deleted file mode 100644
index ab8b41009c..0000000000
--- a/doc/config-reference/telemetry/section_telemetry-alarming-service-config-opts.xml
+++ /dev/null
@@ -1,24 +0,0 @@
Telemetry Alarming service configuration options

The following tables provide a comprehensive list of the Telemetry Alarming service configuration options.
diff --git a/doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml b/doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml
deleted file mode 100644
index a77805b259..0000000000
--- a/doc/config-reference/telemetry/section_telemetry-sample-configuration-files.xml
+++ /dev/null
@@ -1,55 +0,0 @@
Telemetry sample configuration files

All the files in this section can be found in the /etc/ceilometer/ directory.
ceilometer.conf

The configuration for the Telemetry services and agents is found in the ceilometer.conf file. This file must be modified after installation.
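Because ceilometer.conf must be edited after installation, individual options are usually set in place rather than by rewriting the whole file. A minimal sketch follows; the crudini utility and the RDO-style service names are assumptions for illustration, not requirements stated in the original text.

.. code-block:: bash

   # Sketch: set a couple of common options in ceilometer.conf.
   # Assumes crudini is installed; any INI editor works as well.
   sudo crudini --set /etc/ceilometer/ceilometer.conf DEFAULT auth_strategy keystone
   sudo crudini --set /etc/ceilometer/ceilometer.conf DEFAULT debug false
   # Service names below follow the RDO packaging convention and may differ.
   sudo systemctl restart openstack-ceilometer-api openstack-ceilometer-notification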
event_definitions.yaml

The event_definitions.yaml file defines how events received from other OpenStack components should be translated to Telemetry events. This file provides a standard set of events and corresponding traits that may be of interest. This file can be modified to add and drop traits that operators may find useful.
pipeline.yaml

Pipelines describe a coupling between sources of samples and the corresponding sinks for transformation and publication of the data. They are defined in the pipeline.yaml file. This file can be modified to adjust polling intervals and the samples generated by the Telemetry module.
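For illustration, a stripped-down pipeline that polls only CPU samples every 600 seconds and publishes them through the notifier could look like the sketch below. The stock file shipped with ceilometer is considerably larger, and the exact schema should be checked against the installed release.

.. code-block:: bash

   # Sketch only: write a minimal pipeline definition to a scratch file
   # instead of overwriting /etc/ceilometer/pipeline.yaml directly.
   cat > /tmp/pipeline.yaml.sketch << 'EOF'
   ---
   sources:
       - name: cpu_source
         interval: 600
         meters:
             - "cpu"
         sinks:
             - cpu_sink
   sinks:
       - name: cpu_sink
         transformers:
         publishers:
             - notifier://
   EOF
   # Verify that the edited file still parses as YAML before deploying it.
   python -c "import yaml; yaml.safe_load(open('/tmp/pipeline.yaml.sketch'))"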
event_pipeline.yaml

Event pipelines describe a coupling between notification event_types and the corresponding sinks for publication of the event data. They are defined in the event_pipeline.yaml file. This file can be modified to adjust which notifications to capture and where to publish the events.
policy.json

The policy.json file defines additional access controls that apply to the Telemetry service.
diff --git a/doc/config-reference/telemetry/section_telemetry-service-config-opts.xml b/doc/config-reference/telemetry/section_telemetry-service-config-opts.xml
deleted file mode 100644
index 06f5412453..0000000000
--- a/doc/config-reference/telemetry/section_telemetry-service-config-opts.xml
+++ /dev/null
@@ -1,41 +0,0 @@
Telemetry configuration options

The following tables provide a comprehensive list of the Telemetry configuration options.
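The doc/pom.xml and tools/build-all-rst.sh hunks that follow drop the guide from the DocBook build and add it to the RST draft build under its new doc/config-reference directory. After this change, a local draft build would look roughly like the sketch below, mirroring the loop in build-all-rst.sh; the command is run from the root of the openstack-manuals checkout and the link check remains optional.

.. code-block:: bash

   # Sketch: build the renamed guide locally, as tools/build-all-rst.sh does
   # after this change. Run from the repository root.
   tools/build-rst.sh doc/config-reference --build build \
       --target "draft/config-reference"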
diff --git a/doc/pom.xml b/doc/pom.xml
index 4b79dd87d9..0a57688bb5 100644
--- a/doc/pom.xml
+++ b/doc/pom.xml
@@ -11,7 +11,6 @@
     <module>cli-reference</module>
-    <module>config-reference</module>
     <module>glossary</module>
diff --git a/tools/build-all-rst.sh b/tools/build-all-rst.sh
index 5aa0b48895..338fcc42b4 100755
--- a/tools/build-all-rst.sh
+++ b/tools/build-all-rst.sh
@@ -22,7 +22,7 @@ done
 # Draft guides
 # This includes guides that we publish from stable branches
 # as versioned like the networking-guide.
-for guide in config-ref-rst networking-guide arch-design-draft; do
+for guide in networking-guide arch-design-draft config-reference; do
     tools/build-rst.sh doc/$guide --build build \
         --target "draft/$guide" $LINKCHECK
 done
diff --git a/www/draft/draft-index.html b/www/draft/draft-index.html
index 89d1fa6bd3..53a2b81ac1 100644
--- a/www/draft/draft-index.html
+++ b/www/draft/draft-index.html
@@ -76,9 +76,7 @@