8b2016b5d0
Include the new tables in the documentation. Fix the cinder-prophetstor_dpl.xml filename and the xiv category. Manually fix those files for niceness test: - glance-logging.xml (lines 34, 38), fixed in oslo-incubator - trove-logging.xml (lines 26, 30), fixed in oslo-incubator - neutron-openvswitch_agent.xml (line 45), to be fixed in neutron Closes-Bug: #1340858 Closes-Bug: #1344231 Closes-Bug: #1345956 Closes-Bug: #1346711 Closes-Bug: #1347978 Partial-Bug: #1348329 Closes-Bug: #1352074 Partial-Bug: #1353417 Closes-Bug: #1354622 Closes-Bug: #1339754 Closes-Bug: #1358598 Closes-Bug: #1358259 Closes-Bug: #1357865 Partial-Bug: #1357457 Closes-Bug: #1357421 Change-Id: Id2da7d7762ca954bd552dbf89a9ff28b144efb68
151 lines
5.5 KiB
XML
151 lines
5.5 KiB
XML
<?xml version='1.0' encoding='UTF-8'?>
|
|
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
|
|
<!-- Warning: Do not edit this file. It is automatically
|
|
generated and your changes will be overwritten.
|
|
The tool to do so lives in openstack-doc-tools repository. -->
|
|
<table rules="all" xml:id="config_table_nova_volumes">
|
|
<caption>Description of configuration options for volumes</caption>
|
|
<col width="50%"/>
|
|
<col width="50%"/>
|
|
<thead>
|
|
<tr>
|
|
<th>Configuration option = Default value</th>
|
|
<th>Description</th>
|
|
</tr>
|
|
</thead>
|
|
<tbody>
|
|
<tr>
|
|
<th colspan="2">[DEFAULT]</th>
|
|
</tr>
|
|
<tr>
|
|
<td>block_device_allocate_retries = 60</td>
|
|
<td>(IntOpt) Number of times to retry block device allocation on failures</td>
|
|
</tr>
|
|
<tr>
|
|
<td>block_device_allocate_retries_interval = 3</td>
|
|
<td>(IntOpt) Waiting time interval (seconds) between block device allocation retries on failures</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_api_insecure = False</td>
|
|
<td>(BoolOpt) Allow to perform insecure SSL requests to cinder</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_ca_certificates_file = None</td>
|
|
<td>(StrOpt) Location of ca certificates file to use for cinder client requests.</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_catalog_info = volume:cinder:publicURL</td>
|
|
                <td>(StrOpt) Info to match when looking for cinder in the service catalog. Format is: separated values of the form: &lt;service_type&gt;:&lt;service_name&gt;:&lt;endpoint_type&gt;</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_cross_az_attach = True</td>
|
|
<td>(BoolOpt) Allow attach between instance and volume in different availability zones.</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_endpoint_template = None</td>
|
|
<td>(StrOpt) Override service catalog lookup with template for cinder endpoint e.g. http://localhost:8776/v1/%(project_id)s</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_http_retries = 3</td>
|
|
<td>(IntOpt) Number of cinderclient retries on failed http calls</td>
|
|
</tr>
|
|
<tr>
|
|
<td>cinder_http_timeout = None</td>
|
|
<td>(IntOpt) HTTP inactivity timeout (in seconds)</td>
|
|
</tr>
|
|
<tr>
|
|
<td>os_region_name = None</td>
|
|
<td>(StrOpt) Region name of this node</td>
|
|
</tr>
|
|
<tr>
|
|
<td>volume_api_class = nova.volume.cinder.API</td>
|
|
<td>(StrOpt) The full class name of the volume API class to use</td>
|
|
</tr>
|
|
<tr>
|
|
<td>volume_usage_poll_interval = 0</td>
|
|
<td>(IntOpt) Interval in seconds for gathering volume usages</td>
|
|
</tr>
|
|
<tr>
|
|
<th colspan="2">[baremetal]</th>
|
|
</tr>
|
|
<tr>
|
|
<td>iscsi_iqn_prefix = iqn.2010-10.org.openstack.baremetal</td>
|
|
<td>(StrOpt) The iSCSI IQN prefix used in baremetal volume connections.</td>
|
|
</tr>
|
|
<tr>
|
|
<td>volume_driver = nova.virt.baremetal.volume_driver.LibvirtVolumeDriver</td>
|
|
<td>(StrOpt) Baremetal volume driver.</td>
|
|
</tr>
|
|
<tr>
|
|
<th colspan="2">[hyperv]</th>
|
|
</tr>
|
|
<tr>
|
|
<td>force_volumeutils_v1 = False</td>
|
|
<td>(BoolOpt) Force V1 volume utility class</td>
|
|
</tr>
|
|
<tr>
|
|
<td>volume_attach_retry_count = 10</td>
|
|
<td>(IntOpt) The number of times to retry to attach a volume</td>
|
|
</tr>
|
|
<tr>
|
|
<td>volume_attach_retry_interval = 5</td>
|
|
<td>(IntOpt) Interval between volume attachment attempts, in seconds</td>
|
|
</tr>
|
|
<tr>
|
|
<th colspan="2">[libvirt]</th>
|
|
</tr>
|
|
<tr>
|
|
<td>glusterfs_mount_point_base = $state_path/mnt</td>
|
|
<td>(StrOpt) Directory where the glusterfs volume is mounted on the compute node</td>
|
|
</tr>
|
|
<tr>
|
|
<td>nfs_mount_options = None</td>
|
|
                <td>(StrOpt) Mount options passed to the NFS client. See section of the nfs man page for details</td>
|
|
</tr>
|
|
<tr>
|
|
<td>nfs_mount_point_base = $state_path/mnt</td>
|
|
<td>(StrOpt) Directory where the NFS volume is mounted on the compute node</td>
|
|
</tr>
|
|
<tr>
|
|
<td>num_aoe_discover_tries = 3</td>
|
|
<td>(IntOpt) Number of times to rediscover AoE target to find volume</td>
|
|
</tr>
|
|
<tr>
|
|
<td>num_iscsi_scan_tries = 5</td>
|
|
<td>(IntOpt) Number of times to rescan iSCSI target to find volume</td>
|
|
</tr>
|
|
<tr>
|
|
<td>num_iser_scan_tries = 5</td>
|
|
<td>(IntOpt) Number of times to rescan iSER target to find volume</td>
|
|
</tr>
|
|
<tr>
|
|
<td>qemu_allowed_storage_drivers = </td>
|
|
<td>(ListOpt) Protocols listed here will be accessed directly from QEMU. Currently supported protocols: [gluster]</td>
|
|
</tr>
|
|
<tr>
|
|
<td>rbd_secret_uuid = None</td>
|
|
                <td>(StrOpt) The libvirt UUID of the secret for the rbd_user volumes</td>
|
|
</tr>
|
|
<tr>
|
|
<td>rbd_user = None</td>
|
|
<td>(StrOpt) The RADOS client name for accessing rbd volumes</td>
|
|
</tr>
|
|
<tr>
|
|
<td>scality_sofs_config = None</td>
|
|
<td>(StrOpt) Path or URL to Scality SOFS configuration file</td>
|
|
</tr>
|
|
<tr>
|
|
<td>scality_sofs_mount_point = $state_path/scality</td>
|
|
<td>(StrOpt) Base dir where Scality SOFS shall be mounted</td>
|
|
</tr>
|
|
<tr>
|
|
<th colspan="2">[xenserver]</th>
|
|
</tr>
|
|
<tr>
|
|
<td>block_device_creation_timeout = 10</td>
|
|
<td>(IntOpt) Time to wait for a block device to be created</td>
|
|
</tr>
|
|
</tbody>
|
|
</table>
|
|
</para>
|