Update nova options for icehouse

Change-Id: I45eb0141576d5f5d5d65814d0da550aac4a3fe93
This commit is contained in:
Shaun McCance 2014-01-08 16:38:52 -05:00 committed by annegentle
parent 9e658c052a
commit ac4eb86704
30 changed files with 364 additions and 497 deletions

View File

@ -57,11 +57,11 @@
</tr>
<tr>
<td>osapi_compute_listen = 0.0.0.0</td>
<td>(StrOpt) IP address for OpenStack API to listen</td>
<td>(StrOpt) The IP address on which the OpenStack API will listen.</td>
</tr>
<tr>
<td>osapi_compute_listen_port = 8774</td>
<td>(IntOpt) list port for osapi compute</td>
<td>(IntOpt) The port on which the OpenStack API will listen.</td>
</tr>
<tr>
<td>osapi_compute_workers = None</td>

View File

@ -17,7 +17,7 @@
<tbody>
<tr>
<td>api_rate_limit = False</td>
<td>(BoolOpt) whether to use per-user rate limiting for the api.</td>
<td>(BoolOpt) whether to use per-user rate limiting for the api. This option is only used by v2 api. rate limiting is removed from v3 api.</td>
</tr>
<tr>
<td>auth_strategy = noauth</td>

View File

@ -40,7 +40,7 @@
<td>(ListOpt) a list of additional capabilities corresponding to flavor_extra_specs for this compute host to advertise. Valid entries are name=value pairs. For example, "key1:val1, key2:val2"</td>
</tr>
<tr>
<td>ipmi_power_retry = 5</td>
<td>ipmi_power_retry = 10</td>
<td>(IntOpt) maximal number of retries for IPMI operations</td>
</tr>
<tr>
@ -52,7 +52,7 @@
<td>(StrOpt) Baremetal power management method</td>
</tr>
<tr>
<td>pxe_append_params = None</td>
<td>pxe_append_params = nofb nomodeset vga=normal</td>
<td>(StrOpt) additional append parameters for baremetal PXE boot</td>
</tr>
<tr>
@ -91,10 +91,22 @@
<td>tftp_root = /tftpboot</td>
<td>(StrOpt) Baremetal compute node's tftp root path</td>
</tr>
<tr>
<td>use_file_injection = True</td>
<td>(BoolOpt) If True, enable file injection for network info, files and admin password</td>
</tr>
<tr>
<td>use_unsafe_iscsi = False</td>
<td>(BoolOpt) Do not set this out of dev/test environments. If a node does not have a fixed PXE IP address, volumes are exported with globally opened ACL</td>
</tr>
<tr>
<td>vif_driver = nova.virt.xenapi.vif.XenAPIBridgeDriver</td>
<td>(StrOpt) The XenAPI VIF driver using XenServer Network APIs.</td>
</tr>
<tr>
<td>vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver</td>
<td>(StrOpt) The libvirt VIF driver to configure the VIFs.</td>
</tr>
<tr>
<td>vif_driver = nova.virt.baremetal.vif_driver.BareMetalVIFDriver</td>
<td>(StrOpt) Baremetal VIF driver.</td>

View File

@ -24,7 +24,7 @@
<td>(ListOpt) Key/Multi-value list with the capabilities of the cell</td>
</tr>
<tr>
<td>cell_type = None</td>
<td>cell_type = compute</td>
<td>(StrOpt) Type of cell: api or compute</td>
</tr>
<tr>
@ -79,6 +79,10 @@
<td>name = nova</td>
<td>(StrOpt) name of this cell</td>
</tr>
<tr>
<td>offset_weight_multiplier = 1.0</td>
<td>(FloatOpt) Multiplier used to weigh offset weigher.</td>
</tr>
<tr>
<td>reserve_percent = 10.0</td>
<td>(FloatOpt) Percentage of cell capacity to hold in reserve. Affects both memory and disk utilization</td>

View File

@ -15,6 +15,10 @@
</tr>
</thead>
<tbody>
<tr>
<td>api_audit_map = api_audit_map.conf</td>
<td>(StrOpt) File containing mapping for api paths and service endpoints</td>
</tr>
<tr>
<td>bindir = /usr/local/bin</td>
<td>(StrOpt) Directory where nova binaries are installed</td>
@ -43,6 +47,10 @@
<td>host = 127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>host = None</td>
<td>(StrOpt) Debug host (ip or name) to connect. Note that using the remote debug option changes how Nova uses the eventlet library to support async IO. This could result in failures that do not occur under normal operation. Use at your own risk.</td>
</tr>
<tr>
<td>lock_path = None</td>
<td>(StrOpt) Directory to use for lock files.</td>
@ -52,9 +60,13 @@
<td>(ListOpt) Memcached servers or None for in process cache.</td>
</tr>
<tr>
<td>my_ip = 192.168.122.99</td>
<td>my_ip = 192.168.0.8</td>
<td>(StrOpt) ip address of this host</td>
</tr>
<tr>
<td>namespace = openstack</td>
<td>(StrOpt) namespace prefix for generated id</td>
</tr>
<tr>
<td>notification_driver = []</td>
<td>(MultiStrOpt) Driver or drivers to handle sending notifications</td>
@ -72,7 +84,7 @@
<td>(StrOpt) If set, send compute.instance.update notifications on instance state changes. Valid values are None for no notifications, "vm_state" for notifications on VM state changes, or "vm_and_task_state" for notifications on VM and task state changes.</td>
</tr>
<tr>
<td>pybasedir = /home/docwork/openstack-manuals-new/tools/autogenerate-config-docs/nova</td>
<td>pybasedir = /usr/lib/python2.7/site-packages</td>
<td>(StrOpt) Directory where the nova python module is installed</td>
</tr>
<tr>

View File

@ -16,16 +16,8 @@
</thead>
<tbody>
<tr>
<td>base_dir_name = _base</td>
<td>(StrOpt) Where cached images are stored under $instances_path.This is NOT the full path - just a folder name.For per-compute-host cached images, set to _base_$my_ip</td>
</tr>
<tr>
<td>checksum_interval_seconds = 3600</td>
<td>(IntOpt) How frequently to checksum base images</td>
</tr>
<tr>
<td>compute_api_class = nova.compute.api.API</td>
<td>(StrOpt) The full class name of the compute API class to use (deprecated)</td>
<td>compute_available_monitors = ['nova.compute.monitors.all_monitors']</td>
<td>(MultiStrOpt) Monitor classes available to the compute which may be specified more than once.</td>
</tr>
<tr>
<td>compute_driver = None</td>
@ -35,6 +27,10 @@
<td>compute_manager = nova.compute.manager.ComputeManager</td>
<td>(StrOpt) full class name for the Manager for compute</td>
</tr>
<tr>
<td>compute_monitors = </td>
<td>(ListOpt) A list of monitors that can be used for getting compute metrics.</td>
</tr>
<tr>
<td>compute_stats_class = nova.compute.stats.Stats</td>
<td>(StrOpt) Class that will manage stats for the local compute host</td>
@ -61,7 +57,7 @@
</tr>
<tr>
<td>enable_instance_password = True</td>
<td>(BoolOpt) Allows use of instance password during server creation</td>
<td>(BoolOpt) Enables returning of the instance password by the relevant server API calls such as create, rebuild or rescue. If the hypervisor does not support password injection then the password returned will not be correct.</td>
</tr>
<tr>
<td>heal_instance_info_cache_interval = 60</td>
@ -76,8 +72,8 @@
<td>(IntOpt) Number of seconds to wait between runs of the image cache manager</td>
</tr>
<tr>
<td>image_info_filename_pattern = $instances_path/$base_dir_name/%(image)s.info</td>
<td>(StrOpt) Allows image information files to be stored in non-standard locations</td>
<td>image_cache_subdirectory_name = _base</td>
<td>(StrOpt) Where cached images are stored under $instances_path. This is NOT the full path - just a folder name. For per-compute-host cached images, set to _base_$my_ip</td>
</tr>
<tr>
<td>instance_build_timeout = 0</td>
@ -120,8 +116,8 @@
<td>(BoolOpt) Whether to start guests that were running before the host rebooted</td>
</tr>
<tr>
<td>running_deleted_instance_action = log</td>
<td>(StrOpt) Action to take if a running deleted instance is detected.Valid options are 'noop', 'log' and 'reap'. Set to 'noop' to disable.</td>
<td>running_deleted_instance_action = reap</td>
<td>(StrOpt) Action to take if a running deleted instance is detected. Valid options are 'noop', 'log', 'shutdown', or 'reap'. Set to 'noop' to take no action.</td>
</tr>
<tr>
<td>running_deleted_instance_poll_interval = 1800</td>

View File

@ -20,20 +20,20 @@
<td>(StrOpt) The backend to use for db</td>
</tr>
<tr>
<td>connection_trace = False</td>
<td>(BoolOpt) Add python stack traces to SQL as comment strings</td>
</tr>
<tr>
<td>connection = sqlite:////home/docwork/openstack-manuals-new/tools/autogenerate-config-docs/nova/nova/openstack/common/db/$sqlite_db</td>
<td>connection = sqlite:////usr/lib/python2.7/site-packages/nova/openstack/common/db/$sqlite_db</td>
<td>(StrOpt) The SQLAlchemy connection string used to connect to the database</td>
</tr>
<tr>
<td>connection_debug = 0</td>
<td>(IntOpt) Verbosity of SQL debugging information. 0=None, 100=Everything</td>
</tr>
<tr>
<td>connection_trace = False</td>
<td>(BoolOpt) Add python stack traces to SQL as comment strings</td>
</tr>
<tr>
<td>db_backend = sqlalchemy</td>
<td>(StrOpt) The backend to use for bare-metal database.(This option is deprecated, please use backend instead.)</td>
<td>(StrOpt) The backend to use for bare-metal database</td>
</tr>
<tr>
<td>db_check_interval = 60</td>
@ -47,14 +47,14 @@
<td>idle_timeout = 3600</td>
<td>(IntOpt) timeout before idle sql connections are reaped</td>
</tr>
<tr>
<td>max_pool_size = None</td>
<td>(IntOpt) Maximum number of SQL connections to keep open in a pool</td>
</tr>
<tr>
<td>max_overflow = None</td>
<td>(IntOpt) If set, use this value for max_overflow with sqlalchemy</td>
</tr>
<tr>
<td>max_pool_size = None</td>
<td>(IntOpt) Maximum number of SQL connections to keep open in a pool</td>
</tr>
<tr>
<td>max_retries = 10</td>
<td>(IntOpt) maximum db connection retries during startup. (setting -1 implies an infinite retry count)</td>

View File

@ -16,7 +16,7 @@
</thead>
<tbody>
<tr>
<td>docker_registry_default_port = 5042</td>
<td>registry_default_port = 5042</td>
<td>(IntOpt) Default TCP port to find the docker-registry container</td>
</tr>
</tbody>

View File

@ -25,11 +25,11 @@
</tr>
<tr>
<td>ec2_listen = 0.0.0.0</td>
<td>(StrOpt) IP address for EC2 API to listen</td>
<td>(StrOpt) The IP address on which the EC2 API will listen.</td>
</tr>
<tr>
<td>ec2_listen_port = 8773</td>
<td>(IntOpt) port for ec2 api to listen</td>
<td>(IntOpt) The port on which the EC2 API will listen.</td>
</tr>
<tr>
<td>ec2_path = /services/Cloud</td>

View File

@ -19,138 +19,62 @@
<td>block_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC</td>
<td>(StrOpt) Migration flags to be set for block migration</td>
</tr>
<tr>
<td>checksum_base_images = False</td>
<td>(BoolOpt) Write a checksum for files in _base to disk</td>
</tr>
<tr>
<td>default_ephemeral_format = None</td>
<td>(StrOpt) The default format an ephemeral_volume will be formatted with on creation.</td>
</tr>
<tr>
<td>disk_cachemodes = </td>
<td>(ListOpt) Specific cachemodes to use for different disk types e.g: ["file=directsync","block=none"]</td>
<td>(ListOpt) Specific cachemodes to use for different disk types e.g: file=directsync,block=none</td>
</tr>
<tr>
<td>force_raw_images = True</td>
<td>(BoolOpt) Force backing images to raw format</td>
</tr>
<tr>
<td>inject_password = True</td>
<td>(BoolOpt) Whether baremetal compute injects password or not</td>
</tr>
<tr>
<td>libvirt_cpu_mode = None</td>
<td>(StrOpt) Set to "host-model" to clone the host CPU feature flags; to "host-passthrough" to use the host CPU model exactly; to "custom" to use a named CPU model; to "none" to not set any CPU model. If libvirt_type="kvm|qemu", it will default to "host-model", otherwise it will default to "none"</td>
</tr>
<tr>
<td>libvirt_cpu_model = None</td>
<td>(StrOpt) Set to a named libvirt CPU model (see names listed in /usr/share/libvirt/cpu_map.xml). Only has effect if libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"</td>
</tr>
<tr>
<td>libvirt_disk_prefix = None</td>
<td>(StrOpt) Override the default disk prefix for the devices attached to a server, which is dependent on libvirt_type. (valid options are: sd, xvd, uvd, vd)</td>
</tr>
<tr>
<td>libvirt_images_rbd_ceph_conf = </td>
<td>images_rbd_ceph_conf = </td>
<td>(StrOpt) path to the ceph configuration file to use</td>
</tr>
<tr>
<td>libvirt_images_type = default</td>
<td>(StrOpt) VM Images format. Acceptable values are: raw, qcow2, lvm,rbd, default. If default is specified, then use_cow_images flag is used instead of this one.</td>
</tr>
<tr>
<td>libvirt_images_rbd_pool = rbd</td>
<td>images_rbd_pool = rbd</td>
<td>(StrOpt) the RADOS pool in which rbd volumes are stored</td>
</tr>
<tr>
<td>libvirt_images_volume_group = None</td>
<td>(StrOpt) LVM Volume Group that is used for VM images, when you specify libvirt_images_type=lvm.</td>
<td>images_type = default</td>
<td>(StrOpt) VM Images format. Acceptable values are: raw, qcow2, lvm, rbd, default. If default is specified, then use_cow_images flag is used instead of this one.</td>
</tr>
<tr>
<td>libvirt_inject_key = True</td>
<td>images_volume_group = None</td>
<td>(StrOpt) LVM Volume Group that is used for VM images, when you specify images_type=lvm.</td>
</tr>
<tr>
<td>inject_key = True</td>
<td>(BoolOpt) Inject the ssh public key at boot time</td>
</tr>
<tr>
<td>libvirt_inject_partition = 1</td>
<td>inject_partition = 1</td>
<td>(IntOpt) The partition to inject to : -2 =&gt; disable, -1 =&gt; inspect (libguestfs only), 0 =&gt; not partitioned, &gt;0 =&gt; partition number</td>
</tr>
<tr>
<td>libvirt_inject_password = False</td>
<td>inject_password = False</td>
<td>(BoolOpt) Inject the admin password at boot time, without an agent.</td>
</tr>
<tr>
<td>libvirt_iscsi_use_multipath = False</td>
<td>iscsi_use_multipath = False</td>
<td>(BoolOpt) use multipath connection of the iSCSI volume</td>
</tr>
<tr>
<td>libvirt_iser_use_multipath = False</td>
<td>iser_use_multipath = False</td>
<td>(BoolOpt) use multipath connection of the iSER volume</td>
</tr>
<tr>
<td>libvirt_lvm_snapshot_size = 1000</td>
<td>lvm_snapshot_size = 1000</td>
<td>(IntOpt) The amount of storage (in megabytes) to allocate for LVM snapshot copy-on-write blocks.</td>
</tr>
<tr>
<td>libvirt_nonblocking = True</td>
<td>(BoolOpt) Use a separated OS thread pool to realize non-blocking libvirt calls</td>
</tr>
<tr>
<td>libvirt_snapshot_compression = False</td>
<td>(BoolOpt) Compress snapshot images when possible. This currently applies exclusively to qcow2 images</td>
</tr>
<tr>
<td>libvirt_snapshots_directory = $instances_path/snapshots</td>
<td>(StrOpt) Location where libvirt driver will store snapshots before uploading them to image service</td>
</tr>
<tr>
<td>libvirt_sparse_logical_volumes = False</td>
<td>(BoolOpt) Create sparse logical volumes (with virtualsize) if this flag is set to True.</td>
</tr>
<tr>
<td>libvirt_type = kvm</td>
<td>(StrOpt) Libvirt domain type (valid options are: kvm, lxc, qemu, uml, xen)</td>
</tr>
<tr>
<td>libvirt_uri = </td>
<td>(StrOpt) Override the default libvirt URI (which is dependent on libvirt_type)</td>
</tr>
<tr>
<td>libvirt_use_virtio_for_bridges = True</td>
<td>(BoolOpt) Use virtio for bridge interfaces with KVM/QEMU</td>
</tr>
<tr>
<td>libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver</td>
<td>(StrOpt) The libvirt VIF driver to configure the VIFs.</td>
</tr>
<tr>
<td>libvirt_volume_drivers = iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver, iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver, local=nova.virt.libvirt.volume.LibvirtVolumeDriver, fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver, rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver, sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver, nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver, aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver, glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver, fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver, scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver</td>
<td>(ListOpt) Libvirt handlers for remote volumes.</td>
</tr>
<tr>
<td>libvirt_wait_soft_reboot_seconds = 120</td>
<td>(IntOpt) Number of seconds to wait for instance to shut down after soft reboot request is made. We fall back to hard reboot if instance does not shutdown within this window.</td>
</tr>
<tr>
<td>preallocate_images = none</td>
<td>(StrOpt) VM image preallocation mode: "none" =&gt; no storage provisioning is done up front, "space" =&gt; storage is fully allocated at instance start</td>
</tr>
<tr>
<td>remove_unused_base_images = True</td>
<td>(BoolOpt) Should unused base images be removed?</td>
</tr>
<tr>
<td>remove_unused_kernels = False</td>
<td>(BoolOpt) Should unused kernel images be removed? This is only safe to enable if all compute nodes have been updated to support this option. This will enabled by default in future.</td>
</tr>
<tr>
<td>remove_unused_original_minimum_age_seconds = 86400</td>
<td>(IntOpt) Unused unresized base images younger than this will not be removed</td>
</tr>
<tr>
<td>remove_unused_resized_minimum_age_seconds = 3600</td>
<td>(IntOpt) Unused resized base images younger than this will not be removed</td>
</tr>
<tr>
<td>rescue_image_id = None</td>
<td>(StrOpt) Rescue ami image</td>
@ -167,10 +91,18 @@
<td>rescue_timeout = 0</td>
<td>(IntOpt) Automatically unrescue an instance after N seconds. Set to 0 to disable.</td>
</tr>
<tr>
<td>snapshot_compression = False</td>
<td>(BoolOpt) Compress snapshot images when possible. This currently applies exclusively to qcow2 images</td>
</tr>
<tr>
<td>snapshot_image_format = None</td>
<td>(StrOpt) Snapshot image format (valid options are: raw, qcow2, vmdk, vdi). Defaults to same as source image</td>
</tr>
<tr>
<td>sparse_logical_volumes = False</td>
<td>(BoolOpt) Create sparse logical volumes (with virtualsize) if this flag is set to True.</td>
</tr>
<tr>
<td>timeout_nbd = 10</td>
<td>(IntOpt) time to wait for a NBD device coming up</td>
@ -183,12 +115,16 @@
<td>use_usb_tablet = True</td>
<td>(BoolOpt) Sync virtual and real mouse cursors in Windows VMs</td>
</tr>
<tr>
<td>use_virtio_for_bridges = True</td>
<td>(BoolOpt) Use virtio for bridge interfaces with KVM/QEMU</td>
</tr>
<tr>
<td>vcpu_pin_set = None</td>
<td>(StrOpt) Which pcpus can be used by vcpus of instance e.g: "4-12,^8,15"</td>
</tr>
<tr>
<td>virt_mkfs = ['default=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', 'windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s']</td>
<td>virt_mkfs = []</td>
<td>(MultiStrOpt) mkfs commands for ephemeral device. The format is &lt;os_type&gt;=&lt;mkfs command&gt;</td>
</tr>
</tbody>

View File

@ -20,7 +20,7 @@
<td>(BoolOpt) Print debugging output (set logging level to DEBUG instead of default WARNING level).</td>
</tr>
<tr>
<td>default_log_levels = amqplib=WARN, sqlalchemy=WARN, boto=WARN, suds=INFO, keystone=INFO, eventlet.wsgi.server=WARN</td>
<td>default_log_levels = amqp=WARN, amqplib=WARN, boto=WARN, keystone=INFO, qpid=WARN, sqlalchemy=WARN, suds=INFO, iso8601=WARN</td>
<td>(ListOpt) list of logger=LEVEL pairs</td>
</tr>
<tr>
@ -40,8 +40,8 @@
<td>(StrOpt) If an instance UUID is passed with the log message, format it like this</td>
</tr>
<tr>
<td>log_config = None</td>
<td>(StrOpt) If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.</td>
<td>log_config_append = None</td>
<td>(StrOpt) The name of logging configuration file. It does not disable existing loggers, but just appends specified logging configuration to any other existing logging options. Please see the Python logging module documentation for details on logging configuration files.</td>
</tr>
<tr>
<td>log_date_format = %Y-%m-%d %H:%M:%S</td>

View File

@ -21,11 +21,11 @@
</tr>
<tr>
<td>metadata_listen = 0.0.0.0</td>
<td>(StrOpt) IP address for metadata api to listen</td>
<td>(StrOpt) The IP address on which the metadata API will listen.</td>
</tr>
<tr>
<td>metadata_listen_port = 8775</td>
<td>(IntOpt) port for metadata api to listen</td>
<td>(IntOpt) The port on which the metadata API will listen.</td>
</tr>
<tr>
<td>metadata_manager = nova.api.manager.MetadataManager</td>
@ -45,7 +45,7 @@
</tr>
<tr>
<td>vendordata_jsonfile_path = None</td>
<td>(StrOpt) File to load json formated vendor data from</td>
<td>(StrOpt) File to load json formatted vendor data from</td>
</tr>
</tbody>
</table>

View File

@ -87,11 +87,6 @@
<td>flat_interface = None</td>
<td>(StrOpt) FlatDhcp will bridge into this interface if set</td>
</tr>
<tr>
<td>ovs_vsctl_timeout = 120</td>
<td>(IntOpt) Amount of time, in seconds, that ovs_vsctl should wait for aresponse from the database. 0 is to wait forever.</td>
</tr>
<tr>
<td>flat_network_bridge = None</td>
<td>(StrOpt) Bridge for simple network instances</td>
@ -124,18 +119,10 @@
<td>injected_network_template = $pybasedir/nova/virt/interfaces.template</td>
<td>(StrOpt) Template file for injected network</td>
</tr>
<tr>
<td>injected_network_template = $pybasedir/nova/virt/baremetal/interfaces.template</td>
<td>(StrOpt) Template file for injected network</td>
</tr>
<tr>
<td>injected_network_template = $pybasedir/nova/virt/interfaces.template</td>
<td>(StrOpt) Template file for injected network</td>
</tr>
<tr>
<td>injected_network_template = $pybasedir/nova/virt/baremetal/interfaces.template</td>
<td>(StrOpt) Template file for injected network</td>
</tr>
<tr>
<td>instance_dns_domain = </td>
<td>(StrOpt) full class name for the DNS Zone for instance IPs</td>
@ -182,7 +169,7 @@
</tr>
<tr>
<td>network_device_mtu = None</td>
<td>(StrOpt) MTU setting for vlan</td>
<td>(IntOpt) MTU setting for network interface</td>
</tr>
<tr>
<td>network_driver = nova.network.linux_net</td>
@ -208,6 +195,10 @@
<td>num_networks = 1</td>
<td>(IntOpt) Number of networks to support</td>
</tr>
<tr>
<td>ovs_vsctl_timeout = 120</td>
<td>(IntOpt) Amount of time, in seconds, that ovs_vsctl should wait for a response from the database. 0 is to wait forever.</td>
</tr>
<tr>
<td>public_interface = eth0</td>
<td>(StrOpt) Interface for public IP addresses</td>

View File

@ -15,10 +15,6 @@
</tr>
</thead>
<tbody>
<tr>
<td>dhcp_options_enabled = False</td>
<td>(BoolOpt) Use per-port DHCP options with Neutron</td>
</tr>
<tr>
<td>neutron_admin_auth_url = http://localhost:5000/v2.0</td>
<td>(StrOpt) auth url for connecting to neutron in admin context</td>
@ -45,7 +41,7 @@
</tr>
<tr>
<td>neutron_ca_certificates_file = None</td>
<td>(StrOpt) Location of ca certicates file to use for neutronclient requests.</td>
<td>(StrOpt) Location of ca certificates file to use for neutron client requests.</td>
</tr>
<tr>
<td>neutron_default_tenant_id = default</td>

View File

@ -64,7 +64,7 @@
<td>(IntOpt) number of seconds until a reservation expires</td>
</tr>
<tr>
<td>resize_fs_using_block_device = True</td>
<td>resize_fs_using_block_device = False</td>
<td>(BoolOpt) Attempt to resize the filesystem by accessing the image over a block device. This is done by the host and may not be necessary if the image contains a recent version of cloud-init. Possible mechanisms require the nbd driver (for qcow and raw), or loop (for raw).</td>
</tr>
<tr>

View File

@ -1,44 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Warning: Do not edit this file. It is automatically
generated and your changes will be overwritten.
The tool to do so lives in the tools directory of this
repository -->
<para xmlns="http://docbook.org/ns/docbook" version="5.0">
<table rules="all">
<caption>Description of configuration options for powervm</caption>
<col width="50%"/>
<col width="50%"/>
<thead>
<tr>
<th>Configuration option = Default value</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>powervm_img_local_path = /tmp</td>
<td>(StrOpt) Local directory to download glance images to. Make sure this path can fit your biggest image in glance</td>
</tr>
<tr>
<td>powervm_img_remote_path = /home/padmin</td>
<td>(StrOpt) PowerVM image remote path where images will be moved. Make sure this path can fit your biggest image in glance</td>
</tr>
<tr>
<td>powervm_mgr = None</td>
<td>(StrOpt) PowerVM manager host or ip</td>
</tr>
<tr>
<td>powervm_mgr_passwd = None</td>
<td>(StrOpt) PowerVM manager user password</td>
</tr>
<tr>
<td>powervm_mgr_type = ivm</td>
<td>(StrOpt) PowerVM manager type (ivm, hmc)</td>
</tr>
<tr>
<td>powervm_mgr_user = None</td>
<td>(StrOpt) PowerVM manager user name</td>
</tr>
</tbody>
</table>
</para>

View File

@ -25,7 +25,7 @@
</tr>
<tr>
<td>enable_network_quota = False</td>
<td>(BoolOpt) Enables or disables quotaing of tenant networks</td>
<td>(BoolOpt) Enables or disables quota checking for tenant networks</td>
</tr>
<tr>
<td>quota_cores = 20</td>

View File

@ -23,6 +23,10 @@
<td>host = 127.0.0.1</td>
<td>(StrOpt) Host to locate redis</td>
</tr>
<tr>
<td>host = None</td>
<td>(StrOpt) Debug host (ip or name) to connect. Note that using the remote debug option changes how Nova uses the eventlet library to support async IO. This could result in failures that do not occur under normal operation. Use at your own risk.</td>
</tr>
<tr>
<td>password = None</td>
<td>(StrOpt) Password for Redis server. (optional)</td>
@ -31,6 +35,10 @@
<td>port = 6379</td>
<td>(IntOpt) Use this port to connect to redis host.</td>
</tr>
<tr>
<td>port = None</td>
<td>(IntOpt) Debug port to connect. Note that using the remote debug option changes how Nova uses the eventlet library to support async IO. This could result in failures that do not occur under normal operation. Use at your own risk.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -15,14 +15,14 @@
</tr>
</thead>
<tbody>
<tr>
<td>amqp_durable_queues = False</td>
<td>(BoolOpt) Use durable queues in amqp.</td>
</tr>
<tr>
<td>amqp_auto_delete = False</td>
<td>(BoolOpt) Auto-delete queues in amqp.</td>
</tr>
<tr>
<td>amqp_durable_queues = False</td>
<td>(BoolOpt) Use durable queues in amqp.</td>
</tr>
<tr>
<td>baseapi = None</td>
<td>(StrOpt) Set a version cap for messages sent to the base api in any service</td>

View File

@ -15,6 +15,14 @@
</tr>
</thead>
<tbody>
<tr>
<td>aggregate_image_properties_isolation_namespace = None</td>
<td>(StrOpt) Force the filter to consider only keys matching the given namespace.</td>
</tr>
<tr>
<td>aggregate_image_properties_isolation_separator = .</td>
<td>(StrOpt) The separator used between the namespace and keys</td>
</tr>
<tr>
<td>cpu_allocation_ratio = 16.0</td>
<td>(FloatOpt) Virtual CPU to physical CPU allocation ratio which affects all CPU filters. This configuration specifies a global ratio for CoreFilter. For AggregateCoreFilter, it will fall back to this configuration value if no per-aggregate setting found.</td>
@ -119,6 +127,14 @@
<td>scheduler_weight_classes = nova.scheduler.weights.all_weighers</td>
<td>(ListOpt) Which weight class names to use for weighing hosts</td>
</tr>
<tr>
<td>weight_multiplier = 1.0</td>
<td>(FloatOpt) Multiplier used for weighing metrics.</td>
</tr>
<tr>
<td>weight_setting = </td>
<td>(ListOpt) How the metrics are going to be weighed. This should be in the form of "&lt;name1&gt;=&lt;ratio1&gt;, &lt;name2&gt;=&lt;ratio2&gt;, ...", where &lt;nameX&gt; is one of the metric to be weighed, and &lt;ratioX&gt; is the corresponding ratio. So for "name1=1.0, name2=-1.0" The final weight would be name1.value * 1.0 + name2.value * -1.0.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -17,7 +17,7 @@
<tbody>
<tr>
<td>api_retry_count = 10</td>
<td>(IntOpt) The number of times we retry on failures, e.g., socket error, etc. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.</td>
<td>(IntOpt) The number of times we retry on failures, e.g., socket error, etc.</td>
</tr>
<tr>
<td>cluster_name = None</td>
@ -29,15 +29,15 @@
</tr>
<tr>
<td>host_ip = None</td>
<td>(StrOpt) URL for connection to VMware ESX/VC host. Required if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.</td>
</tr>
<tr>
<td>host_username = None</td>
<td>(StrOpt) Username for connection to VMware ESX/VC host. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.</td>
<td>(StrOpt) URL for connection to VMware ESX/VC host.</td>
</tr>
<tr>
<td>host_password = None</td>
<td>(StrOpt) Password for connection to VMware ESX/VC host. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.</td>
<td>(StrOpt) Password for connection to VMware ESX/VC host.</td>
</tr>
<tr>
<td>host_username = None</td>
<td>(StrOpt) Username for connection to VMware ESX/VC host.</td>
</tr>
<tr>
<td>integration_bridge = br-int</td>
@ -49,7 +49,7 @@
</tr>
<tr>
<td>task_poll_interval = 5.0</td>
<td>(FloatOpt) The interval used for polling of remote tasks. Used only if compute_driver is vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.</td>
<td>(FloatOpt) The interval used for polling of remote tasks.</td>
</tr>
<tr>
<td>use_linked_clone = True</td>

View File

@ -27,10 +27,6 @@
<td>vnc_keymap = en-us</td>
<td>(StrOpt) keymap for vnc</td>
</tr>
<tr>
<td>vnc_password = None</td>
<td>(StrOpt) VNC password</td>
</tr>
<tr>
<td>vnc_port = 5900</td>
<td>(IntOpt) VNC starting port</td>

View File

@ -25,7 +25,7 @@
</tr>
<tr>
<td>cinder_ca_certificates_file = None</td>
<td>(StrOpt) Location of ca certicates file to use for cinder client requests.</td>
<td>(StrOpt) Location of ca certificates file to use for cinder client requests.</td>
</tr>
<tr>
<td>cinder_catalog_info = volume:cinder:publicURL</td>
@ -67,6 +67,10 @@
<td>num_aoe_discover_tries = 3</td>
<td>(IntOpt) number of times to rediscover AoE target to find volume</td>
</tr>
<tr>
<td>num_iscsi_scan_tries = 3</td>
<td>(IntOpt) number of times to rescan iSCSI target to find volume</td>
</tr>
<tr>
<td>num_iser_scan_tries = 3</td>
<td>(IntOpt) number of times to rescan iSER target to find volume</td>

View File

@ -35,6 +35,10 @@
<td>tcp_keepidle = 600</td>
<td>(IntOpt) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X.</td>
</tr>
<tr>
<td>wsgi_default_pool_size = 1000</td>
<td>(IntOpt) Size of the pool of greenthreads used by wsgi</td>
</tr>
<tr>
<td>wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f</td>
<td>(StrOpt) A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds.</td>

View File

@ -15,6 +15,10 @@
</tr>
</thead>
<tbody>
<tr>
<td>agent_path = usr/sbin/xe-update-networking</td>
<td>(StrOpt) Specifies the path in which the xenapi guest agent should be located. If the agent is present, network configuration is not injected into the image. Used if compute_driver=xenapi.XenAPIDriver and flat_injected=True</td>
</tr>
<tr>
<td>agent_resetnetwork_timeout = 60</td>
<td>(IntOpt) number of seconds to wait for agent reply to resetnetwork request</td>
@ -31,6 +35,26 @@
<td>cache_images = all</td>
<td>(StrOpt) Cache glance images locally. `all` will cache all images, `some` will only cache images that have the image_property `cache_in_nova=True`, and `none` turns off caching entirely</td>
</tr>
<tr>
<td>check_host = True</td>
<td>(BoolOpt) Ensure compute service is running on host XenAPI connects to.</td>
</tr>
<tr>
<td>connection_concurrent = 5</td>
<td>(IntOpt) Maximum number of concurrent XenAPI connections. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>connection_password = None</td>
<td>(StrOpt) Password for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>connection_url = None</td>
<td>(StrOpt) URL for connection to XenServer/Xen Cloud Platform. A special value of unix://local can be used to connect to the local unix socket. Required if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>connection_username = root</td>
<td>(StrOpt) Username for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>console_driver = nova.console.xvp.XVPConsoleProxy</td>
<td>(StrOpt) Driver to use for the console proxy</td>
@ -67,14 +91,70 @@
<td>default_os_type = linux</td>
<td>(StrOpt) Default OS type</td>
</tr>
<tr>
<td>disable_agent = False</td>
<td>(BoolOpt) Disables the use of the XenAPI agent in any image regardless of what image properties are present.</td>
</tr>
<tr>
<td>image_compression_level = None</td>
<td>(IntOpt) Compression level for images, e.g., 9 for gzip -9. Range is 1-9, 9 being most compressed but most CPU intensive on dom0.</td>
</tr>
<tr>
<td>image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore</td>
<td>(StrOpt) Dom0 plugin driver used to handle image uploads.</td>
</tr>
<tr>
<td>ipxe_boot_menu_url = None</td>
<td>(StrOpt) URL to the iPXE boot menu</td>
</tr>
<tr>
<td>ipxe_mkisofs_cmd = mkisofs</td>
<td>(StrOpt) Name and optionally path of the tool used for ISO image creation</td>
</tr>
<tr>
<td>ipxe_network_name = None</td>
<td>(StrOpt) Name of network to use for booting iPXE ISOs</td>
</tr>
<tr>
<td>iqn_prefix = iqn.2010-10.org.openstack</td>
<td>(StrOpt) IQN Prefix</td>
</tr>
<tr>
<td>login_timeout = 10</td>
<td>(IntOpt) Timeout in seconds for XenAPI login.</td>
</tr>
<tr>
<td>max_kernel_ramdisk_size = 16777216</td>
<td>(IntOpt) Maximum size in bytes of kernel or ramdisk images</td>
</tr>
<tr>
<td>num_vbd_unplug_retries = 10</td>
<td>(IntOpt) Maximum number of retries to unplug VBD</td>
</tr>
<tr>
<td>ovs_integration_bridge = xapi1</td>
<td>(StrOpt) Name of Integration Bridge used by Open vSwitch</td>
</tr>
<tr>
<td>remap_vbd_dev = False</td>
<td>(BoolOpt) Used to enable the remapping of VBD dev (Works around an issue in Ubuntu Maverick)</td>
</tr>
<tr>
<td>remap_vbd_dev_prefix = sd</td>
<td>(StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb -&gt; /dev/sdb)</td>
</tr>
<tr>
<td>running_timeout = 60</td>
<td>(IntOpt) number of seconds to wait for instance to go to running state</td>
</tr>
<tr>
<td>sparse_copy = True</td>
<td>(BoolOpt) Whether to use sparse_copy for copying data on a resize down (False will use standard dd). This speeds up resizes down considerably since large runs of zeros won't have to be rsynced</td>
</tr>
<tr>
<td>sr_base_path = /var/run/sr-mount</td>
<td>(StrOpt) Base path to the storage repository</td>
</tr>
<tr>
<td>sr_matching_filter = default-sr:true</td>
<td>(StrOpt) Filter for finding the SR to be used to install guest instances on. To use the Local Storage in default XenServer/XCP installations set this flag to other-config:i18n-key=local-storage. To select an SR with a different matching criteria, you could set it to other-config:my_favorite_sr=true. On the other hand, to fall back on the Default SR, as displayed by XenCenter, set this flag to: default-sr:true</td>
@ -91,146 +171,62 @@
<td>target_port = 3260</td>
<td>(StrOpt) iSCSI Target Port, 3260 Default</td>
</tr>
<tr>
<td>torrent_base_url = None</td>
<td>(StrOpt) Base URL for torrent files.</td>
</tr>
<tr>
<td>torrent_download_stall_cutoff = 600</td>
<td>(IntOpt) Number of seconds a download can remain at the same progress percentage w/o being considered a stall</td>
</tr>
<tr>
<td>torrent_images = none</td>
<td>(StrOpt) Whether or not to download images via Bit Torrent (all|some|none).</td>
</tr>
<tr>
<td>torrent_listen_port_end = 6891</td>
<td>(IntOpt) End of port range to listen on</td>
</tr>
<tr>
<td>torrent_listen_port_start = 6881</td>
<td>(IntOpt) Beginning of port range to listen on</td>
</tr>
<tr>
<td>torrent_max_last_accessed = 86400</td>
<td>(IntOpt) Cached torrent files not accessed within this number of seconds can be reaped</td>
</tr>
<tr>
<td>torrent_max_seeder_processes_per_host = 1</td>
<td>(IntOpt) Maximum number of seeder processes to run concurrently within a given dom0. (-1 = no limit)</td>
</tr>
<tr>
<td>torrent_seed_chance = 1.0</td>
<td>(FloatOpt) Probability that peer will become a seeder. (1.0 = 100%)</td>
</tr>
<tr>
<td>torrent_seed_duration = 3600</td>
<td>(IntOpt) Number of seconds after downloading an image via BitTorrent that it should be seeded for other peers.</td>
</tr>
<tr>
<td>use_agent_default = False</td>
<td>(BoolOpt) Determines if the xenapi agent should be used when the image used does not contain a hint to declare if the agent is present or not. The hint is a glance property "xenapi_use_agent" that has the value "true" or "false". Note that waiting for the agent when it is not present will significantly increase server boot times.</td>
</tr>
<tr>
<td>use_join_force = True</td>
<td>(BoolOpt) To use for hosts with different CPUs</td>
</tr>
<tr>
<td>vhd_coalesce_max_attempts = 5</td>
<td>(IntOpt) Max number of times to poll for VHD to coalesce. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>vhd_coalesce_poll_interval = 5.0</td>
<td>(FloatOpt) The interval used for polling of coalescing vhds. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xen_hvmloader_path = /usr/lib/xen/boot/hvmloader</td>
<td>(StrOpt) Location where the Xen hvmloader is kept</td>
</tr>
<tr>
<td>xenapi_agent_path = usr/sbin/xe-update-networking</td>
<td>(StrOpt) Specifies the path in which the xenapi guest agent should be located. If the agent is present, network configuration is not injected into the image. Used if compute_driver=xenapi.XenAPIDriver and flat_injected=True</td>
</tr>
<tr>
<td>xenapi_check_host = True</td>
<td>(BoolOpt) Ensure compute service is running on host XenAPI connects to.</td>
</tr>
<tr>
<td>xenapi_connection_concurrent = 5</td>
<td>(IntOpt) Maximum number of concurrent XenAPI connections. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xenapi_connection_password = None</td>
<td>(StrOpt) Password for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xenapi_connection_url = None</td>
<td>(StrOpt) URL for connection to XenServer/Xen Cloud Platform. A special value of unix://local can be used to connect to the local unix socket. Required if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xenapi_connection_username = root</td>
<td>(StrOpt) Username for connection to XenServer/Xen Cloud Platform. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xenapi_disable_agent = False</td>
<td>(BoolOpt) Disables the use of the XenAPI agent in any image regardless of what image properties are present. </td>
</tr>
<tr>
<td>xenapi_image_compression_level = None</td>
<td>(IntOpt) Compression level for images, e.g., 9 for gzip -9. Range is 1-9, 9 being most compressed but most CPU intensive on dom0.</td>
</tr>
<tr>
<td>xenapi_image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore</td>
<td>(StrOpt) Dom0 plugin driver used to handle image uploads.</td>
</tr>
<tr>
<td>xenapi_ipxe_boot_menu_url = None</td>
<td>(StrOpt) URL to the iPXE boot menu</td>
</tr>
<tr>
<td>xenapi_login_timeout = 10</td>
<td>(IntOpt) Timeout in seconds for XenAPI login.</td>
</tr>
<tr>
<td>xenapi_ipxe_mkisofs_cmd = mkisofs</td>
<td>(StrOpt) Name and optionally path of the tool used for ISO image creation</td>
</tr>
<tr>
<td>xenapi_num_vbd_unplug_retries = 10</td>
<td>(IntOpt) Maximum number of retries to unplug VBD</td>
</tr>
<tr>
<td>xenapi_ipxe_network_name = None</td>
<td>(StrOpt) Name of network to use for booting iPXE ISOs</td>
</tr>
<tr>
<td>xenapi_ovs_integration_bridge = xapi1</td>
<td>(StrOpt) Name of Integration Bridge used by Open vSwitch</td>
</tr>
<tr>
<td>xenapi_remap_vbd_dev = False</td>
<td>(BoolOpt) Used to enable the remapping of VBD dev (Works around an issue in Ubuntu Maverick)</td>
</tr>
<tr>
<td>xenapi_remap_vbd_dev_prefix = sd</td>
<td>(StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb -&gt; /dev/sdb)</td>
</tr>
<tr>
<td>xenapi_running_timeout = 60</td>
<td>(IntOpt) number of seconds to wait for instance to go to running state</td>
</tr>
<tr>
<td>xenapi_sparse_copy = True</td>
<td>(BoolOpt) Whether to use sparse_copy for copying data on a resize down (False will use standard dd). This speeds up resizes down considerably since large runs of zeros won't have to be rsynced</td>
</tr>
<tr>
<td>xenapi_sr_base_path = /var/run/sr-mount</td>
<td>(StrOpt) Base path to the storage repository</td>
</tr>
<tr>
<td>xenapi_torrent_base_url = None</td>
<td>(StrOpt) Base URL for torrent files.</td>
</tr>
<tr>
<td>xenapi_torrent_download_stall_cutoff = 600</td>
<td>(IntOpt) Number of seconds a download can remain at the same progress percentage w/o being considered a stall</td>
</tr>
<tr>
<td>xenapi_torrent_images = none</td>
<td>(StrOpt) Whether or not to download images via Bit Torrent (all|some|none).</td>
</tr>
<tr>
<td>xenapi_torrent_listen_port_end = 6891</td>
<td>(IntOpt) End of port range to listen on</td>
</tr>
<tr>
<td>xenapi_torrent_listen_port_start = 6881</td>
<td>(IntOpt) Beginning of port range to listen on</td>
</tr>
<tr>
<td>xenapi_torrent_max_last_accessed = 86400</td>
<td>(IntOpt) Cached torrent files not accessed within this number of seconds can be reaped</td>
</tr>
<tr>
<td>xenapi_torrent_max_seeder_processes_per_host = 1</td>
<td>(IntOpt) Maximum number of seeder processes to run concurrently within a given dom0. (-1 = no limit)</td>
</tr>
<tr>
<td>xenapi_torrent_seed_chance = 1.0</td>
<td>(FloatOpt) Probability that peer will become a seeder. (1.0 = 100%)</td>
</tr>
<tr>
<td>xenapi_torrent_seed_duration = 3600</td>
<td>(IntOpt) Number of seconds after downloading an image via BitTorrent that it should be seeded for other peers.</td>
</tr>
<tr>
<td>xenapi_use_agent_default = False</td>
<td>(BoolOpt) Determines if the xenapi agent should be used when the image used does not contain a hint to declare if the agent is present or not. The hint is a glance property "xenapi_use_agent" that has the value "true" or "false". Note that waiting for the agent when it is not present will significantly increase server boot times.</td>
</tr>
<tr>
<td>xenapi_vhd_coalesce_max_attempts = 5</td>
<td>(IntOpt) Max number of times to poll for VHD to coalesce. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xenapi_vhd_coalesce_poll_interval = 5.0</td>
<td>(FloatOpt) The interval used for polling of coalescing vhds. Used only if compute_driver=xenapi.XenAPIDriver</td>
</tr>
<tr>
<td>xenapi_vif_driver = nova.virt.xenapi.vif.XenAPIBridgeDriver</td>
<td>(StrOpt) The XenAPI VIF driver using XenServer Network APIs.</td>
</tr>
</tbody>
</table>
</para>

View File

@ -62,13 +62,6 @@
>nova-compute</systemitem> service in a
para-virtualized VM.</para>
</listitem>
<listitem>
<para><link
xlink:href="http://www-03.ibm.com/systems/power/software/virtualization/features.html"
> PowerVM</link> - Server virtualization with IBM
PowerVM, use to run AIX, IBM i and Linux environments
on IBM POWER technology.</para>
</listitem>
<listitem>
<para><link
xlink:href="http://www.microsoft.com/en-us/server-cloud/windows-server/server-virtualization-features.aspx"
@ -118,7 +111,6 @@
<xi:include href="section_introduction-to-xen.xml"/>
<xi:include href="section_hypervisor_lxc.xml"/>
<xi:include href="section_hypervisor_vmware.xml"/>
<xi:include href="section_hypervisor_powervm.xml"/>
<xi:include href="section_hypervisor_hyper-v.xml"/>
<xi:include href="section_hypervisor_baremetal.xml"/>
<xi:include href="section_hypervisor_docker.xml"/>

View File

@ -37,7 +37,6 @@
<xi:include href="../../common/tables/nova-network.xml"/>
<xi:include href="../../common/tables/nova-periodic.xml"/>
<xi:include href="../../common/tables/nova-policy.xml"/>
<xi:include href="../../common/tables/nova-powervm.xml"/>
<xi:include href="../../common/tables/nova-qpid.xml"/>
<xi:include href="../../common/tables/nova-neutron.xml"/>
<xi:include href="../../common/tables/nova-quota.xml"/>

View File

@ -1,50 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
xml:id="powervm">
<title>PowerVM</title>
<?dbhtml stop-chunking?>
<section xml:id="powervm-intro">
<title>Introduction</title>
<warning><title>PowerVM driver removed</title>
<para>
Due to a change in strategic direction at IBM, the PowerVM driver
has been removed from OpenStack Compute. It will be replaced by
a driver using PowerVC in the future.
</para>
</warning>
<para>The PowerVM compute driver connects to an Integrated Virtualization
Manager (IVM) to perform PowerVM Logical Partition (LPAR)
deployment and management. The driver supports file-based deployment
using images from the OpenStack Image Service.</para>
<note><para>Hardware Management Console (HMC) is not yet supported.</para></note>
<para>For more detailed information about the PowerVM Virtualization system,
refer to the IBM Redbook publication:
<link xlink:href="http://www.redbooks.ibm.com/abstracts/sg247940.html">
IBM PowerVM Virtualization Introduction and Configuration</link>.</para>
</section>
<section xml:id="powervm-config">
<title>Configuration</title>
<para>To enable the PowerVM compute driver, add the following configuration
options to <filename>/etc/nova/nova.conf</filename>:
<programlisting language="ini">compute_driver=nova.virt.powervm.PowerVMDriver
powervm_mgr_type=ivm
powervm_mgr=powervm_hostname_or_ip_address
powervm_mgr_user=padmin
powervm_mgr_passwd=padmin_user_password
powervm_img_remote_path=/path/to/remote/image/directory
powervm_img_local_path=/path/to/local/image/directory/on/compute/host</programlisting>
</para>
<xi:include href="../../common/tables/nova-powervm.xml"/>
</section>
<section xml:id="powervm-limits">
<title>Limitations</title>
<para>
PowerVM LPAR names have a limit of 31 characters. Because OpenStack Compute instance names
are mapped to LPAR names on Power Systems, make sure that the
<literal>instance_name_template</literal>
config option in <filename>nova.conf</filename> yields names that have 31 or fewer characters.
</para>
</section>
</section>

View File

@ -1,18 +1,22 @@
address zookeeper
agent_enabled spice
agent_path xen
agent_resetnetwork_timeout xen
agent_timeout xen
agent_version_timeout xen
aggregate_image_properties_isolation_namespace scheduling
aggregate_image_properties_isolation_separator scheduling
allow_instance_snapshots policy
allow_migrate_to_same_host policy
allow_resize_to_same_host policy
allow_same_net_traffic network
allowed_direct_url_schemes glance
allowed_rpc_exception_modules testing
amqp_auto_delete rpc
amqp_durable_queues rpc
api_audit_map common
api_class keymgr
api_paste_config wsgi
amqp_auto_delete rpc
api_rate_limit authentication
api_retry_count vmware
attestation_api_url trustedcomputing
@ -27,7 +31,6 @@ backdoor_port testing
backend db
bandwidth_poll_interval quota
bandwidth_update_interval quota
base_dir_name compute
baseapi rpc
bindir common
block_device_creation_timeout volumes
@ -46,6 +49,7 @@ cert upgrade_levels
cert_file ca
cert_manager ca
cert_topic ca
check_host xen
checksum_base_images libvirt
checksum_interval_seconds libvirt
cinder_api_insecure volumes
@ -57,9 +61,10 @@ cinder_http_retries volumes
cluster_name vmware
cnt_vpn_clients network
compute upgrade_levels
compute_api_class compute
compute_available_monitors compute
compute_driver compute
compute_manager compute
compute_monitors compute
compute_stats_class compute
compute_topic common
conductor upgrade_levels
@ -68,12 +73,16 @@ config_drive_format configdrive
config_drive_inject_password configdrive
config_drive_skip_versions configdrive
config_drive_tempdir configdrive
connection_trace db
console upgrade_levels
connection db
conn_uri libvirt
console_driver xen
connection_concurrent xen
connection_debug db
connection_password xen
connection_trace db
connection_uri libvirt
connection_url xen
connection_username xen
console upgrade_levels
console_driver xen
console_host compute
console_manager compute
console_public_hostname console
@ -91,6 +100,8 @@ consoleauth_manager console
consoleauth_topic common
control_exchange rpc
cpu_allocation_ratio scheduling
cpu_mode libvirt
cpu_model libvirt
create_unique_mac_address_attempts network
crl_file ca
datastore_regex vmware
@ -114,12 +125,13 @@ deploy_kernel baremetal
deploy_ramdisk baremetal
dhcp_domain network
dhcp_lease_time network
dhcp_options_enabled neutron
dhcpbridge network
dhcpbridge_flagfile network
disable_agent xen
disable_process_locking common
disk_allocation_ratio scheduling
disk_cachemodes hypervisor
disk_prefix libvirt
dmz_cidr vpn
dmz_mask vpn
dmz_net vpn
@ -188,14 +200,23 @@ heal_instance_info_cache_interval compute
host common
host redis
host_ip vmware
host_password vmware
host_state_interval compute
host_username vmware
html5proxy_base_url spice
idle_timeout db
image_cache_manager_interval compute
host_password vmware
image_cache_subdirectory_name compute
image_compression_level xen
image_decryption_dir s3
image_info_filename_pattern libvirt
image_upload_handler xen
images_rbd_ceph_conf hypervisor
images_rbd_pool hypervisor
images_type hypervisor
images_volume_group hypervisor
inject_key hypervisor
inject_partition hypervisor
inject_password hypervisor
injected_network_template network
injected_network_template network
@ -220,8 +241,13 @@ iptables_bottom_regex network
iptables_drop_action network
iptables_top_regex network
ipv6_backend ipv6
ipxe_boot_menu_url xen
ipxe_mkisofs_cmd xen
ipxe_network_name xen
iqn_prefix xen
iscsi_iqn_prefix volumes
iscsi_use_multipath hypervisor
iser_use_multipath hypervisor
isolated_hosts scheduling
isolated_images scheduling
key_file ca
@ -243,27 +269,6 @@ ldap_dns_soa_refresh ldap
ldap_dns_soa_retry ldap
ldap_dns_url ldap
ldap_dns_user ldap
libvirt_cpu_mode libvirt
libvirt_cpu_model libvirt
libvirt_disk_prefix libvirt
libvirt_images_rbd_ceph_conf hypervisor
libvirt_images_type hypervisor
libvirt_images_rbd_pool hypervisor
libvirt_images_volume_group hypervisor
libvirt_inject_key libvirt
libvirt_inject_partition libvirt
libvirt_inject_password libvirt
libvirt_iscsi_use_multipath hypervisor
libvirt_iser_use_multipath hypervisor
libvirt_lvm_snapshot_size hypervisor
libvirt_non_blocking libvirt
libvirt_snapshot_compression hypervisor
libvirt_snapshots_directory libvirt
libvirt_sparse_logical_volumes hypervisor
libvirt_use_virtio_for_bridges hypervisor
libvirt_vif_driver libvirt
libvirt_volume_drivers libvirt
libvirt_wait_soft_reboot_seconds libvirt
limit_cpu_features hyperv
linuxnet_interface_driver network
linuxnet_ovs_integration_bridge network
@ -275,7 +280,7 @@ lock_path common
lockout_attempts ec2
lockout_minutes ec2
lockout_window ec2
log_config logging
log_config_append logging
log_date_format logging
log_dir logging
log_file logging
@ -284,6 +289,8 @@ logging_context_format_string logging
logging_debug_format_suffix logging
logging_default_format_string logging
logging_exception_prefix logging
login_timeout xen
lvm_snapshot_size hypervisor
manager cells
manager conductor
matchmaker_heartbeat_freq rpc
@ -294,15 +301,15 @@ max_instances_per_host scheduling
max_io_ops_per_host scheduling
max_kernel_ramdisk_size xen
max_local_block_devices policy
max_pool_size db
memcached_servers common
max_overflow db
metadata_host metadata
max_pool_size db
max_retries db
metadata_listen metadata
maximum_objects vmware
metadata_listen_port metadata
maximum_instance_delete_attempts compute
maximum_objects vmware
memcached_servers common
metadata_host metadata
metadata_listen metadata
metadata_listen_port metadata
metadata_manager metadata
metadata_port metadata
metadata_workers metadata
@ -318,6 +325,7 @@ mute_weight_multiplier cells
mute_weight_value cells
my_ip common
name cells
namespace common
net_config_template baremetal
network upgrade_levels
network_allocate_retries network
@ -352,8 +360,11 @@ notify_on_state_change common
novncproxy_base_url vnc
null_kernel api
num_aoe_discover_tries volumes
num_iscsi_scan_tries volumes
num_iser_scan_tries volumes
num_networks network
num_vbd_unplug_retries xen
offset_weight_multiplier cells
os_region_name volumes
osapi_compute_ext_list api
osapi_compute_extension api
@ -366,6 +377,7 @@ osapi_glance_link_prefix glance
osapi_hide_server_address_states api
osapi_max_limit policy
osapi_max_request_body_size policy
ovs_integration_bridge xen
ovs_vsctl_timeout network
password redis
password_length policy
@ -378,12 +390,6 @@ policy_file policy
pool_timeout db
port redis
power_manager baremetal
powervm_img_local_path powervm
powervm_img_remote_path powervm
powervm_mgr powervm
powervm_mgr_passwd powervm
powervm_mgr_type powervm
powervm_mgr_user powervm
preallocate_images hypervisor
project_cert_subject ca
public_interface network
@ -439,6 +445,8 @@ reclaim_instance_interval compute
recv_timeout zookeeper
region_list ec2
registry_default_port docker
remap_vbd_dev xen
remap_vbd_dev_prefix xen
remove_unused_base_images libvirt
remove_unused_kernels libvirt
remove_unused_original_minimum_age_seconds libvirt
@ -453,9 +461,9 @@ reserve_percent cells
reserved_host_disk_mb scheduling
reserved_host_memory_mb scheduling
resize_confirm_window compute
resize_fs_using_block_device policy
restrict_isolated_hosts_to_isolated_images scheduling
resume_guests_state_on_host_boot compute
resize_fs_using_block_device policy
retry_interval db
ringfile rpc
rootwrap_config common
@ -477,6 +485,7 @@ run_external_periodic_tasks periodic
running_deleted_instance_action compute
running_deleted_instance_poll_interval compute
running_deleted_instance_timeout compute
running_timeout xen
s3_access_key s3
s3_affix_tenant s3
s3_host s3
@ -515,12 +524,17 @@ share_dhcp_address network
shelved_offload_time compute
shelved_poll_interval compute
slave_connection db
snapshot_compression hypervisor
snapshot_image_format hypervisor
snapshot_name_template api
snapshots_directory libvirt
sparse_copy xen
sparse_logical_volumes hypervisor
sql_connection baremetal
sql_connection db
sqlite_db db
sqlite_synchronous db
sr_base_path xen
sr_matching_filter xen
ssl_ca_file wsgi
ssl_cert_file wsgi
@ -549,9 +563,20 @@ timeout_nbd hypervisor
topic cells
topic conductor
topics rpc
torrent_base_url xen
torrent_download_stall_cutoff xen
torrent_images xen
torrent_listen_port_end xen
torrent_listen_port_start xen
torrent_max_last_accessed xen
torrent_max_seeder_processes_per_host xen
torrent_seed_chance xen
torrent_seed_duration xen
until_refresh policy
update_dns_entries network
use_agent_default xen
use_cow_images hypervisor
use_file_injection baremetal
use_forwarded_for api
use_ipv6 ipv6
use_join_force xen
@ -566,11 +591,14 @@ use_syslog logging
use_tpool api
use_unsafe_iscsi baremetal
use_usb_tablet hypervisor
use_virtio_for_bridges hypervisor
user_cert_subject ca
vcpu_pin_set hypervisor
vendordata_driver metadata
verbose logging
vendordata_jsonfile_path metadata
verbose logging
vhd_coalesce_max_attempts xen
vhd_coalesce_poll_interval xen
vif_driver baremetal
virt_mkfs hypervisor
virt_type libvirt
@ -584,7 +612,6 @@ vlan_interface network
vlan_start network
vnc_enabled vnc
vnc_keymap vnc
vnc_password vnc
vnc_port vnc
vnc_port_total vnc
vncserver_listen vnc
@ -593,6 +620,7 @@ volume_api_class volumes
volume_attach_retry_count volumes
volume_attach_retry_interval volumes
volume_driver volumes
volume_drivers libvirt
volume_usage_poll_interval volumes
vpn_flavor vpn
vpn_image_id vpn
@ -600,43 +628,14 @@ vpn_ip vpn
vpn_key_suffix vpn
vpn_start vpn
vswitch_name hyperv
wait_soft_reboot_seconds libvirt
weight_multiplier scheduling
weight_setting scheduling
workers conductor
wsdl_location vmware
wsgi_default_pool_size wsgi
wsgi_log_format wsgi
xen_hvmloader_path xen
xenapi_agent_path xen
xenapi_check_host xen
xenapi_connection_concurrent xen
xenapi_connection_password xen
xenapi_connection_url xen
xenapi_connection_username xen
xenapi_disable_agent xen
xenapi_image_compression_level xen
xenapi_image_upload_handler xen
xenapi_ipxe_boot_menu_url xen
xenapi_login_timeout xen
xenapi_ipxe_mkisofs_cmd xen
xenapi_num_vbd_unplug_retries xen
xenapi_ipxe_network_name xen
xenapi_ovs_integration_bridge xen
xenapi_remap_vbd_dev xen
xenapi_remap_vbd_dev_prefix xen
xenapi_running_timeout xen
xenapi_sparse_copy xen
xenapi_sr_base_path xen
xenapi_torrent_base_url xen
xenapi_torrent_download_stall_cutoff xen
xenapi_torrent_images xen
xenapi_torrent_listen_port_end xen
xenapi_torrent_listen_port_start xen
xenapi_torrent_max_last_accessed xen
xenapi_torrent_max_seeder_processes_per_host xen
xenapi_torrent_seed_chance xen
xenapi_torrent_seed_duration xen
xenapi_use_agent_default xen
xenapi_vhd_coalesce_max_attempts xen
xenapi_vhd_coalesce_poll_interval xen
xenapi_vif_driver xen
xvpvncproxy_base_url xvpnvncproxy
xvpvncproxy_host xvpnvncproxy
xvpvncproxy_port xvpnvncproxy