5dd4018141
With NovaNfsEnabled, instance creation fails due to a wrong default SELinux context. The default in THT was set to nova_var_lib_t in Ie4fe217bd119b638f42c682d21572547f02f17b2, while system_u:object_r:nfs_t:s0 is the context that should have access. The virt_use_nfs boolean, which is turned on by openstack-selinux, covers this use case.

This changes the default to context=system_u:object_r:nfs_t:s0.

Change-Id: I2a28462b6f6bc9f8a41a81ea8c65471f05df3b85
Closes-Bug: 1781894
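For reference, a deployment affected by this bug enables the Nova NFS backend roughly as in the following sketch. The share address is the illustrative example value used in the environment file below, not a real deployment value; with this change, NovaNfsOptions already defaults to the nfs_t context and only needs overriding for site-specific mount options.

parameter_defaults:
  NovaNfsEnabled: true
  # Illustrative share address, taken from the example in the file below
  NovaNfsShare: '192.168.122.1:/export/nova'
  # New default after this change; shown commented out only for clarity
  # NovaNfsOptions: 'context=system_u:object_r:nfs_t:s0'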
88 lines | 3.4 KiB | YAML
# ******************************************************************************
# This file will not enable the deployment of Ceph in future releases.
# Use ./ceph-ansible/ceph-ansible.yaml for this purpose instead.
# ******************************************************************************
## A Heat environment file which can be used to set up storage
## backends. Defaults to Ceph used as a backend for Cinder, Glance and
## Nova ephemeral storage.
resource_registry:
  OS::TripleO::Services::CephMgr: ../docker/services/ceph-ansible/ceph-mgr.yaml
  OS::TripleO::Services::CephMon: ../docker/services/ceph-ansible/ceph-mon.yaml
  OS::TripleO::Services::CephOSD: ../docker/services/ceph-ansible/ceph-osd.yaml
  OS::TripleO::Services::CephClient: ../docker/services/ceph-ansible/ceph-client.yaml

parameter_defaults:
  #### BACKEND SELECTION ####

  ## Whether to enable iscsi backend for Cinder.
  CinderEnableIscsiBackend: false
  ## Whether to enable rbd (Ceph) backend for Cinder.
  CinderEnableRbdBackend: true
  ## Cinder Backup backend can be either 'ceph', 'swift' or 'nfs'.
  CinderBackupBackend: ceph
  ## Whether to enable NFS backend for Cinder.
  # CinderEnableNfsBackend: false
  ## Whether to enable rbd (Ceph) backend for Nova ephemeral storage.
  NovaEnableRbdBackend: true
  ## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'.
  GlanceBackend: rbd
  ## Gnocchi backend can be either 'rbd' (Ceph), 'swift' or 'file'.
  GnocchiBackend: rbd

  #### CINDER NFS SETTINGS ####

  ## NFS mount options
  # CinderNfsMountOptions: ''
  ## NFS mount point, e.g. '192.168.122.1:/export/cinder'
  # CinderNfsServers: ''

  #### GLANCE NFS SETTINGS ####

  ## Make sure to set `GlanceBackend: file` when enabling NFS
  ##
  ## Whether to make Glance 'file' backend a NFS mount
  # GlanceNfsEnabled: false
  ## NFS share for image storage, e.g. '192.168.122.1:/export/glance'
  ## (If using IPv6, use both double- and single-quotes,
  ## e.g. "'[fdd0::1]:/export/glance'")
  # GlanceNfsShare: ''
  ## Mount options for the NFS image storage mount point
  # GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0'

  #### NOVA NFS SETTINGS ####
  # NovaNfsEnabled: false
  ## NFS share for nova instance file storage, e.g. '192.168.122.1:/export/nova'
  ## (If using IPv6, use both double and single-quotes,
  ## e.g. "'[fdd0::1]:/export/nova'")
  # NovaNfsShare: ''
  ## Mount options for the NFS instance file storage mount point
  # NovaNfsOptions: 'context=system_u:object_r:nfs_t:s0'

  #### CEPH SETTINGS ####

  ## When deploying Ceph Nodes through the oscplugin CLI, the following
  ## parameters are set automatically by the CLI. When deploying via
  ## heat stack-create or ceph on the controller nodes only,
  ## they need to be provided manually.

  ## Number of Ceph storage nodes to deploy
  # CephStorageCount: 0
  ## Ceph FSID, e.g. '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
  # CephClusterFSID: ''
  ## Ceph monitor key, e.g. 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
  # CephMonKey: ''
  ## Ceph admin key, e.g. 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
  # CephAdminKey: ''
  ## Ceph client key, e.g. 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
  # CephClientKey: ''
  ## OSDs configuration
  ## See https://github.com/ceph/ceph-ansible/blob/stable-3.0/docs/source/osds/scenarios.rst
  # CephAnsibleDisksConfig:
  #   devices:
  #     - /dev/vdb
  #   osd_scenario: collocated
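The commented parameters above can also be collected into a separate operator environment file rather than edited in place. A minimal sketch, assuming the illustrative share addresses from the comments above (not real deployment values):

parameter_defaults:
  # Add an NFS backend for Cinder alongside the defaults above
  CinderEnableNfsBackend: true
  CinderNfsServers: '192.168.122.1:/export/cinder'
  # Serve Glance images from an NFS-backed 'file' store
  GlanceBackend: file
  GlanceNfsEnabled: true
  GlanceNfsShare: '192.168.122.1:/export/glance'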