Rename the file in the environments directory so that it reflects its
expanded scope. This file is used when deploying storage with DCN sites
regardless of whether those sites use HCI. We now support non-HCI DCN
sites with storage, so the old name is confusing.

Old name: dcn-hci.yaml
New name: dcn-storage.yaml

dcn-hci.yaml is deprecated but will remain in the environments directory
for backwards compatibility. dcn-hci.yaml will be removed during the X
cycle.

Change-Id: Ice5e1cfbc158eb6705988706c8625bedb80d7de2
(cherry picked from commit d4ae25e2fd)
(cherry picked from commit 8d0638ecac)
60 lines
3.3 KiB
YAML
# DEPRECATED:
# - Please use dcn-storage.yaml instead of this file.
# - This file will be removed during the X-cycle.
# *******************************************************************
# This file was created automatically by the sample environment
# generator. Developers should use `tox -e genconfig` to update it.
# Users are recommended to make changes to a copy of the file instead
# of the original, if any customizations are needed.
# *******************************************************************
# title: Distributed Compute Node HCI
# description: |
#   Environment file for deploying a remote site of HCI distributed compute
#   nodes (DCN) in a separate stack (multi-stack) deployment. It should be
#   used in combination with environments/ceph-ansible/ceph-ansible.yaml.
parameter_defaults:
  # When running Cinder A/A, whether to connect to Etcd via the local IP for the Etcd network. If set to true, the ip on the local node will be used. If set to false, the VIP on the Etcd network will be used instead. Defaults to false.
  # Type: boolean
  CinderEtcdLocalConnect: True

  # The Cinder service's storage availability zone.
  # Type: string
  CinderStorageAvailabilityZone: dcn

  # The cluster name used for deploying the cinder-volume service in an active-active (A/A) configuration. This configuration requires the Cinder backend drivers support A/A, and the cinder-volume service not be managed by pacemaker. If these criteria are not met then the cluster name must be left blank.
  # Type: string
  CinderVolumeCluster: dcn

  # Enable Glance Image Cache
  # Type: boolean
  GlanceCacheEnabled: False

  # The upper limit on cache size, in bytes, after which the cache-pruner cleans up the image cache.
  # Type: number
  GlanceImageCacheMaxSize: 10737418240

  # Manage the network and related resources (subnets and segments) with either create, update, or delete operations (depending on the stack operation). Does not apply to ports which will always be managed as needed. Defaults to true. For multi-stack use cases where the network related resources have already been managed by a separate stack, this parameter can be set to false.
  # Type: boolean
  ManageNetworks: False

  # The availability zone where new Nova compute nodes will be added. If the zone does not already exist, it will be created. If left unset, it will default to the value of the stack name.
  # Type: string
  NovaComputeAvailabilityZone: ''

  # Whether instances can attach cinder volumes from a different availability zone.
  # Type: boolean
  NovaCrossAZAttach: False

  # Refuse to boot an instance if it would require downloading from glance and uploading to ceph instead of a COW clone.
  # Type: boolean
  NovaDisableImageDownloadToRbd: True

resource_registry:
  OS::TripleO::Network::Ports::OVNDBsVipPort: ../network/ports/noop.yaml
  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/noop.yaml
  OS::TripleO::Services::CinderVolumeEdge: ../deployment/cinder/cinder-volume-container-puppet.yaml
  OS::TripleO::Services::Etcd: ../deployment/etcd/etcd-container-puppet.yaml
  OS::TripleO::Services::GlanceApiEdge: ../deployment/glance/glance-api-edge-container-puppet.yaml
  OS::TripleO::Services::HAproxyEdge: ../deployment/haproxy/haproxy-edge-container-puppet.yaml
  OS::TripleO::Services::NovaAZConfig: ../deployment/nova/nova-az-config.yaml
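The header above recommends customizing a copy of this file rather than the generated original, and the description notes it is meant to be combined with environments/ceph-ansible/ceph-ansible.yaml. As a minimal sketch of what such a per-site override environment file might look like, assuming a hypothetical DCN site named dcn-site1 (the file name and the zone/cluster values below are illustrative assumptions, not part of this change):

# dcn-site1-overrides.yaml (hypothetical) -- included after dcn-storage.yaml
# and ceph-ansible.yaml in the deployment so these values take precedence.
parameter_defaults:
  # Illustrative per-site names; any scheme that uniquely identifies the
  # remote site would work here.
  CinderStorageAvailabilityZone: dcn-site1
  CinderVolumeCluster: dcn-site1
  NovaComputeAvailabilityZone: dcn-site1
  # Keep False when the network resources are already managed by the
  # central stack, per the ManageNetworks description above.
  ManageNetworks: False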