30ca49bf61
This patch adds two new tripleo services that together support deploying the glance-api service at edge sites. The service uses the same glance database in the control plane, but allows other edge services (e.g. cinder and nova) to access a glance endpoint that is local to the edge site. A new GlanceApiEdge service is a minor variant of the GlanceApi service. The most significant change is that it doesn't use the control plane VIP, but instead configures the cinder and nova services to access the glance-api endpoint running on that edge node (not the VIP). A companion HAproxyEdge service supports scaling out DCN sites with a larger (>3) number of nodes. Instead of deploying GlanceApiEdge on every node, the HAproxyEdge service configures a local haproxy to forward glance-api requests to the edge nodes running GlanceApiEdge. The HAproxyEdge service is extensible: while this patch is only concerned with proxying glance-api, it can be extended to support additional proxy requirements as needs arise. blueprint: split-controlplane-glance-cache Change-Id: Id6c416b8c7b3b6314d935e3eeb8a3f114492cecd Depends-On: Ic8d652a5209219c96f795a8c18ceb457c6d9382a
49 lines
2.9 KiB
YAML
# *******************************************************************
# This file was created automatically by the sample environment
# generator. Developers should use `tox -e genconfig` to update it.
# Users are recommended to make changes to a copy of the file instead
# of the original, if any customizations are needed.
# *******************************************************************
# title: Distributed Compute Node HCI
# description: |
#   Environment file for deploying a remote site of HCI distributed compute
#   nodes (DCN) in a separate stack (multi-stack) deployment. It should be
#   used in combination with environments/ceph-ansible/ceph-ansible.yaml.
parameter_defaults:
  # When running Cinder A/A, whether to connect to Etcd via the local IP for the Etcd network. If set to true, the ip on the local node will be used. If set to false, the VIP on the Etcd network will be used instead. Defaults to false.
  # Type: boolean
  CinderEtcdLocalConnect: true

  # The Cinder service's storage availability zone.
  # Type: string
  CinderStorageAvailabilityZone: dcn

  # The cluster name used for deploying the cinder-volume service in an active-active (A/A) configuration. This configuration requires the Cinder backend drivers support A/A, and the cinder-volume service not be managed by pacemaker. If these criteria are not met then the cluster name must be left blank.
  # Type: string
  CinderVolumeCluster: dcn

  # Enable Glance Image Cache
  # Type: boolean
  GlanceCacheEnabled: false

  # The upper limit on cache size, in bytes, after which the cache-pruner cleans up the image cache.
  # Type: number
  GlanceImageCacheMaxSize: 10737418240

  # Manage the network and related resources (subnets and segments) with either create, update, or delete operations (depending on the stack operation). Does not apply to ports which will always be managed as needed. Defaults to true. For multi-stack use cases where the network related resources have already been managed by a separate stack, this parameter can be set to false.
  # Type: boolean
  ManageNetworks: false

  # The availability zone where new Nova compute nodes will be added. If the zone does not already exist, it will be created. If left unset, it will default to the value of the stack name.
  # Type: string
  NovaComputeAvailabilityZone: ''

resource_registry:
  OS::TripleO::Network::Ports::OVNDBsVipPort: ../network/ports/noop.yaml
  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/noop.yaml
  OS::TripleO::Services::CinderVolume: ../deployment/cinder/cinder-volume-container-puppet.yaml
  # NOTE(review): path normalized from ../../deployment/... to match every
  # sibling entry, which resolves relative to the environments/ directory —
  # confirm against the tree layout.
  OS::TripleO::Services::Etcd: ../deployment/etcd/etcd-container-puppet.yaml
  OS::TripleO::Services::GlanceApiEdge: ../deployment/glance/glance-api-edge-container-puppet.yaml
  OS::TripleO::Services::HAproxyEdge: ../deployment/haproxy/haproxy-edge-container-puppet.yaml
  OS::TripleO::Services::NovaAZConfig: ../deployment/nova/nova-az-config.yaml