92e203aab6
This submission:

- Fix an error in the AllNodesExtraConfig resource (servers can't be merged multiple times).
- Add environment files to deploy a swap file/partition without manually editing the templates.
- If a swap partition is mounted before it is available, the deployment fails; the fix checks whether the partition has been created and, if not, lets the deployment continue.
- Remove empty extra lines in the swap templates.
- Adjust descriptions and remove unnecessary comments in the swap templates.

Closes-Bug: 1652184
Change-Id: I828bbbbd4c178956aac74af49f80fcd4f62fa16b
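For reviewers, a minimal sketch of what such a swap environment file could look like, assuming a hypothetical extraconfig template path and parameter names (the actual files and keys added by this change may differ):

# Hypothetical example only; the path and parameter names are assumptions.
resource_registry:
  OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap.yaml

parameter_defaults:
  swap_size_megabytes: 4096
  swap_path: /swap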
{% set primary_role_name = roles[0].name -%}
heat_template_version: ocata

description: >
  Deploy an OpenStack environment, consisting of several node types (roles),
  Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
  roles enable independent scaling of the storage components, but the minimal
  deployment is one Controller and one Compute node.

# TODO(shadower): we should probably use the parameter groups to put
# some order in here.
parameters:

  # Common parameters (not specific to a role)
  CloudName:
    default: overcloud.localdomain
    description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
    type: string
  CloudNameInternal:
    default: overcloud.internalapi.localdomain
    description: >
      The DNS name of this cloud's internal API endpoint. E.g.
      'ci-overcloud.internalapi.tripleo.org'.
    type: string
  CloudNameStorage:
    default: overcloud.storage.localdomain
    description: >
      The DNS name of this cloud's storage endpoint. E.g.
      'ci-overcloud.storage.tripleo.org'.
    type: string
  CloudNameStorageManagement:
    default: overcloud.storagemgmt.localdomain
    description: >
      The DNS name of this cloud's storage management endpoint. E.g.
      'ci-overcloud.storagemgmt.tripleo.org'.
    type: string
  CloudNameCtlplane:
    default: overcloud.ctlplane.localdomain
    description: >
      The DNS name of this cloud's provisioning (ctlplane) endpoint. E.g.
      'ci-overcloud.ctlplane.tripleo.org'.
    type: string
  ControlFixedIPs:
    default: []
    description: >
      Control the IP allocation for the ControlVirtualIP port. E.g.
      [{'ip_address':'1.2.3.4'}]
    type: json
  InternalApiVirtualFixedIPs:
    default: []
    description: >
      Control the IP allocation for the InternalApiVirtualInterface port. E.g.
      [{'ip_address':'1.2.3.4'}]
    type: json
  NeutronControlPlaneID:
    default: 'ctlplane'
    type: string
    description: Neutron ID or name for ctlplane network.
  NeutronPublicInterface:
    default: nic1
    description: What interface to bridge onto br-ex for network nodes.
    type: string
  PublicVirtualFixedIPs:
    default: []
    description: >
      Control the IP allocation for the PublicVirtualInterface port. E.g.
      [{'ip_address':'1.2.3.4'}]
    type: json
  RabbitCookieSalt:
    type: string
    default: unset
    description: Salt for the rabbit cookie; change this to force the randomly generated rabbit cookie to change.
  StorageVirtualFixedIPs:
    default: []
    description: >
      Control the IP allocation for the StorageVirtualInterface port. E.g.
      [{'ip_address':'1.2.3.4'}]
    type: json
  StorageMgmtVirtualFixedIPs:
    default: []
    description: >
      Control the IP allocation for the StorageMgmtVirtualInterface port. E.g.
      [{'ip_address':'1.2.3.4'}]
    type: json
  RedisVirtualFixedIPs:
    default: []
    description: >
      Control the IP allocation for the virtual IP used by Redis. E.g.
      [{'ip_address':'1.2.3.4'}]
    type: json
  CloudDomain:
    default: 'localdomain'
    type: string
    description: >
      The DNS domain used for the hosts. This should match the dhcp_domain
      configured in the Undercloud neutron. Defaults to localdomain.
  ServerMetadata:
    default: {}
    description: >
      Extra properties or metadata passed to Nova for the created nodes in
      the overcloud. It's accessible via the Nova metadata API.
    type: json

  # Compute-specific params
  # FIXME(shardy) handle these deprecated names as they don't match compute.yaml
  HypervisorNeutronPhysicalBridge:
    default: 'br-ex'
    description: >
      An OVS bridge to create on each hypervisor. This defaults to br-ex the
      same as the control plane nodes, as we have a uniform configuration of
      the openvswitch agent. Typically should not need to be changed.
    type: string
  HypervisorNeutronPublicInterface:
    default: nic1
    description: What interface to add to the HypervisorNeutronPhysicalBridge.
    type: string

  # Jinja loop for Role in roles_data.yaml
{% for role in roles %}
  # Parameters generated for {{role.name}} Role
  {{role.name}}Services:
    description: A list of service resources (configured in the Heat
                 resource_registry) which represent nested stacks
                 for each service that should get installed on the {{role.name}} role.
    type: comma_delimited_list

  {{role.name}}Count:
    description: Number of {{role.name}} nodes to deploy
    type: number
    default: {{role.CountDefault|default(0)}}

  {{role.name}}HostnameFormat:
    type: string
    description: >
      Format for {{role.name}} node hostnames
      Note %index% is translated into the index of the node, e.g. 0/1/2 etc
      and %stackname% is replaced with the stack name e.g. overcloud
{% if role.HostnameFormatDefault %}
    default: "{{role.HostnameFormatDefault}}"
{% else %}
    default: "%stackname%-{{role.name.lower()}}-%index%"
{% endif %}

  {{role.name}}RemovalPolicies:
    default: []
    type: json
    description: >
      List of resources to be removed from {{role.name}} ResourceGroup when
      doing an update which requires removal of specific resources.
      Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]

{% if role.name != 'Compute' %}
  {{role.name}}SchedulerHints:
{% else %}
  NovaComputeSchedulerHints:
{% endif %}
    type: json
    description: Optional scheduler hints to pass to nova
    default: {}
{% endfor %}

  # Identifiers to trigger tasks on nodes
  UpdateIdentifier:
    default: ''
    type: string
    description: >
      Setting to a previously unused value during stack-update will trigger
      package update on all nodes
  DeployIdentifier:
    default: ''
    type: string
    description: >
      Setting this to a unique value will re-run any deployment tasks which
      perform configuration on a Heat stack-update.
  AddVipsToEtcHosts:
    default: True
    type: boolean
    description: >
      Set to true to append per-network VIPs to /etc/hosts on each node.

conditions:
  add_vips_to_etc_hosts: {equals: [{get_param: AddVipsToEtcHosts}, True]}

resources:

  VipHosts:
    type: OS::Heat::Value
    properties:
      type: string
      value:
        list_join:
        - "\n"
        - - str_replace:
              template: IP HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, external]}
                HOST: {get_param: CloudName}
          - str_replace:
              template: IP HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
                HOST: {get_param: CloudNameCtlplane}
          - str_replace:
              template: IP HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, internal_api]}
                HOST: {get_param: CloudNameInternal}
          - str_replace:
              template: IP HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, storage]}
                HOST: {get_param: CloudNameStorage}
          - str_replace:
              template: IP HOST
              params:
                IP: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
                HOST: {get_param: CloudNameStorageManagement}

  HeatAuthEncryptionKey:
    type: OS::Heat::RandomString

  PcsdPassword:
    type: OS::Heat::RandomString
    properties:
      length: 16

  HorizonSecret:
    type: OS::Heat::RandomString
    properties:
      length: 10

  ServiceNetMap:
    type: OS::TripleO::ServiceNetMap

  EndpointMap:
    type: OS::TripleO::EndpointMap
    properties:
      CloudEndpoints:
        external: {get_param: CloudName}
        internal_api: {get_param: CloudNameInternal}
        storage: {get_param: CloudNameStorage}
        storage_mgmt: {get_param: CloudNameStorageManagement}
        ctlplane: {get_param: CloudNameCtlplane}
      NetIpMap: {get_attr: [VipMap, net_ip_map]}
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}

  # Jinja loop for Role in roles_data.yaml
{% for role in roles %}
  # Resources generated for {{role.name}} Role
  {{role.name}}ServiceChain:
    type: OS::TripleO::Services
    properties:
      Services:
        get_param: {{role.name}}Services
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
      EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
      DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}

  {{role.name}}HostsDeployment:
    type: OS::Heat::StructuredDeployments
    properties:
      name: {{role.name}}HostsDeployment
      config: {get_attr: [hostsConfig, config_id]}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}

  {{role.name}}AllNodesDeployment:
    type: OS::Heat::StructuredDeployments
    depends_on:
{% for role_inner in roles %}
      - {{role_inner.name}}HostsDeployment
{% endfor %}
    properties:
      name: {{role.name}}AllNodesDeployment
      config: {get_attr: [allNodesConfig, config_id]}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
      input_values:
        # Note we have to use yaql to look up the first hostname/ip in the
        # list because heat path-based attributes operate on the attribute
        # inside the ResourceGroup, not the exposed list; see the discussion in
        # https://bugs.launchpad.net/heat/+bug/1640488
        # The coalesce is needed because $.data is None during heat validation
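        # Illustrative example (an editorial note, not from the original
        # template): for data ['overcloud-controller-0', 'overcloud-controller-1']
        # the expression below yields 'overcloud-controller-0'; for a null or
        # empty list it yields null rather than failing validation.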
        bootstrap_nodeid:
          yaql:
            expression: coalesce($.data, []).first(null)
            data: {get_attr: [{{role.name}}, hostname]}
        bootstrap_nodeid_ip:
          yaql:
            expression: coalesce($.data, []).first(null)
            data: {get_attr: [{{role.name}}, ip_address]}

  {{role.name}}AllNodesValidationDeployment:
    type: OS::Heat::StructuredDeployments
    depends_on: {{role.name}}AllNodesDeployment
    properties:
      name: {{role.name}}AllNodesValidationDeployment
      config: {get_resource: AllNodesValidationConfig}
      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}

  {{role.name}}IpListMap:
    type: OS::TripleO::Network::Ports::NetIpListMap
    properties:
      ControlPlaneIpList: {get_attr: [{{role.name}}, ip_address]}
      ExternalIpList: {get_attr: [{{role.name}}, external_ip_address]}
      InternalApiIpList: {get_attr: [{{role.name}}, internal_api_ip_address]}
      StorageIpList: {get_attr: [{{role.name}}, storage_ip_address]}
      StorageMgmtIpList: {get_attr: [{{role.name}}, storage_mgmt_ip_address]}
      TenantIpList: {get_attr: [{{role.name}}, tenant_ip_address]}
      ManagementIpList: {get_attr: [{{role.name}}, management_ip_address]}
      EnabledServices: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
      ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
      NetworkHostnameMap:
        # Note (shardy) this somewhat complex yaql may be replaced
        # with a map_deep_merge function in ocata. It merges the
        # list of maps, but appends to colliding lists so we can
        # create a map of lists for all nodes for each network
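        # Illustrative example (an editorial note, not from the original
        # template): merging [{'ctlplane': ['a.ctlplane']}, {'ctlplane': ['b.ctlplane']}]
        # yields {'ctlplane': ['a.ctlplane', 'b.ctlplane']}.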
        yaql:
          expression: dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            - {get_attr: [{{role.name}}, hostname_map]}

  {{role.name}}:
    type: OS::Heat::ResourceGroup
    depends_on: Networks
    properties:
      count: {get_param: {{role.name}}Count}
      removal_policies: {get_param: {{role.name}}RemovalPolicies}
      resource_def:
        type: OS::TripleO::{{role.name}}
        properties:
          CloudDomain: {get_param: CloudDomain}
          ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
          EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
          Hostname:
            str_replace:
              template: {get_param: {{role.name}}HostnameFormat}
              params:
                '%stackname%': {get_param: 'OS::stack_name'}
          NodeIndex: '%index%'
{% if role.name != 'Compute' %}
          {{role.name}}SchedulerHints: {get_param: {{role.name}}SchedulerHints}
{% else %}
          NovaComputeSchedulerHints: {get_param: NovaComputeSchedulerHints}
{% endif %}
          ServiceConfigSettings:
            map_merge:
              - get_attr: [{{role.name}}ServiceChain, role_data, config_settings]
{% for r in roles %}
              - get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings]
{% endfor %}
              # This next step combines two yaql passes:
              # - The inner one does a deep merge on the service_config_settings for all roles
              # - The outer one filters the map based on the services enabled for the role
              #   then merges the result into one map.
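              # Illustrative example (an editorial note, not from the original
              # template): with map {'keystone': {'a': 1}, 'nova-compute': {'b': 2}}
              # and services ['nova-compute'], the outer pass selects and merges
              # only the nova-compute entry, yielding {'b': 2}.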
              - yaql:
                  expression: let(root => $) -> $.data.map.items().where($[0] in coalesce($root.data.services, [])).select($[1]).reduce($1.mergeWith($2), {})
                  data:
                    map:
                      yaql:
                        expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
                        data:
{% for r in roles %}
                          - get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings]
{% endfor %}
                    services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
          ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
          MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
          ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]}
{% endfor %}

  hostsConfig:
    type: OS::TripleO::Hosts::SoftwareConfig
    properties:
      hosts:
        list_join:
        - "\n"
        - - if:
            - add_vips_to_etc_hosts
            - {get_attr: [VipHosts, value]}
            - ''
        -
{% for role in roles %}
          - list_join:
              - "\n"
              - {get_attr: [{{role.name}}, hosts_entry]}
{% endfor %}

  allNodesConfig:
    type: OS::TripleO::AllNodes::SoftwareConfig
    properties:
      cloud_name_external: {get_param: CloudName}
      cloud_name_internal_api: {get_param: CloudNameInternal}
      cloud_name_storage: {get_param: CloudNameStorage}
      cloud_name_storage_mgmt: {get_param: CloudNameStorageManagement}
      cloud_name_ctlplane: {get_param: CloudNameCtlplane}
      enabled_services:
        list_join:
          - ','
{% for role in roles %}
          - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
{% endfor %}
      logging_groups:
        yaql:
          expression: >
            $.data.groups.flatten()
          data:
            groups:
{% for role in roles %}
              - {get_attr: [{{role.name}}ServiceChain, role_data, logging_groups]}
{% endfor %}
      logging_sources:
        yaql:
          expression: >
            $.data.sources.flatten()
          data:
            sources:
{% for role in roles %}
              - {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]}
{% endfor %}
      controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
      controller_names: {get_attr: [{{primary_role_name}}, hostname]}
      service_ips:
        # Note (shardy) this somewhat complex yaql may be replaced
        # with a map_deep_merge function in ocata. It merges the
        # list of maps, but appends to colliding lists when a service
        # is deployed on more than one role
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, service_ips]}
{% endfor %}
      service_node_names:
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, service_hostnames]}
{% endfor %}
      short_service_node_names:
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
{% endfor %}
      short_service_bootstrap_node:
        yaql:
          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten().first()]))
          data:
            l:
{% for role in roles %}
              - {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]}
{% endfor %}
      # FIXME(shardy): These require further work to move into service_ips
      memcache_node_ips: {get_attr: [{{primary_role_name}}IpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
      NetVipMap: {get_attr: [VipMap, net_ip_map]}
      RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
      ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
      DeployIdentifier: {get_param: DeployIdentifier}
      UpdateIdentifier: {get_param: UpdateIdentifier}

  MysqlRootPassword:
    type: OS::Heat::RandomString
    properties:
      length: 10

  RabbitCookie:
    type: OS::Heat::RandomString
    properties:
      length: 20
      salt: {get_param: RabbitCookieSalt}

  DefaultPasswords:
    type: OS::TripleO::DefaultPasswords
    properties:
      DefaultMysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
      DefaultRabbitCookie: {get_attr: [RabbitCookie, value]}
      DefaultHeatAuthEncryptionKey: {get_attr: [HeatAuthEncryptionKey, value]}
      DefaultPcsdPassword: {get_attr: [PcsdPassword, value]}
      DefaultHorizonSecret: {get_attr: [HorizonSecret, value]}

  # creates the network architecture
  Networks:
    type: OS::TripleO::Network

  ControlVirtualIP:
    type: OS::TripleO::Network::Ports::ControlPlaneVipPort
    depends_on: Networks
    properties:
      name: control_virtual_ip
      network: {get_param: NeutronControlPlaneID}
      fixed_ips: {get_param: ControlFixedIPs}
      replacement_policy: AUTO

  RedisVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::RedisVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
      PortName: redis_virtual_ip
      NetworkName: {get_attr: [ServiceNetMap, service_net_map, RedisNetwork]}
      ServiceName: redis
      FixedIPs: {get_param: RedisVirtualFixedIPs}

  # The public VIP is on the External net, falls back to ctlplane
  PublicVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::ExternalVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
      PortName: public_virtual_ip
      FixedIPs: {get_param: PublicVirtualFixedIPs}

  InternalApiVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::InternalApiVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      PortName: internal_api_virtual_ip
      FixedIPs: {get_param: InternalApiVirtualFixedIPs}

  StorageVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::StorageVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      PortName: storage_virtual_ip
      FixedIPs: {get_param: StorageVirtualFixedIPs}

  StorageMgmtVirtualIP:
    depends_on: Networks
    type: OS::TripleO::Network::Ports::StorageMgmtVipPort
    properties:
      ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      PortName: storage_management_virtual_ip
      FixedIPs: {get_param: StorageMgmtVirtualFixedIPs}

  VipMap:
    type: OS::TripleO::Network::Ports::NetVipMap
    properties:
      ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
      ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
      ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
      InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
      InternalApiIpUri: {get_attr: [InternalApiVirtualIP, ip_address_uri]}
      StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
      StorageIpUri: {get_attr: [StorageVirtualIP, ip_address_uri]}
      StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
      StorageMgmtIpUri: {get_attr: [StorageMgmtVirtualIP, ip_address_uri]}
      # No tenant or management VIP required

  # All Nodes Validations
  AllNodesValidationConfig:
    type: OS::TripleO::AllNodes::Validation
    properties:
      PingTestIps:
        list_join:
        - ' '
        - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
          - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
          - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
          - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
          - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
          - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}

  UpdateWorkflow:
    type: OS::TripleO::Tasks::UpdateWorkflow
    depends_on:
{% for role in roles %}
      - {{role.name}}AllNodesDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
      input_values:
        deploy_identifier: {get_param: DeployIdentifier}
        update_identifier: {get_param: UpdateIdentifier}

  # Optional ExtraConfig for all nodes - all roles are passed in here, but
  # the nested template may configure each role differently (or not at all)
  AllNodesExtraConfig:
    type: OS::TripleO::AllNodesExtraConfig
    depends_on:
      - UpdateWorkflow
{% for role in roles %}
      - {{role.name}}AllNodesValidationDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}

  # Upgrade steps for all roles
  AllNodesUpgradeSteps:
    type: OS::TripleO::UpgradeSteps
    depends_on:
{% for role in roles %}
      - {{role.name}}AllNodesDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
      role_data:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}

  # Post deployment steps for all roles
  AllNodesDeploySteps:
    type: OS::TripleO::PostDeploySteps
    depends_on: AllNodesUpgradeSteps
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
      role_data:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}

outputs:
  ManagedEndpoints:
    description: Asserts that the keystone endpoints have been provisioned.
    value: true
  KeystoneURL:
    description: URL for the Overcloud Keystone service
    value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]}
  KeystoneAdminVip:
    description: Keystone Admin VIP endpoint
    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
  EndpointMap:
    description: |
      Mapping of the resources with the needed info for their endpoints.
      This includes the protocol used, the IP, port and also a full
      representation of the URI.
    value: {get_attr: [EndpointMap, endpoint_map]}
  HostsEntry:
    description: |
      The content that should be appended to your /etc/hosts if you want to get
      hostname-based access to the deployed nodes (useful for testing without
      setting up a DNS).
    value:
      list_join:
      - "\n"
      - - {get_attr: [hostsConfig, hosts_entries]}
      - - {get_attr: [VipHosts, value]}
  EnabledServices:
    description: The services enabled on each role
    value:
{% for role in roles %}
      {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
{% endfor %}
  RoleData:
    description: The configuration data associated with each role
    value:
{% for role in roles %}
      {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}