tripleo-heat-templates/overcloud.j2.yaml

{%- set primary_role = [roles[0]] -%}
{%- for role in roles -%}
{%- if 'primary' in role.tags and 'controller' in role.tags -%}
{%- set _ = primary_role.pop() -%}
{%- set _ = primary_role.append(role) -%}
{%- endif -%}
{%- endfor -%}
{%- set primary_role_name = primary_role[0].name -%}
# primary role is: {{primary_role_name}}
heat_template_version: rocky
description: >
Deploy an OpenStack environment, consisting of several node types (roles),
Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
roles enable independent scaling of the storage components, but the minimal
deployment is one Controller and one Compute node.
# TODO(shadower): we should probably use the parameter groups to put
# some order in here.
parameters:
# Common parameters (not specific to a role)
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
CloudName:
default: overcloud.localdomain
description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
type: string
# TODO (dsneddon) Legacy name, eventually refactor to match network name
PublicVirtualFixedIPs:
default: []
description: >
Control the IP allocation for the PublicVirtualInterface port. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
CloudNameInternal:
default: overcloud.{{network.name.lower()}}.localdomain
description: >
The DNS name of this cloud's {{network.name_lower}} endpoint. E.g.
'ci-overcloud.{{network.name.lower()}}.tripleo.org'.
type: string
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
CloudNameStorageManagement:
default: overcloud.{{network.name.lower()}}.localdomain
description: >
The DNS name of this cloud's {{network.name_lower}} endpoint. E.g.
'ci-overcloud.{{network.name.lower()}}.tripleo.org'.
type: string
{%- else %}
CloudName{{network.name}}:
default: overcloud.{{network.name.lower()}}.localdomain
description: >
The DNS name of this cloud's {{network.name_lower}} endpoint. E.g.
'ci-overcloud.{{network.name.lower()}}.tripleo.org'.
type: string
{%- endif %}
{{network.name}}VirtualFixedIPs:
default: []
description: >
Control the IP allocation for the {{network.name}}VirtualInterface port. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
{%- endfor %}
CloudNameCtlplane:
default: overcloud.ctlplane.localdomain
description: >
The DNS name of this cloud's provisioning network endpoint. E.g.
'ci-overcloud.ctlplane.tripleo.org'.
type: string
ExtraHostFileEntries:
default: []
description: List of extra hosts entries to be appended to /etc/hosts
type: comma_delimited_list
EndpointMapOverride:
default: {}
description: Can be used to override the calculated EndpointMap
type: json
ExtraConfig:
default: {}
description: |
Additional hiera configuration to inject into the cluster.
type: json
NeutronControlPlaneID:
default: 'ctlplane'
type: string
description: Neutron ID or name for ctlplane network.
NeutronPhysicalBridge:
default: 'br-ex'
description: An OVS bridge to create for accessing external networks.
type: string
NeutronPublicInterface:
default: nic1
description: Which interface to add to the NeutronPhysicalBridge.
type: string
ControlPlaneSubnet:
description: The name of the undercloud Neutron control plane subnet
default: ctlplane-subnet
type: string
ControlPlaneSubnetCidr:
default: ''
description: >
The subnet CIDR of the control plane network. (The parameter is
automatically resolved from the ctlplane subnet's cidr attribute.)
type: string
ControlFixedIPs:
default: []
description: >
Control the IP allocation for the ControlVirtualIP port. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
RabbitCookieSalt:
type: string
default: unset
description: Salt for the rabbit cookie; change this to force the randomly generated rabbit cookie to change.
RedisVirtualFixedIPs:
default: []
description: >
Control the IP allocation for the virtual IP used by Redis. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
OVNDBsVirtualFixedIPs:
default: []
description: >
Control the IP allocation for the virtual IP used by OVN DBs. E.g.
[{'ip_address':'1.2.3.4'}]
type: json
CloudDomain:
default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This must match the
overcloud_domain_name configured on the undercloud.
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
Heat actions on which to apply network configuration changes
default: ['CREATE']
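# Illustrative only (an assumed environment file snippet, not part of this
# template): to also re-apply network configuration on stack updates, a
# deployment could set e.g.
#   parameter_defaults:
#     NetworkDeploymentActions: ['CREATE', 'UPDATE']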
# Compute-specific params
# FIXME(shardy) handle these deprecated names as they don't match compute.yaml
HypervisorNeutronPhysicalBridge:
default: 'br-ex'
description: >
An OVS bridge to create on each hypervisor. This defaults to br-ex, the
same as the control plane nodes, as we have a uniform configuration of
the openvswitch agent. Typically should not need to be changed.
type: string
HypervisorNeutronPublicInterface:
default: nic1
description: What interface to add to the HypervisorNeutronPhysicalBridge.
type: string
NodeCreateBatchSize:
default: 30
description: Maximum batch size for creating nodes
type: number
NovaAdditionalCell:
default: false
description: Whether this is an additional cell to the default cell.
type: boolean
NovaLocalMetadataPerCell:
default: false
description: >
Indicates that the nova-metadata API service has been deployed
per-cell, so that we can have better performance and data isolation in a
multi-cell deployment. Users should consider the use of this configuration
depending on how neutron is set up. If networks span cells, you might need
to run nova-metadata API service globally. If your networks are segmented
along cell boundaries, then you can run nova-metadata API service per cell.
When running nova-metadata API service per cell, you should also configure
each Neutron metadata-agent to point to the corresponding nova-metadata API
service.
type: boolean
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
{{role.name}}ExtraConfig:
default: {}
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
{%- if role.deprecated_param_extraconfig is defined %}
{{role.deprecated_param_extraconfig}}:
default: {}
description: |
DEPRECATED use {{role.name}}ExtraConfig instead
type: json
{%- endif %}
# Parameters generated for {{role.name}} Role
{{role.name}}Services:
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the {{role.name}} role.
type: comma_delimited_list
{{role.name}}NetworkDeploymentActions:
type: comma_delimited_list
description: >
Heat actions on which to apply network configuration changes
default: []
{{role.name}}AnyErrorsFatal:
default: yes
type: string
{#- We generally won't want any failures on HA Controller roles; a 15% threshold means any single node failure #}
{#- fails the deploy for a 3 or 5 node role, making it a fairly safe default. #}
{{role.name}}MaxFailPercentage:
default: 15
type: number
{{role.name}}Count:
description: Number of {{role.name}} nodes to deploy
type: number
default: {{role.CountDefault|default(0)}}
{{role.name}}HostnameFormat:
type: string
description: >
Format for {{role.name}} node hostnames
Note %index% is translated into the index of the node, e.g. 0/1/2 etc
and %stackname% is replaced with the stack name e.g. overcloud
{% if role.HostnameFormatDefault %}
default: "{{role.HostnameFormatDefault}}"
{% else %}
default: "%stackname%-{{role.name.lower()}}-%index%"
{% endif %}
{{role.name}}RemovalPolicies:
default: []
type: json
description: >
List of resources to be removed from {{role.name}} ResourceGroup when
doing an update which requires removal of specific resources.
Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
{{role.name}}RemovalPoliciesMode:
default: append
type: string
description: >
How to handle change to RemovalPolicies for {{role.name}}
ResourceGroup when doing an update. Default mode 'append' will
append to the existing blacklist and 'update' will replace
the blacklist.
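# Illustrative only (an assumed environment file snippet, not part of this
# template): to scale a role down by removing the node at index 1, an
# operator might combine these parameters as e.g.
#   parameter_defaults:
#     ComputeCount: 2
#     ComputeRemovalPolicies: [{'resource_list': ['1']}]
#     ComputeRemovalPoliciesMode: update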
{{role.name}}SchedulerHints:
type: json
description: Optional scheduler hints to pass to nova
default: {}
{%- if role.deprecated_param_scheduler_hints is defined %}
{{role.deprecated_param_scheduler_hints}}:
type: json
description: DEPRECATED - use {{role.name}}SchedulerHints instead
default: {}
{%- endif %}
{{role.name}}Parameters:
type: json
description: Optional role-specific parameters to be provided to the service
default: {}
{% endfor %}
# Identifiers to trigger tasks on nodes
UpdateIdentifier:
default: ''
type: string
description: >
Setting to a previously unused value during stack-update will trigger
package update on all nodes
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
AddVipsToEtcHosts:
default: True
type: boolean
description: >
Set to true to append per-network VIPs to /etc/hosts on each node.
DeploymentServerBlacklist:
default: []
type: comma_delimited_list
description: >
List of server hostnames to blacklist from any triggered deployments.
GlobalConfigExtraMapData:
type: json
default: {}
description: Map of extra global_config_settings data to set on each node.
{% for role in roles %}
{%- if role.deprecated_param_scheduler_hints is defined or role.deprecated_param_extraconfig is defined %}
{%- if not parameter_groups_defined|default(false) %}
parameter_groups:
- label: deprecated
description: Do not use deprecated params, they will be removed.
parameters:
{%- set parameter_groups_defined = true %}
{%- endif %}
{%- endif %}
{%- if role.deprecated_param_scheduler_hints is defined %}
- {{role.deprecated_param_scheduler_hints}}
{%- endif %}
{%- if role.deprecated_param_extraconfig is defined %}
- {{role.deprecated_param_extraconfig}}
{%- endif %}
{%- endfor %}
conditions:
add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}
control_fixed_ip_not_set: {equals : [{get_param: ControlFixedIPs}, []]}
ctlplane_subnet_cidr_set:
not:
equals: [{get_param: ControlPlaneSubnetCidr}, '']
{%- for network in networks if network.name != 'External' %}
{{network.name_lower}}_virtual_fixed_ip_set:
not:
equals:
- get_param: {{network.name}}VirtualFixedIPs
- []
{%- endfor %}
public_virtual_fixed_ip_set:
not:
equals:
- get_param: PublicVirtualFixedIPs
- []
redis_virtual_fixed_ip_set:
not:
equals:
- get_param: RedisVirtualFixedIPs
- []
ovn_dbs_virtual_fixed_ip_set:
not:
equals:
- get_param: OVNDBsVirtualFixedIPs
- []
set_default_mysql_cell_internal:
or:
- equals:
- get_param: NovaAdditionalCell
- true
- and:
- equals:
- get_param: NovaAdditionalCell
- false
- equals:
- get_param: [EndpointMapOverride, MysqlCellInternal]
- ''
{%- for role in roles %}
{{role.name}}_network_deployment_actions_exists:
not:
equals:
- {get_param: {{role.name}}NetworkDeploymentActions}
- []
{%- endfor %}
set_default_nova_vnc_proxy_cell_public:
or:
- equals:
- get_param: NovaAdditionalCell
- true
- and:
- equals:
- get_param: NovaAdditionalCell
- false
- equals:
- get_param: [EndpointMapOverride, NovaVNCProxyCellPublic]
- ''
set_default_nova_metadata_cell_internal:
or:
- equals:
- get_param: NovaLocalMetadataPerCell
- true
- and:
- equals:
- get_param: NovaLocalMetadataPerCell
- false
- equals:
- get_param: [EndpointMapOverride, NovaMetadataCellInternal]
- ''
resources:
VipHosts:
type: OS::Heat::Value
properties:
type: string
value:
list_join:
- "\n"
- - str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
HOST: {get_param: CloudNameCtlplane}
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudName}
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudNameInternal}
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudNameStorageManagement}
{%- else %}
- str_replace:
template: IP HOST
params:
IP: {get_attr: [VipMap, net_ip_map, {{network.name_lower}}]}
HOST: {get_param: CloudName{{network.name}}}
{%- endif %}
{%- endfor %}
HeatAuthEncryptionKey:
type: OS::TripleO::RandomString
PcsdPassword:
type: OS::TripleO::RandomString
properties:
length: 16
HorizonSecret:
type: OS::TripleO::RandomString
properties:
length: 64
NetCidrMapValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_merge:
- {get_attr: [Networks, net_cidr_map]}
# NOTE(hjensas): When ctlplane network and subnets are created by the
# undercloud installer, the subnet cidrs are added as tags.
- ctlplane: {get_attr: [ControlVirtualIP, network, tags]}
- keys:
ctlplane: {get_param: NeutronControlPlaneID}
ServiceNetMap:
type: OS::TripleO::ServiceNetMap
EndpointMap:
type: OS::TripleO::EndpointMap
properties:
CloudEndpoints:
ctlplane: {get_param: CloudNameCtlplane}
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
{{network.name_lower}}: {get_param: CloudName}
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
{{network.name_lower}}: {get_param: CloudNameInternal}
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
{{network.name_lower}}: {get_param: CloudNameStorageManagement}
{%- else %}
{{network.name_lower}}: {get_param: CloudName{{network.name}}}
{%- endif %}
{%- endfor %}
NetIpMap: {get_attr: [VipMap, net_ip_map]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMapData:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
- {get_attr: [EndpointMap, endpoint_map]}
- {get_param: EndpointMapOverride}
# For parent stack we must set these to the local endpoints
# For split-controlplane stacks that are nova cells we must set
# these to the local endpoints
# For split-controlplane stacks that are not nova cells we should
# take these from EndpointMapOverride (i.e. the parent stack)
- if:
- set_default_mysql_cell_internal
- MysqlCellInternal: {get_attr: [EndpointMap, endpoint_map, MysqlInternal]}
- {}
- if:
- set_default_nova_vnc_proxy_cell_public
- NovaVNCProxyCellPublic: {get_attr: [EndpointMap, endpoint_map, NovaVNCProxyPublic]}
- {}
- if:
- set_default_nova_metadata_cell_internal
- NovaMetadataCellInternal: {get_attr: [EndpointMap, endpoint_map, NovaMetadataInternal]}
- {}
# Creates the "heat-admin" user if configured via the environment
# Should return an OS::Heat::MultipartMime reference via OS::stack_id
NodeAdminUserData:
type: OS::TripleO::NodeAdminUserData
# Bootstraps an ntp configuration and includes a hardware clock sync
# for containers.
# Should return an OS::Heat::MultipartMime reference via OS::stack_id
NodeTimesyncUserData:
type: OS::TripleO::NodeTimesyncUserData
# For optional operator additional userdata
# Should return an OS::Heat::MultipartMime reference via OS::stack_id
NodeUserData:
type: OS::TripleO::NodeUserData
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
{{role.name}}ServiceChain:
type: OS::TripleO::{{role.name}}Services
properties:
Services:
get_param: {{role.name}}Services
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
ServiceData:
net_cidr_map: {get_attr: [NetCidrMapValue, value]}
EndpointMap: {get_attr: [EndpointMapData, value]}
DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
RoleName: {{role.name}}
RoleParameters:
map_merge:
- {{role.RoleParametersDefault|default({})}}
- get_param: {{role.name}}Parameters
# Lookup of role_data via heat outputs is slow, so work around this by caching
# the value in an OS::Heat::Value resource
{{role.name}}ServiceChainRoleData:
type: OS::Heat::Value
properties:
type: json
value: {get_attr: [{{role.name}}ServiceChain, role_data]}
{{role.name}}NetworkDeploymentActionsValue:
type: OS::Heat::Value
properties:
value:
- if:
- {{role.name}}_network_deployment_actions_exists
- {get_param: {{role.name}}NetworkDeploymentActions}
- {get_param: NetworkDeploymentActions}
{{role.name}}ConfigData:
type: OS::Heat::Value
properties:
type: json
value:
service_configs: {get_attr: [{{role.name}}ServiceConfigSettings, value]}
role_extraconfig:
map_merge:
- tripleo::profile::base::metrics::collectd::sensubility::subscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
{%- if role.deprecated_param_extraconfig is defined %}
- {get_param: {{role.deprecated_param_extraconfig}}}
{%- endif %}
- {get_param: {{role.name}}ExtraConfig}
extraconfig: {get_param: ExtraConfig}
hieradata_files:
- '"%{::uuid}"'
- fqdn
- docker_puppet # Optionally provided by container-puppet.py
- heat_config_%{::deploy_config_name}
- config_step
- role_extraconfig
- extraconfig
- service_configs
- cloud_domain
- bootstrap_node # provided by tripleo-hieradata
- all_nodes # provided by tripleo-hieradata
- vip_data # provided by tripleo-hieradata
- net_ip_map
- '"%{::osfamily}"'
# The following are required for compatibility with the Controller role
# where some vendor integrations added hieradata via ExtraConfigPre
- neutron_bigswitch_data # Optionally provided by Controller/ComputeExtraConfigPre
# Special variable for upgrade
- upgrade
{{role.name}}ServiceConfigSettings:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
- get_param: GlobalConfigExtraMapData
- get_attr: [{{role.name}}ServiceChainRoleData, value, config_settings]
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChainRoleData, value, global_config_settings]
{% endfor %}
# This next step combines two yaql passes:
# - The inner one does a deep merge on the service_config_settings for all roles
# - The outer one filters the map based on the services enabled for the role
# then merges the result into one map.
- yaql:
expression: let(root => $) -> $.data.map.items().where($[0] in coalesce($root.data.services, [])).select($[1]).reduce($1.mergeWith($2), {})
data:
map:
yaql:
expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
data:
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChainRoleData, value, service_config_settings]
{% endfor %}
services: {get_attr: [{{role.name}}ServiceNames, value]}
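# Illustrative sketch of the two yaql passes above (service names and
# settings are made up): given a deep-merged map such as
#   {nova: {nova::setting: 1}, glance: {glance::setting: 2}}
# and role services [nova, keystone], only the 'nova' entry is selected
# and merged into this role's config_settings.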
# Filter any null/None service_names which may be present due to mapping
# of services to OS::Heat::None
{{role.name}}ServiceNames:
type: OS::Heat::Value
depends_on: {{role.name}}ServiceChain
properties:
type: comma_delimited_list
value:
yaql:
expression: let(root => $) -> $.data.extra_services.items().where($[0] in coalesce($root.data.enabled_services, [])).select($[1]).flatten() + coalesce($root.data.enabled_services, [])
data:
enabled_services: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_names]}
extra_services:
# If anything other than keystone needs this
# then we should add an extra_networks interface
# to the service templates role_data but for
# now we hard-code the keystone special case
keystone:
- keystone_admin_api
- keystone_public_api
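# Illustrative effect of the expression above: if 'keystone' is among the
# enabled service_names, 'keystone_admin_api' and 'keystone_public_api' are
# added to the service list; otherwise the enabled services pass through
# unchanged.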
{{role.name}}IpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
properties:
ControlPlaneIpList: {get_attr: [{{role.name}}, ip_address]}
{%- for network in networks %}
{%- if network.enabled|default(true) and network.name in role.networks|default([]) %}
{{network.name}}IpList: {get_attr: [{{role.name}}, {{network.name_lower}}_ip_address]}
{%- else %}
{{network.name}}IpList: {get_attr: [{{role.name}}, ip_address]}
{%- endif %}
{%- endfor %}
EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
NetworkHostnameMap: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{{role.name}}NetworkHostnameMap:
type: OS::Heat::Value
properties:
type: json
value:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
# list of maps, but appends to colliding lists so we can
# create a map of lists for all nodes for each network
yaql:
expression: dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1]).select([$[0], $[1].flatten()]))
data:
- {get_attr: [{{role.name}}, hostname_map]}
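# Illustrative input/output for the yaql above (hostnames are made up):
#   [{ctlplane: [node-0.ctlplane]}, {ctlplane: [node-1.ctlplane]}]
# is merged into
#   {ctlplane: [node-0.ctlplane, node-1.ctlplane]}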
# Combine the NodeAdminUserData and NodeUserData mime archives
{{role.name}}UserData:
type: OS::Heat::MultipartMime
properties:
parts:
- config: {get_resource: NodeAdminUserData}
type: multipart
- config: {get_resource: NodeTimesyncUserData}
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
- config: {get_resource: {{role.name}}RoleUserData}
type: multipart
# For optional operator role-specific userdata
# Should return an OS::Heat::MultipartMime reference via OS::stack_id
{{role.name}}RoleUserData:
type: OS::TripleO::{{role.name}}::NodeUserData
{{role.name}}:
type: OS::Heat::ResourceGroup
depends_on: Networks
update_policy:
batch_create:
max_batch_size: {get_param: NodeCreateBatchSize}
properties:
count: {get_param: {{role.name}}Count}
removal_policies: {get_param: {{role.name}}RemovalPolicies}
removal_policies_mode: {get_param: {{role.name}}RemovalPoliciesMode}
resource_def:
type: OS::TripleO::{{role.name}}
properties:
CloudDomain: {get_param: CloudDomain}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMap: {get_attr: [EndpointMapData, value]}
Hostname:
str_replace:
template: {get_param: {{role.name}}HostnameFormat}
params:
'%stackname%': {get_param: 'OS::stack_name'}
NodeIndex: '%index%'
# Note, SchedulerHints must be defined here, not only in the
# nested template, as it can contain %index%
{{role.name}}SchedulerHints:
map_merge:
{%- if role.deprecated_param_scheduler_hints is defined %}
- {get_param: {{role.deprecated_param_scheduler_hints}}}
{%- endif %}
- {get_param: {{role.name}}SchedulerHints}
ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
RoleParameters:
map_merge:
- {{role.RoleParametersDefault|default({})}}
- get_param: {{role.name}}Parameters
UserData: {get_resource: {{role.name}}UserData}
{%- endfor %}
{%- for role in roles %}
{{role.name}}Servers:
type: OS::Heat::Value
depends_on: {{role.name}}
properties:
type: json
value:
yaql:
expression: let(servers=>switch(isDict($.data.servers) => $.data.servers, true => {})) -> $servers.deleteAll($servers.keys().where($servers[$] = null))
data:
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
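# Illustrative effect of the yaql above (values are made up): a servers map
# such as {'0': {...}, '1': null} (e.g. after a node was deleted) is reduced
# to {'0': {...}}; a non-dict input yields an empty map.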
{%- endfor %}
# This is a different format to *Servers, as it creates a map of lists
# whereas *Servers creates a map of maps with keys of the nested resource names
ServerIdMap:
type: OS::Heat::Value
properties:
value:
server_ids:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}, nova_server_resource]}
{%- endfor %}
bootstrap_server_id:
yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{primary_role_name}}, nova_server_resource]}
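# Illustrative only (IDs are made up): with nova_server_resource
# ['abc-123', 'def-456'] for the primary role, bootstrap_server_id
# evaluates to 'abc-123'; an empty role group yields null.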
# This resource just creates a dict out of the DeploymentServerBlacklist,
# which is a list. The dict is used in the role templates to set a condition
# on whether to create the deployment resources. We can't use the list
# directly because there is no way to ask Heat if a list contains a specific
# value.
DeploymentServerBlacklistDict:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
repeat:
template:
hostname: 1
for_each:
hostname: {get_param: DeploymentServerBlacklist}
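# Illustrative input/output (hostnames are made up): a blacklist of
#   ['overcloud-compute-1', 'overcloud-compute-2']
# produces
#   {overcloud-compute-1: 1, overcloud-compute-2: 1}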
HostsValue:
type: OS::Heat::Value
properties:
value:
list_join:
- "\n"
- - if:
- add_vips_to_etc_hosts
- {get_attr: [VipHosts, value]}
- ''
-
{%- for role in roles %}
- list_join:
- ""
- {get_attr: [{{role.name}}, hosts_entry]}
{%- endfor %}
- {get_param: ExtraHostFileEntries}
HostsEntryValue:
type: OS::Heat::Value
properties:
value:
list_join:
- ' '
- str_split:
- '\n'
- {get_attr: [HostsValue, value]}
CloudNames:
type: OS::Heat::Value
properties:
value:
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
{%- if network.name == 'External' %}
# Special case the External hostname param, which is CloudName
cloud_name_{{network.name_lower}}: {get_param: CloudName}
{%- elif network.name == 'InternalApi' %}
# Special case the Internal API hostname param, which is CloudNameInternal
cloud_name_{{network.name_lower}}: {get_param: CloudNameInternal}
{%- elif network.name == 'StorageMgmt' %}
# Special case StorageMgmt hostname param, which is CloudNameStorageManagement
cloud_name_{{network.name_lower}}: {get_param: CloudNameStorageManagement}
{%- else %}
cloud_name_{{network.name_lower}}: {get_param: CloudName{{network.name}}}
{%- endif %}
{%- endfor %}
cloud_name_ctlplane: {get_param: CloudNameCtlplane}
GlobalConfig:
type: OS::Heat::Value
properties:
type: json
value:
map_merge:
{% for role in roles %}
- get_attr: [{{role.name}}ServiceChainRoleData, value, global_config_settings]
{% endfor %}
MysqlRootPassword:
type: OS::TripleO::RandomString
properties:
length: 10
RabbitCookie:
type: OS::TripleO::RandomString
properties:
length: 20
salt: {get_param: RabbitCookieSalt}
DefaultPasswords:
type: OS::TripleO::DefaultPasswords
properties:
DefaultMysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
DefaultRabbitCookie: {get_attr: [RabbitCookie, value]}
DefaultHeatAuthEncryptionKey: {get_attr: [HeatAuthEncryptionKey, value]}
DefaultPcsdPassword: {get_attr: [PcsdPassword, value]}
DefaultHorizonSecret: {get_attr: [HorizonSecret, value]}
# creates the network architecture
Networks:
type: OS::TripleO::Network
properties:
CtlplaneNetworkCidrs: {get_attr: [ControlVirtualIP, network, tags]}
{%- for role in roles %}
{{role.name}}GroupVars:
type: OS::Heat::Value
properties:
value:
ctlplane_subnet_cidr:
yaql:
expression: coalesce($.data, []).where(not isEmpty($)).first().split('/')[-1]
data:
if:
- ctlplane_subnet_cidr_set
- [{get_param: ControlPlaneSubnetCidr}]
- {get_attr: [ControlVirtualIP, network, tags]}
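# Illustrative only (addresses are made up): with a ControlPlaneSubnetCidr
# of '192.0.2.0/24' (or a ctlplane network tag of the same form), the yaql
# above yields '24', the prefix length exposed as ctlplane_subnet_cidr.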
network_cidrs:
{%- for network in networks %}
{%- if network.enabled|default(true) and network.name in role.networks|default([]) %}
{{network.name}}_cidr:
yaql:
expression: coalesce($.data, []).where(not isEmpty($)).first().split('/')[-1]
data: {get_attr: [Networks, net_cidr_map, {{network.name_lower}}]}
{%- endif %}
{%- endfor %}
role_networks:
{%- for network in networks %}
{%- if network.enabled|default(true) and network.name in role.networks|default([]) %}
- {{network.name}}
{% endif %}
{% endfor %}
{%- for network in networks %}
{%- if network.enabled|default(true) and network.name in role.networks|default([]) %}
{{network.name_lower}}_cidr:
yaql:
expression: coalesce($.data, []).where(not isEmpty($)).first().split('/')[-1]
data: {get_attr: [Networks, net_cidr_map, {{network.name_lower}}]}
{% endif %}
{% endfor %}
{% endfor %}
ControlVirtualIP:
depends_on: ServiceNetMap
type: OS::TripleO::Network::Ports::ControlPlaneVipPort
properties:
name: control_virtual_ip
network: {get_param: NeutronControlPlaneID}
fixed_ips:
if:
- control_fixed_ip_not_set
- [{subnet: {get_attr: [ServiceNetMap, vip_subnet_map, ctlplane]}}]
- get_param: ControlFixedIPs
replacement_policy: AUTO
RedisVirtualIP:
depends_on: [Networks, ServiceNetMap]
type: OS::TripleO::Network::Ports::RedisVipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneSubnetCidr:
if:
- ctlplane_subnet_cidr_set
- {get_param: ControlPlaneSubnetCidr}
- {str_split: ['/', {get_attr: [ControlVirtualIP, subnets, 0, cidr]}, 1]}
ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
PortName: redis_virtual_ip
NetworkName: {get_attr: [ServiceNetMap, service_net_map, RedisNetwork]}
ServiceName: redis
FixedIPs:
if:
- redis_virtual_fixed_ip_set
- {get_param: RedisVirtualFixedIPs}
- [{subnet: {get_attr: [ServiceNetMap, vip_subnet_map, redis]}}]
OVNDBsVirtualIP:
depends_on: [Networks, ServiceNetMap]
type: OS::TripleO::Network::Ports::OVNDBsVipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneSubnetCidr:
if:
- ctlplane_subnet_cidr_set
- {get_param: ControlPlaneSubnetCidr}
- {str_split: ['/', {get_attr: [ControlVirtualIP, subnets, 0, cidr]}, 1]}
ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
PortName: ovn_dbs_virtual_ip
NetworkName: {get_attr: [ServiceNetMap, service_net_map, OvnDbsNetwork]}
ServiceName: ovn_dbs
FixedIPs:
if:
- ovn_dbs_virtual_fixed_ip_set
- {get_param: OVNDBsVirtualFixedIPs}
- [{subnet: {get_attr: [ServiceNetMap, vip_subnet_map, ovn_dbs]}}]
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
{%- if network.name == 'External' %}
# The public VIP is on the External net, falls back to ctlplane
PublicVirtualIP:
depends_on: [Networks, ServiceNetMap]
type: OS::TripleO::Network::Ports::ExternalVipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneSubnetCidr:
if:
- ctlplane_subnet_cidr_set
- {get_param: ControlPlaneSubnetCidr}
- {str_split: ['/', {get_attr: [ControlVirtualIP, subnets, 0, cidr]}, 1]}
ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
PortName: public_virtual_ip
FixedIPs:
if:
- public_virtual_fixed_ip_set
- {get_param: PublicVirtualFixedIPs}
- [{subnet: {get_attr: [ServiceNetMap, vip_subnet_map, {{network.name}}]}}]
{%- else %}
{{network.name}}VirtualIP:
depends_on: [Networks, ServiceNetMap]
type: OS::TripleO::Network::Ports::{{network.name}}VipPort
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneSubnetCidr:
if:
- ctlplane_subnet_cidr_set
- {get_param: ControlPlaneSubnetCidr}
- {str_split: ['/', {get_attr: [ControlVirtualIP, subnets, 0, cidr]}, 1]}
PortName: {{network.name_lower}}_virtual_ip
FixedIPs:
if:
- {{network.name_lower}}_virtual_fixed_ip_set
- {get_param: {{network.name}}VirtualFixedIPs}
- [{subnet: {get_attr: [ServiceNetMap, vip_subnet_map, {{network.name}}]}}]
{% endif %}
{%- endfor %}
VipMap:
type: OS::TripleO::Network::Ports::NetVipMap
properties:
ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ControlPlaneSubnetCidr:
if:
- ctlplane_subnet_cidr_set
- {get_param: ControlPlaneSubnetCidr}
- {str_split: ['/', {get_attr: [ControlVirtualIP, subnets, 0, cidr]}, 1]}
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
{%- if network.name == 'External' %}
ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
{%- else %}
{{network.name}}Ip: {get_attr: [{{network.name}}VirtualIP, ip_address]}
{{network.name}}IpUri: {get_attr: [{{network.name}}VirtualIP, ip_address_uri]}
{%- endif %}
{%- endfor %}
# No tenant or management VIP required
# Because of nested get_attr functions in the KeystoneAdminVip output, we
# can't determine which attributes of VipMap are used until after
# ServiceNetMap's attribute values are available.
depends_on: ServiceNetMap
# Optional ExtraConfig for all nodes - all roles are passed in here, but
# the nested template may configure each role differently (or not at all)
AllNodesExtraConfig:
type: OS::TripleO::AllNodesExtraConfig
properties:
servers:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{%- endfor %}
BlacklistedIpAddresses:
type: OS::Heat::Value
properties:
value:
list_concat:
{%- for role in roles %}
- {get_attr: [{{role.name}}, blacklist_ip_address]}
{%- endfor %}
AnsibleHostVars:
type: OS::Heat::Value
properties:
type: json
value:
{%- for role in roles %}
{{role.name}}:
map_merge:
list_concat:
- {get_attr: [{{role.name}}, ansible_host_vars_map]}
{%- endfor %}
BlacklistedHostnames:
type: OS::Heat::Value
properties:
value:
list_concat:
{%- for role in roles %}
- {get_attr: [{{role.name}}, blacklist_hostname]}
{%- endfor %}
# Post deployment steps for all roles
AllNodesDeploySteps:
type: OS::TripleO::PostDeploySteps
depends_on:
- AllNodesExtraConfig
properties:
servers:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{%- endfor %}
EndpointMap: {get_attr: [EndpointMapData, value]}
role_data:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{%- endfor %}
{%- for role in roles %}
{{role.name}}Count: {get_param: {{role.name}}Count}
{%- endfor %}
ServiceNetMapLower: {get_attr: [ServiceNetMap, service_net_map_lower]}
PingTestIpsMap:
{%- for role in roles %}
{{role.name}}:
list_join:
- ' '
- - yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{role.name}}, ip_address]}
{%- for network in networks %}
{%- if network.enabled|default(true) and network.name in role.networks|default([]) %}
- yaql:
expression: coalesce($.data, []).first(null)
data: {get_attr: [{{role.name}}, {{network.name_lower}}_ip_address]}
{%- endif %}
{%- endfor %}
{%- endfor %}
HostsEntry: {get_attr: [HostsEntryValue, value]}
EnabledServices:
list_concat:
{%- for role in roles %}
- {get_attr: [{{role.name}}ServiceNames, value]}
{%- endfor %}
ControlVirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
EnabledNetworks:
{%- for network in networks if network.enabled|default(true) %}
- {{ network.name }}
{%- endfor %}
NetVipMap:
map_merge:
- {get_attr: [VipMap, net_ip_map]}
- redis: {get_attr: [RedisVirtualIP, ip_address]}
- ovn_dbs: {get_attr: [OVNDBsVirtualIP, ip_address]}
CloudNames: {get_attr: [CloudNames, value]}
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
value: true
KeystoneURL:
description: URL for the Overcloud Keystone service
value: {get_attr: [EndpointMapData, value, KeystonePublic, uri_no_suffix]}
KeystoneAdminVip:
description: Keystone Admin VIP endpoint
# Note that these nested get_attr functions require a dependency
# relationship between VipMap and ServiceNetMap, since we can't determine
# which attributes of VipMap are used until after ServiceNetMap's attribute
# values are available. If this is ever reworked to not use nested
# get_attr, that dependency can be removed.
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
EndpointMap:
description: |
Mapping of the resources with the needed info for their endpoints.
This includes the protocol used, the IP, port and also a full
representation of the URI.
value: {get_attr: [EndpointMapData, value]}
HostsEntry:
description: |
The content that should be appended to your /etc/hosts if you want to get
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
value:
list_join:
- "\n"
- - {get_attr: [HostsEntryValue, value]}
- - {get_attr: [VipHosts, value]}
EnabledServices:
description: The services enabled on each role
value:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceNames, value]}
{%- endfor %}
RoleData:
description: The configuration data associated with each role
value:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{%- endfor %}
RoleConfig:
description: The configuration workflows associated with each role
value: {get_attr: [AllNodesDeploySteps, RoleConfig]}
RoleNetIpMap:
description: Mapping of each network to a list of IPs for each role
value:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
{%- endfor %}
RoleGroupVars:
description: Mapping of roles to ansible group_vars used to apply config in those roles
value:
{%- for role in roles %}
{{role.name}}:
map_merge:
- {get_attr: [{{role.name}}GroupVars, value]}
- {get_attr: [{{role.name}}ConfigData, value]}
- any_errors_fatal: {get_param: {{role.name}}AnyErrorsFatal}
max_fail_percentage: {get_param: {{role.name}}MaxFailPercentage}
neutron_physical_bridge_name: {get_param: NeutronPhysicalBridge}
neutron_public_interface_name: {get_param: NeutronPublicInterface}
network_deployment_actions: {get_attr: [{{role.name}}NetworkDeploymentActionsValue, value]}
{%- endfor %}
RoleNetHostnameMap:
description: Mapping of each network to a list of hostnames for each role
value:
{%- for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{%- endfor %}
RoleTags:
description: Tags for each role, as defined in roles_data.yaml
value:
{%- for role in roles %}
{{role.name}}: {{role.tags|default([])}}
{%- endfor %}
VipMap:
description: Mapping of each network to VIP addresses. Also includes the Redis and OVN DBs VIPs.
value:
map_merge:
- {get_attr: [VipMap, net_ip_map]}
- redis: {get_attr: [RedisVirtualIP, ip_address]}
- ovn_dbs: {get_attr: [OVNDBsVirtualIP, ip_address]}
ServerIdData:
description: Mapping of each role to a list of nova server IDs and the bootstrap ID
value: {get_attr: [ServerIdMap, value]}
BlacklistedHostnames:
description: List of blacklisted hostnames
value: {get_attr: [BlacklistedHostnames, value]}
BlacklistedIpAddresses:
description: List of blacklisted ctlplane IP addresses
value: {get_attr: [BlacklistedIpAddresses, value]}
GlobalConfig:
description: The global_config (hieradata).
value: {get_attr: [GlobalConfig, value]}
HostnameNetworkConfigMap:
description: Mapping of hostname to NetworkConfig resource
value:
map_merge:
list_concat:
{%- for role in roles %}
- {get_attr: [{{role.name}}, hostname_network_config_map]}
{%- endfor %}
AnsibleHostVarsMap:
description: Map of Ansible Host variables per role
value: {get_attr: [AnsibleHostVars, value]}