# certain initialization steps (run in a container) will occur
# on the role marked as primary controller or the first role listed
{%- if enabled_roles is not defined or enabled_roles == [] -%}
# On upgrade certain roles can be disabled for operator driven upgrades
# See major_upgrade_steps.j2.yaml and post-upgrade.j2.yaml
{%- set enabled_roles = roles -%}
{%- endif -%}
{%- set primary_role = [enabled_roles[0]] -%}
{%- for role in enabled_roles -%}
{%- if 'primary' in role.tags and 'controller' in role.tags -%}
{%- set _ = primary_role.pop() -%}
{%- set _ = primary_role.append(role) -%}
{%- endif -%}
{%- endfor -%}
{%- set primary_role_name = primary_role[0].name -%}
# primary role is: {{primary_role_name}}
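# Illustrative example (comment only, not part of the rendered template):
# given sample roles_data entries
#   - name: Compute
#     tags: []
#   - name: Controller
#     tags: [primary, controller]
# the loop above replaces the initial default (the first enabled role)
# with Controller, so primary_role_name renders as "Controller".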
{% set deploy_steps_max = 6 -%}
{% set update_steps_max = 6 -%}
{% set external_update_steps_max = 2 -%}
{% set pre_upgrade_rolling_steps_max = 1 -%}
{% set upgrade_steps_max = 6 -%}
{% set external_upgrade_steps_max = 3 -%}
{% set post_upgrade_steps_max = 4 -%}
{% set post_update_steps_max = 4 -%}
{% set scale_steps_max = 1 -%}
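# Note: these *_max values are used as the exclusive upper bound of the
# Jinja2 range() loops below, e.g. range(1, deploy_steps_max) with
# deploy_steps_max = 6 generates deployment steps 1 through 5.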
heat_template_version: rocky
description: >
Post-deploy configuration steps via puppet for all roles,
as defined in ../roles_data.yaml
parameters:
servers:
type: json
description: Mapping of Role name (e.g. Controller) to a list of servers
role_data:
type: json
description: Mapping of Role name (e.g. Controller) to the per-role data
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
deployment_source_hosts:
default: 'Undercloud'
type: string
description: Host or hostgroup that runs the deployment
deployment_target_hosts:
default: ''
type: string
description: >
Host or hostgroup that consists of the target systems for the deployment.
Defaults to all hosts in the current Heat stack if not set.
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
ConfigDebug:
default: false
description: Whether to run config management (e.g. Puppet) in debug mode.
type: boolean
HideSensitiveLogs:
default: true
type: boolean
description: >
Set it to false if you don't want to activate the no_log feature within
ansible modules.
EnablePuppet:
default: true
description: Whether to run the puppet (baremetal) deployment tasks.
type: boolean
EnablePaunch:
default: false
description: >
(DEPRECATED) Whether to run paunch during container deployment tasks.
type: boolean
DockerPuppetDebug:
type: boolean
default: false
description: Set to True to enable debug logging when running Puppet inside containers
DockerPuppetProcessCount:
type: number
default: 6
description: Number of concurrent processes to use when running container-puppet to generate config files.
ContainerCli:
type: string
default: 'podman'
description: CLI tool used to manage containers.
constraints:
- allowed_values: ['docker', 'podman']
DockerPuppetMountHostPuppet:
type: boolean
default: true
description: Whether containerized puppet executions use modules from the baremetal host. Defaults to true. Can be set to false to consume puppet modules from containers directly.
ContainerLogStdoutPath:
type: string
description: Absolute path for container stdout output (Podman only)
default: /var/log/containers/stdouts
ContainerHealthcheckDisabled:
type: boolean
description: Whether to disable the container healthchecks.
default: false
SELinuxMode:
default: 'enforcing'
description: Configures SELinux mode
type: string
constraints:
- allowed_values: [ 'enforcing', 'permissive', 'disabled' ]
{% for role in enabled_roles %}
{{role.name}}Count:
description: Number of {{role.name}} nodes to deploy
type: number
default: {{role.CountDefault|default(0)}}
{% endfor %}
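# Illustrative rendering (comment only): for a Controller role with
# CountDefault: 1 the loop above produces
#   ControllerCount:
#     description: Number of Controller nodes to deploy
#     type: number
#     default: 1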
ServiceNetMapLower:
description: Mapping of service name to network name
type: json
default: {}
ValidateControllersIcmp:
default: true
description: Validation to ensure that all controllers can be reached with ICMP
type: boolean
ValidateGatewaysIcmp:
default: true
description: Validation to ensure that all gateways can be reached with ICMP
type: boolean
ValidateFqdn:
default: false
description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
type: boolean
PingTestIpsMap:
default: ''
description: A map of role name to a space separated list of IP addresses used to ping test each available network interface.
type: json
StackAction:
type: string
description: >
Heat action performed on the top-level stack. Note StackUpdateType is
set to UPGRADE when a major-version upgrade is in progress.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
NetworkSafeDefaults:
default: true
description: Whether to apply safe networking defaults when os-net-config fails to apply the provided configuration.
type: boolean
DeployArtifactURLs:
default: []
description: A list of HTTP URLs containing deployment artifacts to be pulled.
Currently supports tarballs and RPM packages.
type: comma_delimited_list
DeployArtifactFILEs:
default: []
description: A list of files containing deployment artifacts to be pushed.
Currently supports tarballs and RPM packages.
type: comma_delimited_list
HostsEntry:
default: []
type: comma_delimited_list
description: A list of entries to be added to /etc/hosts on each node.
AnsibleHostVarsMap:
type: json
default: {}
StackUpdateType:
type: string
description: >
Type of update, to differentiate between UPGRADE and UPDATE cases
when StackAction is UPDATE (both are the same stack action).
constraints:
- allowed_values: ['', 'UPGRADE']
default: ''
EnabledServices:
default: []
type: comma_delimited_list
ControlVirtualIP:
type: string
EnabledNetworks:
type: comma_delimited_list
NetVipMap:
type: json
{%- for network in networks if network.enabled|default(true) %}
{{network.name}}NetName:
default: {{network.name_lower}}
description: The name of the {{network.name_lower}} network.
type: string
{%- endfor %}
CloudNames:
type: json
EnableInternalTLS:
type: boolean
default: false
CloudDomain:
default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This must match the
overcloud_domain_name configured on the undercloud.
NovaAdditionalCell:
default: false
description: Whether this is an additional cell to the default cell.
type: boolean
AllNodesExtraMapData:
default: {}
type: json
description: Map of extra data (hieradata) to set on each node.
UndercloudHostsEntries:
default: []
description: >
List of undercloud hosts entries to be appended to /etc/hosts. The
value is populated with the HEAT_HOSTS entries on the undercloud by
tripleoclient when running deploy.
type: comma_delimited_list
ExtraHostsEntries:
default: []
description: List of extra hosts entries to be appended to /etc/hosts
type: comma_delimited_list
VipHostsEntries:
default: []
description: List of VIP (virtual IP) hosts entries to be appended to /etc/hosts
type: comma_delimited_list
KeystoneResourcesConfigs:
description: The keystone resources config.
type: json
default: {}
RootStackName:
description: The name of the stack/plan.
type: string
NetCidrMap:
description: Mapping of CIDRs to network name
type: json
default: {}
parameter_groups:
- label: deprecated
description: |
The following parameters are deprecated and will be removed. They should not
be relied on for new deployments. If you have concerns regarding deprecated
parameters, please contact the TripleO development team on IRC or the
OpenStack mailing list.
parameters:
- EnablePaunch
conditions:
{% for role in enabled_roles %}
{{role.name}}NonZero:
not:
equals:
- {get_param: {{role.name}}Count}
- 0
{% endfor %}
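# Illustrative rendering (comment only): for the Controller role the loop
# above produces a condition equivalent to
#   ControllerNonZero:
#     not:
#       equals:
#       - {get_param: ControllerCount}
#       - 0
# which gates the per-role resources below on a non-zero node count.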
resources:
ExternalDeployTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# processing from per-role unique tasks into globally unique tasks
expression: coalesce($.data, []).flatten().distinct()
data:
{%- for role in enabled_roles %}
- get_param: [role_data, {{role.name}}, external_deploy_tasks]
{%- endfor %}
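# Illustrative (comment only): if two roles contribute
#   [[taskA, taskB], [taskB, taskC]]
# then coalesce($.data, []).flatten().distinct() evaluates to
#   [taskA, taskB, taskC]
# i.e. the per-role task lists are merged and de-duplicated globally.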
ExternalPostDeployTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# processing from per-role unique tasks into globally unique tasks
expression: coalesce($.data, []).flatten().distinct()
data:
{%- for role in enabled_roles %}
- get_param: [role_data, {{role.name}}, external_post_deploy_tasks]
{%- endfor %}
ScaleTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# processing from per-role unique tasks into globally unique tasks
expression: coalesce($.data, []).flatten().distinct()
data:
{%- for role in enabled_roles %}
- get_param: [role_data, {{role.name}}, scale_tasks]
{%- endfor %}
ExternalUpdateTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# processing from per-role unique tasks into globally unique tasks
expression: coalesce($.data, []).flatten().distinct()
data:
{%- for role in enabled_roles %}
- get_param: [role_data, {{role.name}}, external_update_tasks]
{%- endfor %}
ExternalUpgradeTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# processing from per-role unique tasks into globally unique tasks
expression: coalesce($.data, []).flatten().distinct()
data:
{%- for role in enabled_roles %}
- get_param: [role_data, {{role.name}}, external_upgrade_tasks]
{%- endfor %}
BootstrapServerId:
type: OS::Heat::Value
properties:
value:
yaql:
# Use the constant string "no_bootstrap_server" when there are no
# servers in the primary role, such as in the case when all
# Controllers are blacklisted. No server id will match that string,
# which is what we want when all are blacklisted.
expression: switch($.data = {} => "no_bootstrap_server", $.data != {} => $.data.items().orderBy($[0]).first()[1])
data: {get_param: [servers, {{primary_role_name}}]}
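# Illustrative (comment only): with a servers mapping of
#   overcloud-controller-0: uuid-a
#   overcloud-controller-1: uuid-b
# the expression returns uuid-a (the id belonging to the alphabetically
# first key); with an empty mapping it returns the constant
# "no_bootstrap_server", which matches no real server id.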
# BEGIN CONFIG STEPS, only on enabled_roles
{%- for role in enabled_roles %}
# Note, this should be the last step to execute configuration changes.
# Ensure that all {{role.name}}ExtraConfigPost steps are executed
# after all the previous deployment steps.
{{role.name}}ExtraConfigPost:
condition: {{role.name}}NonZero
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: [servers, {{role.name}}]}
EndpointMap: {get_param: EndpointMap}
# The {{role.name}}PostConfig steps are in charge of
# quiescing all services, i.e. in the Controller case,
# we should run a full service reload.
{{role.name}}PostConfig:
condition: {{role.name}}NonZero
type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{%- for dep in enabled_roles %}
- {{dep.name}}ExtraConfigPost
{%- endfor %}
properties:
servers: {get_param: servers}
input_values:
deploy_identifier: {get_param: DeployIdentifier}
{% endfor %}
outputs:
RoleConfig:
description: Mapping of config data for all roles
value:
global_vars:
deploy_steps_max: {{deploy_steps_max}}
service_net_map: {get_param: ServiceNetMapLower}
validate_controllers_icmp: {get_param: ValidateControllersIcmp}
validate_gateways_icmp: {get_param: ValidateGatewaysIcmp}
validate_fqdn: {get_param: ValidateFqdn}
ping_test_ips: {get_param: PingTestIpsMap}
stack_action: {get_param: StackAction}
network_safe_defaults: {get_param: NetworkSafeDefaults}
deploy_artifact_urls: {get_param: DeployArtifactURLs}
deploy_artifact_files: {get_param: DeployArtifactFILEs}
hosts_entry: {get_param: HostsEntry}
primary_role_name: {{ primary_role_name }}
deploy_identifier: {get_param: DeployIdentifier}
stack_update_type: {get_param: StackUpdateType}
container_cli: {get_param: ContainerCli}
enabled_services: {get_param: EnabledServices}
control_virtual_ip: {get_param: ControlVirtualIP}
enabled_networks: {get_param: EnabledNetworks}
net_vip_map: {get_param: NetVipMap}
nova_additional_cell: {get_param: NovaAdditionalCell}
hide_sensitive_logs: {get_param: HideSensitiveLogs}
{%- for network in networks if network.enabled|default(true) %}
{{network.name_lower}}_net_name: {get_param: {{network.name}}NetName}
{%- endfor %}
networks:
{%- for network in networks if network.enabled|default(true) %}
{{network.name}}:
name: {get_param: {{network.name}}NetName}
name_lower: {{ network.name_lower }}
{%- endfor %}
network_virtual_ips:
ctlplane:
ip_address: {get_param: [NetVipMap, ctlplane]}
index: 1
{%- for network in networks if network.vip|default(false) and network.enabled|default(true) %}
# External virtual ip is currently being handled separately as public_virtual_ip.
# Likewise, optional StorageNFS virtual ip is handled separately as ganesha_vip.
{%- if network.name != 'External' and network.name != 'StorageNFS' %}
{{network.name_lower}}:
ip_address: {get_param: [NetVipMap, {get_param: {{network.name}}NetName}]}
index: {{loop.index + 1}}
{%- endif %}
{%- endfor %}
cloud_names: {get_param: CloudNames}
enable_internal_tls: {get_param: EnableInternalTLS}
cloud_domain: {get_param: CloudDomain}
all_nodes_extra_map_data: {get_param: AllNodesExtraMapData}
undercloud_hosts_entries: {get_param: UndercloudHostsEntries}
extra_hosts_entries: {get_param: ExtraHostsEntries}
vip_hosts_entries: {get_param: VipHostsEntries}
keystone_resources: {get_param: KeystoneResourcesConfigs}
net_cidr_map: {get_param: NetCidrMap}
common_deploy_steps_playbooks: {get_file: deploy-steps-playbooks-common.yaml}
common_deploy_steps_tasks: {get_file: deploy-steps-tasks.yaml}
common_container_config_scripts: {get_file: common-container-config-scripts.yaml}
hiera_steps_tasks: {get_file: hiera-steps-tasks.yaml}
deploy_steps_tasks_step_0: {get_file: deploy-steps-tasks-step-0.yaml}
common_deploy_steps_tasks_step_1: {get_file: deploy-steps-tasks-step-1.yaml}
container_puppet_script: {get_file: ./container-puppet.sh}
generate-config-tasks: {get_file: generate-config-tasks.yaml}
host-container-puppet-tasks: {get_file: host-container-puppet-tasks.yaml}
deploy_steps_playbook:
{% block deploy_steps_str_replace_params %}
str_replace:
params:
BOOTSTRAP_SERVER_ID: {get_attr: [BootstrapServerId, value]}
DEPLOY_SOURCE_HOST: {get_param: deployment_source_hosts}
DEPLOY_TARGET_HOST:
if:
- equals:
- {get_param: deployment_target_hosts}
- ""
- {get_param: RootStackName}
- {get_param: deployment_target_hosts}
DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
ENABLE_DEBUG: {get_param: ConfigDebug}
ENABLE_PUPPET: {get_param: EnablePuppet}
CONTAINER_CLI: {get_param: ContainerCli}
CONTAINER_LOG_STDOUT_PATH: {get_param: ContainerLogStdoutPath}
CONTAINER_HEALTHCHECK_DISABLED: {get_param: ContainerHealthcheckDisabled}
DOCKER_PUPPET_DEBUG: {get_param: DockerPuppetDebug}
DOCKER_PUPPET_PROCESS_COUNT: {get_param: DockerPuppetProcessCount}
DOCKER_PUPPET_MOUNT_HOST_PUPPET: {get_param: DockerPuppetMountHostPuppet}
SELINUX_MODE: {get_param: SELinuxMode}
{% endblock %}
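# The params above are substituted verbatim into the playbook text below by
# Heat's str_replace, e.g. DEPLOY_SOURCE_HOST becomes the value of the
# deployment_source_hosts parameter ('Undercloud' by default) before the
# playbook is written out.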
template: |
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
- hosts: DEPLOY_TARGET_HOST
strategy: tripleo_linear
name: Manage SELinux
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
- name: Set selinux state
become: true
selinux:
policy: targeted
state: SELINUX_MODE
- hosts: all
strategy: tripleo_linear
name: Generate /etc/hosts
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
{% raw %}
- name: Configure Hosts Entries
include_role:
name: tripleo_hosts_entries
vars:
tripleo_hosts_entries_undercloud_hosts_entries: "{{ undercloud_hosts_entries }}"
tripleo_hosts_entries_extra_hosts_entries: "{{ extra_hosts_entries }}"
tripleo_hosts_entries_vip_hosts_entries: "{{ vip_hosts_entries }}"
{% endraw %}
- hosts: DEPLOY_TARGET_HOST
strategy: tripleo_linear
name: Common roles for TripleO servers
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
# pre_tasks run before any roles in a play, so we use it for the
# named debug task for --start-at-task.
pre_tasks:
- name: Common roles for TripleO servers
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Common roles for TripleO servers' to resume from this task
tasks:
- include_role:
name: tripleo_bootstrap
- include_role:
name: tripleo_ssh_known_hosts
tags:
- common_roles
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Deploy step tasks for step 0
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: 0
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- import_tasks: deploy_steps_tasks_step_0.yaml
tags:
- overcloud
- deploy_steps
- step0
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Server pre network steps
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
{% raw %}
- import_tasks: hiera_steps_tasks.yaml
- name: Server pre-network deployments
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Server pre-network deployments' to resume from this task
- include_tasks: deployments.yaml
vars:
force: false
with_items: "{{ hostvars[inventory_hostname]['pre_network_' ~ tripleo_role_name]|default([]) }}"
{% endraw %}
tags:
- overcloud
- pre_deploy_steps
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Server network deployments
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
{% raw %}
- name: Network Configuration
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Network Configuration' to resume from this task
- name: Check NetworkConfig script existence
delegate_to: localhost
stat:
path: "{{ tripleo_role_name ~ '/NetworkConfig' }}"
register: NetworkConfig_stat
when: not tripleo_network_config_with_ansible
- name: Run Network Config
import_role:
name: tripleo_network_config
vars:
tripleo_network_config_script_path: "{{ NetworkConfig_stat.stat.path }}"
tripleo_network_config_action: "{{ stack_action }}"
tripleo_network_config_network_deployment_actions: "{{ network_deployment_actions }}"
tripleo_network_config_async_timeout: "{{ async_timeout | default(300) }}"
tripleo_network_config_async_poll: "{{ async_poll | default(3) }}"
tripleo_network_config_hide_sensitive_logs: false
tripleo_network_config_legacy_script: false
tripleo_network_config_safe_defaults: "{{ network_safe_defaults | bool }}"
when:
- tripleo_network_config_with_ansible or NetworkConfig_stat.stat.exists
tags:
- overcloud
- pre_deploy_steps
- network_deploy_steps
{% endraw %}
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Server network validation
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
{% raw %}
- name: Basic Network Validation
include_role:
name: tripleo_nodes_validation
vars:
tripleo_nodes_validation_validate_controllers_icmp: "{{ validate_controllers_icmp }}"
tripleo_nodes_validation_validate_gateway_icmp: "{{ validate_gateways_icmp }}"
tripleo_nodes_validation_validate_fqdn: "{{ validate_fqdn }}"
tripleo_nodes_validation_ping_test_ips: "{{ ping_test_ips.get(tripleo_role_name).split(' ') | list | unique }}"
tags:
- overcloud
- pre_deploy_steps
- network_deploy_steps
{% endraw %}
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Server pre deployment steps
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
{% raw %}
- import_tasks: hiera_steps_tasks.yaml
- name: Server deployments
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Server deployments' to resume from this task
- include_tasks: deployments.yaml
vars:
force: false
with_items: "{{ hostvars[inventory_hostname]['pre_deployments_' ~ tripleo_role_name]|default([]) }}"
{% endraw %}
tags:
- overcloud
- pre_deploy_steps
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Host prep steps
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
{% raw %}
- name: Deploy pull Artifacts
tripleo_deploy_artifacts:
artifact_urls: "{{ deploy_artifact_urls | default([]) }}"
artifact_paths: "{{ deploy_artifact_files | default([]) }}"
when:
- ((deploy_artifact_urls | default([]) | length) > 0) or
((deploy_artifact_files | default([]) | length) > 0)
{% endraw %}
{%- for role in roles %}
- name: {{role.name}} Host prep block
when:
- tripleo_role_name == '{{role.name}}'
block:
- name: {{role.name}} Host prep steps
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task '{{role.name}} Host prep steps' to resume from this task
- include_tasks: {{role.name}}/host_prep_tasks.yaml
{%- endfor %}
tags:
- overcloud
- host_prep_steps
{%- for step in range(1,deploy_steps_max) %}
- hosts: DEPLOY_SOURCE_HOST
strategy: tripleo_free
name: External deployment step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- name: External deployment step {{step}}
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'External deployment step {{step}}' to resume from this task
- include_tasks: "external_deploy_steps_tasks_step{{step}}.yaml"
when:
- "'external_deploy_steps_tasks_step{{step}}.yaml' is exists"
tags:
- external
- external_deploy_steps
- step{{step}}
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Deploy step tasks for {{step}}
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- name: Write the config_step hieradata for the deploy step {{step}} tasks
{% raw %}
copy:
content: "{{ dict(step=step | int) | to_json }}"
dest: /etc/puppet/hieradata/config_step.json
force: true
mode: '0600'
{% endraw %}
{% if step == 1 %}
- name: Overcloud common bootstrap tasks for step 1
block:
- name: Overcloud common bootstrap tasks for step 1
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Overcloud common bootstrap tasks for step 1' to resume from this task
- name: "Check if /var/lib/tripleo-config/container-startup-config/step_{{step}} already exists"
stat:
path: "/var/lib/tripleo-config/container-startup-config/step_{{step}}"
register: container_startup_configs_json_stat
- name: Write config data at the start of step 1
include_tasks: common_deploy_steps_tasks_step_1.yaml
when:
- ((deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) or
not container_startup_configs_json_stat.stat.exists)
{% endif %}
- name: Deploy step tasks for {{step}}
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Deploy step tasks for {{step}}' to resume from this task
{%- for role in roles %}
- include_tasks: "{{role.name}}/deploy_steps_tasks_step{{step}}.yaml"
when:
- tripleo_role_name == '{{role.name}}'
- "'{{role.name}}/deploy_steps_tasks_step{{step}}.yaml' is exists"
{%- endfor %}
- name: Overcloud common deploy step tasks {{step}}
block:
- name: Overcloud common deploy step tasks {{step}}
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Overcloud common deploy step tasks {{step}}' to resume from this task
- name: "Check if /var/lib/tripleo-config/container-startup-config/step_{{step}} already exists"
stat:
path: "/var/lib/tripleo-config/container-startup-config/step_{{step}}"
register: container_startup_configs_json_stat
- include_tasks: common_deploy_steps_tasks.yaml
when: (deploy_identifier is defined and deploy_identifier != "" and deploy_identifier is not none) or
(container_startup_configs_json_stat is defined and not container_startup_configs_json_stat.stat.exists)
tags:
- overcloud
- deploy_steps
- step{{step}}
{%- endfor %}
- hosts: {{primary_role_name}}:DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Server Post Deployments
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
- name: Server Post Deployments
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'Server Post Deployments' to resume from this task
{% raw %}
- include_tasks: deployments.yaml
vars:
force: false
with_items: "{{ hostvars[inventory_hostname]['post_deployments_' ~ tripleo_role_name]|default([]) }}"
tags:
- overcloud
- post_deploy_steps
{% endraw %}
- hosts: DEPLOY_SOURCE_HOST
strategy: tripleo_linear
name: External deployment Post Deploy tasks
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- name: External deployment Post Deploy tasks
delegate_to: localhost
run_once: true
debug:
msg: Use --start-at-task 'External deployment Post Deploy tasks' to resume from this task
- import_tasks: external_post_deploy_steps_tasks.yaml
tags:
- external
- external_deploy_steps
- external_post_deploy_steps
external_deploy_steps_tasks: {get_attr: [ExternalDeployTasks, value]}
external_post_deploy_steps_tasks: {get_attr: [ExternalPostDeployTasks, value]}
update_steps_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
{%- for role in roles %}
- hosts: {{role.name}}
name: Run update
serial: "{% raw %}{{ update_serial | default({% endraw %}{{ role.update_serial | default(1) }}{% raw %})}}{% endraw %}"
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tripleo_minor_update: true
tripleo_redhat_enforce: {{role.rhsm_enforce|default("true")}}
tasks:
- import_tasks: hiera_steps_tasks.yaml
- import_tasks: common_container_config_scripts.yaml
{%- for step in range(0,update_steps_max) %}
- import_tasks: "{{ role.name }}/update_tasks_step{{ step }}.yaml"
vars:
step: "{{ step }}"
{%- endfor %}
- import_tasks: "{{role.name}}/host_prep_tasks.yaml"
when: tripleo_role_name == '{{role.name}}'
- import_tasks: deploy_steps_tasks_step_0.yaml
vars:
step: 0
- name: Write config data at the start of step 1
import_tasks: common_deploy_steps_tasks_step_1.yaml
{%- for step in range(1,deploy_steps_max) %}
- import_tasks: common_deploy_steps_tasks.yaml
vars:
step: "{{ step }}"
{%- endfor %}
{%- for step in range(0,post_update_steps_max) %}
- import_tasks: "{{ role.name }}/post_update_tasks_step{{ step }}.yaml"
vars:
step: "{{ step }}"
{%- endfor %}
{%- endfor %}
external_update_steps_tasks: {get_attr: [ExternalUpdateTasks, value]}
external_update_steps_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
{%- for step in range(external_update_steps_max) %}
- hosts: DEPLOY_SOURCE_HOST
name: External update step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- import_tasks: external_update_steps_tasks.yaml
tags:
- step{{step}}
- external
- external_update_steps
{%- endfor %}
{%- for step in range(1,deploy_steps_max) %}
# putting both update and deploy tasks in the same
# playbook allows influencing the deploy tasks by
# variables "exported" from update tasks
- hosts: DEPLOY_SOURCE_HOST
name: External deploy step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- import_tasks: external_deploy_steps_tasks.yaml
tags:
- external
- external_deploy_steps
- step{{step}}
{%- endfor %}
pre_upgrade_rolling_steps_tasks: |
{%- for role in roles %}
- include_tasks: {{role.name}}/pre_upgrade_rolling_tasks.yaml
when: tripleo_role_name == '{{role.name}}'
tags:
- always
{%- endfor %}
pre_upgrade_rolling_steps_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
{%- for role in roles %}
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST:{{role.name}}"
deploy_target_host: "DEPLOY_TARGET_HOST"
- hosts: {{role.name}}
name: Run pre-upgrade rolling tasks
serial: {{ role.deploy_serial | default(1) }}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
tasks:
- include_tasks: pre_upgrade_rolling_steps_tasks.yaml
with_sequence: start=0 end={{pre_upgrade_rolling_steps_max-1}}
loop_control:
loop_var: step
{%- endfor %}
upgrade_steps_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
{%- for step in range(0,upgrade_steps_max) %}
- hosts: DEPLOY_TARGET_HOST
strategy: tripleo_free
name: Upgrade tasks for step {{step}}
become: true
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
step: '{{step}}'
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
tasks:
{%- for role in roles %}
- include_tasks: {{role.name}}/upgrade_tasks_step{{step}}.yaml
when: tripleo_role_name == '{{role.name}}'
tags:
- always
{%- endfor %}
tags:
- upgrade_steps
- upgrade_step{{step}}
{%- endfor %}
post_upgrade_steps_tasks: |
{%- for role in roles %}
- include_tasks: {{role.name}}/post_upgrade_tasks.yaml
when: tripleo_role_name == '{{role.name}}'
tags:
- always
{%- endfor %}
post_upgrade_steps_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
- hosts: DEPLOY_TARGET_HOST
strategy: tripleo_free
any_errors_fatal: yes
tasks:
- include_tasks: post_upgrade_steps_tasks.yaml
with_sequence: start=0 end={{post_upgrade_steps_max-1}}
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
loop_control:
loop_var: step
external_upgrade_steps_tasks: {get_attr: [ExternalUpgradeTasks, value]}
external_upgrade_steps_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
{%- for step in range(external_upgrade_steps_max) %}
- hosts: DEPLOY_SOURCE_HOST
strategy: tripleo_free
name: External upgrade step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
become: false
vars:
# Explicit ansible_python_interpreter to allow connecting
# to different OS releases (EL7/8) while using delegate_to.
ansible_python_interpreter: /usr/libexec/platform-python
step: '{{step}}'
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- import_tasks: external_upgrade_steps_tasks.yaml
tags:
- step{{step}}
- external
- external_upgrade_steps
{%- endfor %}
{%- for step in range(1,deploy_steps_max) %}
# putting both upgrade and deploy tasks in the same
# playbook allows influencing the deploy tasks by
# variables "exported" from upgrade tasks
- hosts: DEPLOY_SOURCE_HOST
name: External deploy step {{step}}
gather_facts: "{% raw %}{{ gather_facts | default(false) }}{% endraw %}"
any_errors_fatal: yes
become: false
vars:
# Explicit ansible_python_interpreter to allow connecting
# to different OS releases (EL7/8) while using delegate_to.
ansible_python_interpreter: /usr/libexec/platform-python
step: '{{step}}'
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
container_healthcheck_disabled: CONTAINER_HEALTHCHECK_DISABLED
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
- import_tasks: external_deploy_steps_tasks.yaml
tags:
- step{{step}}
- external
- external_deploy_steps
{%- endfor %}
scale_steps_tasks: {get_attr: [ScaleTasks, value]}
scale_playbook:
{{ self.deploy_steps_str_replace_params() }}
template: |
# Collect the facts from the overcloud nodes but ignore unreachable
# nodes in the case of a dead node which needs to be part of the
# scale-down operation.
- import_playbook: common_deploy_steps_playbooks.yaml
vars:
deploy_source_host: "DEPLOY_SOURCE_HOST"
deploy_target_host: "DEPLOY_TARGET_HOST"
- hosts: DEPLOY_TARGET_HOST
name: Scaling
# NOTE(cloudnull): This is set to true explicitly so that we have up-to-date facts
# on all DEPLOY_TARGET_HOST when performing a scaling operation.
# Without up-to-date facts, we're creating a potential failure
# scenario.
gather_facts: true
ignore_unreachable: true
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
# Only run the scale tasks on the nodes that are alive.
# We expect the tasks to work on the nodes that are alive.
# If a task is allowed to fail, it needs to be configured at
# the task definition level but not here.
- include_tasks: scale_steps_tasks.yaml
with_sequence: start=1 end={{scale_steps_max}}
loop_control:
loop_var: step
tags: always
# we use ansible_facts['hostname'] to determine if the host is alive
# or not.
when: ansible_facts['hostname'] is defined
# We don't want to run the scale tasks on dead nodes, to allow
# the operator to scale down the cloud no matter the state of
# the servers.
# However, we notify the operator if the node wasn't reachable.
# Using fail and not debug module to make it more visible
# in the logs.
- fail:
msg: "Node is unreachable. No scale tasks will be run."
ignore_errors: True
tags: always
# we use ansible_facts['hostname'] to determine if the host is alive
# or not.
when: ansible_facts['hostname'] is not defined
tags:
- scale
post_update_steps_tasks: |
{%- for role in roles %}
- include_tasks: {{role.name}}/post_update_tasks.yaml
when: tripleo_role_name == '{{role.name}}'
tags:
- always
{%- endfor %}