Merge "Introduce scale_tasks"

Zuul 2019-04-30 23:54:00 +00:00 committed by Gerrit Code Review
commit 694777ba05
4 changed files with 117 additions and 1 deletion

View File

@@ -24,6 +24,7 @@
{% set fast_forward_upgrade_steps_max = 9 -%}
{% set fast_forward_upgrade_prep_steps_max = 3 -%}
{% set post_update_steps_max = 4 -%}
{% set scale_steps_max = 1 -%}
heat_template_version: rocky
@@ -148,6 +149,19 @@ resources:
- get_param: [role_data, {{role.name}}, external_post_deploy_tasks]
{%- endfor %}
ScaleTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# processing from per-role unique tasks into globally unique tasks
expression: coalesce($.data, []).flatten().distinct()
data:
{%- for role in enabled_roles %}
- get_param: [role_data, {{role.name}}, scale_tasks]
{%- endfor %}
ExternalUpdateTasks:
type: OS::Heat::Value
properties:
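To illustrate what the yaql aggregation above does, here is a minimal, hypothetical example (role and task names are invented, not taken from any real template): each enabled role contributes its own scale_tasks list, and flatten().distinct() collapses those per-role lists into a single list with duplicates removed.

    # Hypothetical $.data: one scale_tasks list per enabled role
    data:
      - # Controller
        - name: Stop example API container
          tags: [down]
        - name: Stop shared metrics agent    # contributed by both roles
          tags: [down]
      - # Compute
        - name: Stop shared metrics agent
          tags: [down]
    # coalesce($.data, []).flatten().distinct() then yields:
    #   - {name: Stop example API container, tags: [down]}
    #   - {name: Stop shared metrics agent, tags: [down]}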
@@ -823,6 +837,89 @@ outputs:
tags:
- external
- external_deploy_steps
scale_steps_tasks: {get_attr: [ScaleTasks, value]}
scale_playbook:
str_replace:
params:
DEPLOY_SOURCE_HOST: {get_param: deployment_source_hosts}
DEPLOY_TARGET_HOST: {get_param: deployment_target_hosts}
DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
BOOTSTRAP_SERVER_ID: {get_attr: [BootstrapServerId, value]}
ENABLE_DEBUG: {get_param: ConfigDebug}
ENABLE_PUPPET: {get_param: EnablePuppet}
DOCKER_PUPPET_DEBUG: {get_param: DockerPuppetDebug}
DOCKER_PUPPET_PROCESS_COUNT: {get_param: DockerPuppetProcessCount}
DOCKER_PUPPET_MOUNT_HOST_PUPPET: {get_param: DockerPuppetMountHostPuppet}
CONTAINER_CLI: {get_param: ContainerCli}
CONTAINER_LOG_STDOUT_PATH: {get_param: ContainerLogStdoutPath}
template: |
- hosts: DEPLOY_SOURCE_HOST
name: Gather facts from undercloud
gather_facts: yes
become: false
tags:
- always
- facts
# Collect the facts from the overcloud nodes, but ignore unreachable
# nodes so that a dead node can still be part of the
# scale-down operation.
- hosts: DEPLOY_TARGET_HOST
name: Gather facts from overcloud
gather_facts: yes
ignore_unreachable: True
tags:
- always
- facts
- hosts: all
name: Load global variables
gather_facts: no
tasks:
- include_vars: global_vars.yaml
tags:
- always
- hosts: DEPLOY_TARGET_HOST
name: Scaling
gather_facts: no
any_errors_fatal: yes
become: false
vars:
bootstrap_server_id: BOOTSTRAP_SERVER_ID
deploy_identifier: DEPLOY_IDENTIFIER
enable_debug: ENABLE_DEBUG
enable_puppet: ENABLE_PUPPET
container_cli: CONTAINER_CLI
container_log_stdout_path: CONTAINER_LOG_STDOUT_PATH
docker_puppet_debug: DOCKER_PUPPET_DEBUG
docker_puppet_process_count: DOCKER_PUPPET_PROCESS_COUNT
docker_puppet_mount_host_puppet: DOCKER_PUPPET_MOUNT_HOST_PUPPET
tasks:
# Only run the scale tasks on the nodes that are alive, and
# expect them to succeed there. If a task is allowed to fail,
# that needs to be configured at the task definition level,
# not here.
- include_tasks: scale_steps_tasks.yaml
with_sequence: start=0 end={{scale_steps_max}}
loop_control:
loop_var: step
tags: always
# we use ansible_hostname to determine if the host is alive
# or not.
when: ansible_hostname is defined
# We don't want to run the scale tasks on dead nodes, to allow
# the operator to scale down the cloud no matter the state of
# the servers.
# However, we notify the operator if the node wasn't reachable.
# Using the fail module rather than debug to make the message
# more visible in the logs.
- fail:
msg: "Node is unreachable. No scale tasks will be run."
ignore_errors: True
tags: always
# we use ansible_hostname to determine if the host is alive
# or not.
when: ansible_hostname is not defined
tags:
- scale
fast_forward_upgrade_playbook:
str_replace:
params:
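For context, the scale_steps_tasks output above is presumably what ends up in the scale_steps_tasks.yaml file that the scale playbook includes once per step. A rough, hypothetical sketch of a task in that aggregated file (task name, service name, and step value are invented):

    # scale_steps_tasks.yaml -- illustrative sketch only
    - name: Stop example API service before removing the node
      service:
        name: example-api            # hypothetical service name
        state: stopped
      become: true                   # task-level override; the play sets become: false
      when: step|int == 1            # the file is included once per step
      tags:
        - down                       # selected only during scale-down

Because the include is driven by with_sequence: start=0 end={{scale_steps_max}} with loop_var: step, every task is evaluated at each step, so a task would typically guard itself with a when condition on the step value; the exact convention is an assumption here.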

View File

@@ -175,6 +175,16 @@ resources:
expression: coalesce($.data, []).where($ != null).select($.get('external_post_deploy_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ScaleTasks:
type: OS::Heat::Value
properties:
type: comma_delimited_list
value:
yaql:
# Note we use distinct() here to filter out identical tasks, e.g. yum update for all services
expression: coalesce($.data, []).where($ != null).select($.get('scale_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
ExternalUpdateTasks:
type: OS::Heat::Value
properties:
@@ -353,6 +363,7 @@ outputs:
external_post_deploy_tasks: {get_attr: [ExternalPostDeployTasks, value]}
external_update_tasks: {get_attr: [ExternalUpdateTasks, value]}
external_upgrade_tasks: {get_attr: [ExternalUpgradeTasks, value]}
scale_tasks: {get_attr: [ScaleTasks, value]}
fast_forward_upgrade_tasks: {get_attr: [FastForwardUpgradeTasks, value]}
fast_forward_post_upgrade_tasks: {get_attr: [FastForwardPostUpgradeTasks, value]}
pre_upgrade_rolling_tasks: {get_attr: [PreUpgradeRollingTasks, value]}

View File

@@ -0,0 +1,8 @@
---
features:
- |
Composable service templates can now define scale_tasks. These tasks
carry the scale-down/scale-up logic for services that need to be
stopped or started during the scaling procedure. Everything happens
within a single playbook, and the down/up Ansible tags are required to
differentiate the two directions during the run.
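As a hedged sketch of how a composable service template could use this new section (service name, unit name, and step value are hypothetical, not taken from any real template), the role_data output would carry scale_tasks alongside the existing *_tasks entries, with the down and up tags selecting the direction:

    outputs:
      role_data:
        description: Role data for the example service
        value:
          service_name: example_service              # hypothetical
          scale_tasks:
            - name: Stop example_service on scale-down
              when: step|int == 1                    # assumed step guard
              tags:
                - down
              service:
                name: tripleo_example_service        # hypothetical unit
                state: stopped
            - name: Start example_service on scale-up
              when: step|int == 1
              tags:
                - up
              service:
                name: tripleo_example_service
                state: started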

View File

@@ -66,7 +66,7 @@ OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
'kolla_config', 'global_config_settings',
'external_deploy_tasks', 'external_post_deploy_tasks',
'container_config_scripts', 'step_config',
'monitoring_subscription', 'scale_tasks',
'external_update_tasks', 'external_upgrade_tasks']
# ansible tasks cannot be an empty dict or ansible is unhappy
ANSIBLE_TASKS_SECTIONS = ['upgrade_tasks', 'pre_upgrade_rolling_tasks',