Remove duplicate code in Ansible playbooks

This commit removes some duplicate code in the Ansible playbooks repository.

Tests done:
  - remote and local bootstrap controller-0
  - remote and local backup
  - remote and local restore_platform

Change-Id: Id6894c6ae5fcb527a2619c2838fa6510e6142a30
Story: 2004761
Task: 36571
Signed-off-by: Wei Zhou <wei.zhou@windriver.com>
This commit is contained in:
Wei Zhou 2019-09-18 17:14:41 -04:00
parent 8d49c16bc8
commit 0aca9f5372
13 changed files with 263 additions and 247 deletions

View File

@ -10,27 +10,11 @@
gather_facts: no
vars_files:
- host_vars/default.yml
pre_tasks:
- stat:
path: "{{ item }}"
register: files_to_import
with_items:
- "{{ override_files_dir }}/secrets.yml"
- "{{ override_files_dir }}/{{ inventory_hostname }}_secrets.yml"
- "{{ override_files_dir }}/site.yml"
- "{{ override_files_dir }}/{{ inventory_hostname }}.yml"
delegate_to: localhost
- include_vars: "{{ item.item }}"
when: item.stat.exists
with_items: "{{ files_to_import.results }}"
loop_control:
label: "{{ item.item }}"
- host_vars/backup-restore/default.yml
# Main play
roles:
- common
- { role: common/prepare-env }
- { role: backup-restore/prepare-env }
- { role: backup/prepare-env, become: yes }
- { role: backup/backup-system, become: yes }

View File

@ -10,11 +10,11 @@
gather_facts: no
vars_files:
- host_vars/default.yml
- host_vars/bootstrap/default.yml
# Main play
roles:
- include-override-files
- common/prepare-env
- bootstrap/prepare-env
- { role: bootstrap/validate-config, when: not skip_play, become: yes }
- { role: bootstrap/store-passwd, when: not skip_play and save_password, become: yes }
@ -23,7 +23,6 @@
- { role: bootstrap/bringup-essential-services, when: not skip_play, become: yes }
vars:
change_password: false
skip_play: false
replayed: false
mode: 'bootstrap'

View File

@ -0,0 +1,82 @@
---
# ADMIN CREDENTIALS
# =================
#
# WARNING: It is strongly recommended to store these settings in Ansible vault
# file named "secret" under override files directory. Configuration parameters
# stored in vault must start with vault_ prefix (i.e. vault_admin_username,
# vault_admin_password).
#
admin_username: admin
admin_password: St8rlingX*
# INITIAL PASSWORD CHANGE RESPONSE SEQUENCE
# =========================================
#
# The following two parameters are only relevant when the target host is bootstrapped
# remotely and the user wishes to change the initial sysadmin password as part of the
# bootstrap.
#
# WARNING: It is strongly recommended to store this setting in Ansible vault
# file named "secret" under override files directory. Configuration parameters
# stored in vault must start with vault_ prefix (i.e. vault_password_change_responses)
#
password_change: false
password_change_responses:
yes/no: 'yes'
sysadmin*: 'sysadmin'
\(current\) UNIX password: 'sysadmin'
(?i)New password: 'St8rlingX*'
(?i)Retype new password: 'St8rlingX*'
# OVERRIDE FILES DIRECTORY
# ========================
#
# Default directory where user override file(s) can be found
#
override_files_dir: "{{ lookup('env', 'HOME') }}"
# BACKUP AND RESTORE
# ==================
#
# Location where the backup tar file is placed to perform the restore.
# This location must be specified at the command line via ansible-playbook -e option.
initial_backup_dir:
# This variable refers to the tar file that is generated by the backup
# procedure and used in the restore phase. The filename must be specified
# at the command line via ansible-playbook -e option.
backup_filename:
# Default directory where the backup tar file(s) can be found
# on the active controller
backup_dir: /opt/backups
# The platform backup tarball will be named in this format:
# <platform_backup_filename_prefix>_<timestamp>.tgz
#
platform_backup_filename_prefix: "{{ inventory_hostname }}_platform_backup"
# The stx-openstack application backup tarball will be named in this format:
# <openstack_backup_filename_prefix>_<timestamp>.tgz
#
openstack_backup_filename_prefix: "{{ inventory_hostname }}_openstack_backup"
# An indication whether it is a full restore or partial restore.
# true: a full restore where storage partition(s) is/are wiped during
# platform restore and ceph data needs to be restored
# false: a partial restore where ceph data remains intact during restore
#
# This variable is used for StarlingX OpenStack application restore only
#
restore_ceph_data: false
# Default directory where the system backup tarballs fetched from the
# active controller can be found
#
host_backup_dir: "{{ lookup('env', 'HOME') }}"
# Flag file to indicate if platform restore is in progress
#
restore_in_progress_flag: /etc/platform/.restore_in_progress

View File

@ -207,14 +207,16 @@ admin_password: St8rlingX*
# INITIAL PASSWORD CHANGE RESPONSE SEQUENCE
# =========================================
#
# This parameter is only relevant when the target host is bootstrapped remotely
# and the user wishes to change the initial sysadmin password as part of the
# The following two parameters are only relevant when the target host is bootstrapped
# remotely and the user wishes to change the initial sysadmin password as part of the
# bootstrap.
#
# WARNING: It is strongly recommended to store this setting in Ansible vault
# file named "secret" under override files directory. Configuration parameters
# stored in vault must start with vault_ prefix (i.e. vault_password_change_responses)
#
password_change: false
password_change_responses:
yes/no: 'yes'
sysadmin*: 'sysadmin'
@ -228,47 +230,3 @@ password_change_responses:
# Default directory where user override file(s) can be found
#
override_files_dir: "{{ lookup('env', 'HOME') }}"
# BACKUP AND RESTORE
# ==================
#
# Location where the backup tar file is placed to perform the restore.
# This location must be specified at the command line via ansible-playbook -e option.
initial_backup_dir:
# This variable refers to the tar file that is generated by the backup
# procedure and used in the restore phase. The filename must be specified
# at the command line via ansible-playbook -e option.
backup_filename:
# Default directory where the backup tar file(s) can be found
# on the active controller
backup_dir: /opt/backups
# The platform backup tarball will be named in this format:
# <platform_backup_filename_prefix>_<timestamp>.tgz
#
platform_backup_filename_prefix: "{{ inventory_hostname }}_platform_backup"
# The stx-openstack application backup tarball will be named in this format:
# <openstack_backup_filename_prefix>_<timestamp>.tgz
#
openstack_backup_filename_prefix: "{{ inventory_hostname }}_openstack_backup"
# An indication whether it is a full restore or partial restore.
# true: a full restore where storage partition(s) is/are wiped during
# platform restore and ceph data needs to be restored
# false: a partial restore where ceph data remains intact during restore
#
# This variable is used for StarlingX OpenStack application restore only
#
restore_ceph_data: false
# Default directory where the system backup tarballs fetched from the
# active controller can be found
#
host_backup_dir: "{{ lookup('env', 'HOME') }}"
# Flag file to indicate if platform restore is in progress
#
restore_in_progress_flag: /etc/platform/.restore_in_progress

View File

@ -8,14 +8,12 @@
gather_facts: no
vars_files:
- host_vars/default.yml
- host_vars/backup-restore/default.yml
roles:
- include-override-files
- restore-platform/pre-restore-bootstrap
vars:
change_password: false
- common/prepare-env
- restore-platform/prepare-env
- backup-restore/transfer-file
- name: Run bootstrap playbook with restore mode
import_playbook: bootstrap.yml mode='restore'
@ -24,8 +22,8 @@
gather_facts: no
vars_files:
- host_vars/default.yml
- host_vars/backup-restore/default.yml
roles:
- include-override-files
- common/prepare-env
- { role: restore-platform/restore-more-data, become: yes }

View File

@ -8,32 +8,6 @@
# This role contains common components (tasks, vars, etc.) that
# can be shared by all the backup and restore playbooks.
# Check host connectivity
- block:
- name: Set SSH port
set_fact:
ansible_port: "{{ ansible_port | default(22) }}"
- name: Update SSH known hosts
lineinfile:
path: ~/.ssh/known_hosts
state: absent
regexp: '^{{ ansible_host }}|^\[{{ ansible_host }}\]:{{ ansible_port }}'
delegate_to: localhost
- name: Check connectivity
local_action: command ping -c 1 {{ ansible_host }}
failed_when: false
register: ping_result
- name: Fail if host is unreachable
fail: msg='Host {{ ansible_host }} is unreachable!'
with_items:
- "{{ ping_result.stdout_lines|list }}"
when: ping_result.rc != 0 and item is search('100% packet loss')
when: inventory_hostname != 'localhost'
- name: Check archive dir
stat:
path: "{{ backup_dir }}"

View File

@ -0,0 +1,47 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASKS DESCRIPTION:
# For remote play transfer the backup tar file to controller-0
- block:
# Check if the backup tarball already exists. If it is the second run
# after the reboot, no need to transfer the backup tarball again.
- name: Check if {{ backup_filename }} has been uploaded already
stat:
path: "{{ target_backup_dir }}/{{ backup_filename }}"
register: check_backup_tarball
- block:
# TODO(wzhou): Consider breaking the backup tarball into multiple small tar files.
# During restore, upload each small tar file one at a time to restore a subfunction.
# Because Ansible copy module uses ansible_remote_tmp directory as
# a staging area to transfer file, the default ansible_remote_tmp
# which is set in /tmp (1GB) may be too small for backup tarball,
# we require user to set ansible_remote_tmp to a new directory in
# /home/sysadmin via -e option on the command line. For example:
# -e "ansible_remote_tmp=/home/sysadmin/ansible-restore"
- name: Transfer backup tarball to {{ target_backup_dir }} on controller-0
copy:
src: "{{ initial_backup_dir }}/{{ backup_filename }}"
dest: "{{ target_backup_dir }}"
owner: root
group: root
mode: 0644
# As an alternative to Ansible copy, synchronize module may be
# used to transfer large files. But synchronize is broken in Ansible 2.8
# https://github.com/ansible/ansible/issues/56629.
# - name: Transfer backup tarball to /scratch on controller-0
# synchronize:
# src: "{{ initial_backup_dir }}/{{ backup_filename }}"
# dest: "{{ target_backup_dir }}/{{ backup_filename }}"
when: not check_backup_tarball.stat.exists
when: inventory_hostname != "localhost"
become: yes
become_user: root

View File

@ -11,59 +11,6 @@
# - reboot the controller if it is required by the patching
#
- block:
- name: Create {{ restore_in_progress_flag }} flag file
file:
path: "{{ restore_in_progress_flag }}"
state: touch
# For remote play the backup tarball will be transferred to /scratch
- block:
# Check if the backup tarball already exists. If it is the second run
# after the reboot, no need to transfer the backup tarball again.
- name: Check if {{ backup_filename }} has been uploaded already
stat:
path: "/scratch/{{ backup_filename }}"
register: check_backup_tarball
- block:
# TODO(wzhou): Consider breaking the backup tarball into multiple small tar files.
# During restore, upload each small tar file one at a time to restore a subfunction.
# Because Ansible copy module uses ansible_remote_tmp directory as
# a staging area to transfer file, the default ansible_remote_tmp
# which is set in /tmp (1GB) may be too small for backup tarball,
# we require user to set ansible_remote_tmp to a new directory in
# /home/sysadmin via -e option on the command line. For example:
# -e "ansible_remote_tmp=/home/sysadmin/ansible-restore"
- name: Transfer backup tarball to /scratch on controller-0
copy:
src: "{{ initial_backup_dir }}/{{ backup_filename }}"
dest: /scratch
owner: root
group: root
mode: 0644
# As an alternative to Ansible copy, synchronize module may be
# used to transfer large files. But synchronize is broken in Ansible 2.8
# https://github.com/ansible/ansible/issues/56629.
# - name: Transfer backup tarball to /scratch on controller-0
# synchronize:
# src: "{{ initial_backup_dir }}/{{ backup_filename }}"
# dest: "/scratch/{{ backup_filename }}"
when: not check_backup_tarball.stat.exists
- name: Set target_backup_dir to /scratch
set_fact:
target_backup_dir: /scratch
when: inventory_hostname != "localhost"
- name: For local play set target_backup_dir to initial_backup_dir
set_fact:
target_backup_dir: "{{ initial_backup_dir }}"
when: inventory_hostname == "localhost"
- name: Set fact for patching staging dir
set_fact:
patching_staging_dir: /scratch/patching

View File

@ -9,57 +9,6 @@
# the next step.
#
# Check host connectivity, change password if provided
- block:
- name: Set SSH port
set_fact:
ansible_port: "{{ ansible_port | default(22) }}"
- name: Update SSH known hosts
lineinfile:
path: ~/.ssh/known_hosts
state: absent
regexp: '^{{ ansible_host }}|^\[{{ ansible_host }}\]:{{ ansible_port }}'
delegate_to: localhost
- name: Check connectivity
local_action: command ping -c 1 {{ ansible_host }}
failed_when: false
register: ping_result
- name: Fail if host is unreachable
fail: msg='Host {{ ansible_host }} is unreachable!'
with_items:
- "{{ ping_result.stdout_lines|list }}"
when: ping_result.rc != 0 and item is search('100% packet loss')
- block:
- name: Fail if password change response sequence is not defined
fail: msg="The mandatory parameter password_change_response is not defined."
when: (vault_password_change_responses is not defined) and
(password_change_responses is not defined)
- debug:
msg: "Changing the initial password.."
- name: Change initial password
expect:
echo: yes
command: "ssh -p {{ ansible_port }} {{ ansible_ssh_user }}@{{ ansible_host }}"
responses: "{{ vault_password_change_responses | default(password_change_responses) }}"
failed_when: false
delegate_to: localhost
rescue:
# Initial password has been changed and the user forgot to exclude
# password_change option in the command line for the replay.
- debug:
msg: "Password has already been changed"
when: change_password
when: inventory_hostname != 'localhost'
# Check for one of the unmistakably StarlingX packages
- name: "Look for unmistakenly {{ image_brand }} package"
command: rpm -q controllerconfig
@ -79,12 +28,6 @@
register: initial_config_complete
- block:
# Restore doesn't support replay
- name: Fail if bootstrap is in restore mode and the host has been unlocked
fail:
msg: "Host {{ ansible_host }} has been unlocked. Cannot perform restore."
when: mode == 'restore'
- name: Set skip_play flag for host
set_fact:
skip_play: true
@ -101,16 +44,6 @@
# Proceed only if skip_play flag is not turned on
- block:
- block:
- name: Check if restore is in progress if bootstrap is with restore mode
stat:
path: "{{ restore_in_progress_flag }}"
register: restore_in_progress
- name: Fail if restore is already in progress
fail:
msg: " Restore is already in progress!"
when: restore_in_progress.stat.exists
# Do load verification and patching if required
- include_tasks: load_patching_tasks.yml

View File

@ -7,6 +7,5 @@ supported_release_versions:
- "19.09"
patching_permdir: /opt/patching
patching_repo_permdir: /www/pages/updates
restore_in_progress_flag: /etc/platform/.restore_in_progress
restore_patching_complete_flag: /etc/platform/.restore_patching_complete
node_is_patched_flag: /var/run/node_is_patched

View File

@ -0,0 +1,75 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
# This role is to perform tasks that are common to the playbooks.
#
# Include user override files for a play
- stat:
path: "{{ item }}"
register: files_to_import
with_items:
- "{{ override_files_dir }}/secrets.yml"
- "{{ override_files_dir }}/{{ inventory_hostname }}_secrets.yml"
- "{{ override_files_dir }}/site.yml"
- "{{ override_files_dir }}/{{ inventory_hostname }}.yml"
delegate_to: localhost
- include_vars: "{{ item.item }}"
when: item.stat.exists
with_items: "{{ files_to_import.results }}"
# Check host connectivity, change password if provided
- block:
- name: Set SSH port
set_fact:
ansible_port: "{{ ansible_port | default(22) }}"
- name: Update SSH known hosts
lineinfile:
path: ~/.ssh/known_hosts
state: absent
regexp: '^{{ ansible_host }}|^\[{{ ansible_host }}\]:{{ ansible_port }}'
delegate_to: localhost
- name: Check connectivity
local_action: command ping -c 1 {{ ansible_host }}
failed_when: false
register: ping_result
- name: Fail if host is unreachable
fail: msg='Host {{ ansible_host }} is unreachable!'
with_items:
- "{{ ping_result.stdout_lines|list }}"
when: ping_result.rc != 0 and item is search('100% packet loss')
- block:
- name: Fail if password change response sequence is not defined
fail: msg="The mandatory parameter password_change_response is not defined."
when: (vault_password_change_responses is not defined) and
(password_change_responses is not defined)
- debug:
msg: "Changing the initial password.."
- name: Change initial password
expect:
echo: yes
command: "ssh -p {{ ansible_port }} {{ ansible_ssh_user }}@{{ ansible_host }}"
responses: "{{ vault_password_change_responses | default(password_change_responses) }}"
failed_when: false
delegate_to: localhost
rescue:
# Initial password has been changed and the user forgot to exclude
# password_change option in the command line for the replay.
- debug:
msg: "Password has already been changed"
when: password_change
when: inventory_hostname != 'localhost'

View File

@ -1,22 +0,0 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
# This role is to include user override files for a play.
#
- stat:
path: "{{ item }}"
register: files_to_import
with_items:
- "{{ override_files_dir }}/secrets.yml"
- "{{ override_files_dir }}/{{ inventory_hostname }}_secrets.yml"
- "{{ override_files_dir }}/site.yml"
- "{{ override_files_dir }}/{{ inventory_hostname }}.yml"
delegate_to: localhost
- include_vars: "{{ item.item }}"
when: item.stat.exists
with_items: "{{ files_to_import.results }}"

View File

@ -5,8 +5,10 @@
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
# This role is to retrieve the override file from the backup tarball
# required for the controller bootstrap.
# This role performs following tasks:
# 1. Retrieve the override file from the backup tarball
# required for the controller bootstrap.
# 2. Verify if platform restore should proceed
#
- block:
- name: Fail if backup_filename is not defined or set
@ -53,3 +55,43 @@
when: search_result.rc != 0
delegate_to: localhost
- block:
# Bail if the host has been unlocked
- name: Check initial config flag
stat:
path: /etc/platform/.initial_config_complete
register: initial_config_done
- name: Fail if the host has been unlocked
fail:
msg: "Host {{ ansible_host }} has been unlocked. Cannot perform restore."
when: initial_config_done.stat.exists
- name: Check if restore is in progress if bootstrap is with restore mode
stat:
path: "{{ restore_in_progress_flag }}"
register: restore_in_progress
- name: Fail if restore is already in progress
fail:
msg: " Restore is already in progress!"
when: restore_in_progress.stat.exists
- name: Create {{ restore_in_progress_flag }} flag file
file:
path: "{{ restore_in_progress_flag }}"
state: touch
- name: For remote play set target_backup_dir to /scratch
set_fact:
target_backup_dir: /scratch
when: inventory_hostname != "localhost"
- name: For local play set target_backup_dir to initial_backup_dir
set_fact:
target_backup_dir: "{{ initial_backup_dir }}"
when: inventory_hostname == "localhost"
become: yes
become_user: root