Optimized platform restore playbook
Use "-e 'restore_mode=optimized'" to use the optimized playbook.
Current Usage:
1 - restore_platform.yml -e "backup_filename=${BACKUP_FILENAME} \
registry_backup_filename=${REGISTRY_BACKUP_FILENAME} \
restore_mode=optimized"
2 - Wait for SM to reboot controller-0
3 - sudo touch /var/run/.ansible_bootstrap
4 - source /etc/platform/openrc
5 - system host-unlock controller-0
When the old restore playbook is deprecated, change the default
restore_mode to optimized to allow for a smooth deprecation period.
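A consolidated sketch of the steps above, assuming local execution on
controller-0 (the ansible-playbook invocation and the playbook path are
assumptions for illustration; the individual commands come from the usage
list):

    # 1. Run the restore with the optimized mode selected
    ansible-playbook /usr/share/ansible/stx-ansible/playbooks/restore_platform.yml \
        -e "backup_filename=${BACKUP_FILENAME} \
            registry_backup_filename=${REGISTRY_BACKUP_FILENAME} \
            restore_mode=optimized"

    # 2. Wait for SM to reboot controller-0, then log back in and unlock it
    sudo touch /var/run/.ansible_bootstrap
    source /etc/platform/openrc
    system host-unlock controller-0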
Tests were done using both local and remote execution.
TEST PLAN:
- AIO-SX Debian -
PASS: Restore using restore_mode=optimized and unlock
REGRESSION TEST PLAN:
- AIO-SX Debian -
PASS: Restore without providing restore_mode and unlock
PASS: Bootstrap and unlock
- AIO-SX CentOS -
PASS: Restore without providing restore_mode and unlock
PASS: Bootstrap and unlock
Story: 2010117
Task: 45990
Signed-off-by: Thiago Brito <thiago.brito@windriver.com>
Change-Id: I52eabe4409665e9ae6d8bf643dd6e024ca952fca
playbookconfig/src/playbooks/restore_optimized.yml (new file, 294 lines)
@@ -0,0 +1,294 @@
---
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

- hosts: all
  gather_facts: no
  become: yes
  tasks:
    - name: Create /opt/backups
      file:
        path: "/opt/backups"
        state: directory

- hosts: all
  gather_facts: no

  vars_files:
    - vars/common/main.yml
    - host_vars/backup-restore/default.yml

  roles:
    - common/prepare-env
    - common/validate-target
    - backup-restore/validate-input
    - backup-restore/prepare-env
    - backup-restore/stage-backup-archives

- hosts: all
  gather_facts: no
  become: yes

  vars_files:
    - host_vars/backup-restore/default.yml

  tasks:

    - name: Define restore facts
      set_fact:
        hieradata_workdir: /tmp/hieradata
        grub_mkconfig: "{{ 'grub2-mkconfig' if os_release == 'centos' else 'grub-mkconfig' }}"
        network_scripts_location:
          "{{ '/etc/sysconfig/network-scripts' if os_release == 'centos' else '/etc/network/interfaces.d' }}"
        network_scripts_location_bkp:
          "{{ 'etc/sysconfig/network-scripts' if os_release == 'centos' else 'etc/network/interfaces.d' }}"
        docker_registry_service: "{{ 'docker-distribution' if os_release == 'centos' else 'docker-registry' }}"
        root_dir: "{{ '/' if os_release == 'centos' else '/var/rootdirs' }}"
        sysinv_config_permdir: "{{ '/opt/platform/sysinv/' + software_version }}"

    - name: Setup flags to control puppet manifest apply
      file:
        path: "{{ item }}"
        state: touch
      # TODO(abailey): Need to add proper support for duplex
      loop:
        - /etc/platform/simplex

    - name: Create hieradata workdir
      file:
        path: "{{ hieradata_workdir }}"
        state: directory

    - name: Restore puppet hieradata to working directory
      command: "tar -C {{ hieradata_workdir }} -xpf {{ platform_backup_fqpn }} \
                --overwrite --transform='s,.*/,,' \
                opt/platform/puppet/{{ software_version }}/hieradata"
      args:
        warn: false

    - name: Create puppet hieradata runtime configuration
      copy:
        dest: "{{ hieradata_workdir }}/runtime.yaml"
        content: |
          platform::network::mgmt::params::subnet_version: 4
          platform::network::mgmt::params::controller0_address: 127.0.0.1
          platform::network::mgmt::params::controller1_address: 127.0.0.2
        force: yes

    - name: Applying puppet restore manifest
      command: >-
        /usr/local/bin/puppet-manifest-apply.sh
        {{ hieradata_workdir }}
        localhost
        controller
        restore
        {{ hieradata_workdir }}/runtime.yaml
      environment:
        INITIAL_CONFIG_PRIMARY: "true"
        LC_ALL: "en_US.UTF-8"

    # TODO(outbrito): puppet sets permission to 750, not sure why...
    - name: Set /opt/backups to 755 so postgres can read it
      file:
        path: "/opt/backups"
        state: directory
        mode: 0755

    - name: Create device image filesystem paths
      file:
        path: "{{ item }}"
        state: directory
      loop:
        - /opt/platform/device_images
        - /var/www/pages/device_images

    - name: Create device image bind mount
      command: "mount -o bind -t ext4 /opt/platform/device_images /var/www/pages/device_images"

    - name: Restore configuration files
      command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite {{ item }}"
      loop:
        - etc/barbican
        - etc/containerd
        - etc/default
        - etc/docker
        - etc/docker-distribution
        - etc/drbd.d
        - etc/etcd
        - etc/haproxy
        - etc/hosts
        - etc/keystone
        - etc/kubernetes
        - etc/pki
        - etc/platform/openrc
        - etc/resolv.conf
        - etc/ssl
        - etc/sysinv
      args:
        warn: false

    - name: Update boot loader configuration
      command: "{{ grub_mkconfig }} -o /boot/grub2/grub.cfg"

    - name: Determine network configuration files
      find:
        paths: "{{ network_scripts_location }}"
        patterns: "ifcfg-*"
      register: network_files_to_delete

    - name: Remove network configuration files
      file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ network_files_to_delete.files }}"

    - name: Restore network configuration files
      command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite --wildcards {{ network_scripts_location_bkp }}/*"

    - name: Restore profile files
      command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite {{ item }}"
      loop:
        - "etc/profile.d/kubeconfig.sh"
      args:
        warn: false

    - name: Restore ldap data
      import_role:
        name: backup-restore/restore-ldap

    - name: Restore etcd snapshot
      import_role:
        name: backup-restore/restore-etcd

    - name: Restore Postgres
      import_role:
        name: backup-restore/restore-postgres

    - name: Restore persistent configuration
      command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite {{ item }}"
      loop:
        - opt/patching
        - opt/platform
        - opt/extension
      args:
        warn: false

    - name: Check archived kubelet dir
      shell: "tar -tf {{ platform_backup_fqpn }} | grep 'var/lib/kubelet'"
      args:
        warn: false
      register: kubelet_dir_result

    - name: Restore kubelet configuration
      command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite var/lib/kubelet/"
      args:
        warn: false
      when: kubelet_dir_result.rc == 0

    - name: Restore kubelet pmond configuration file
      command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite {{ item }}"
      loop:
        - etc/pmon.d/kubelet.conf
      args:
        warn: false

    - name: Reload systemd
      command: systemctl daemon-reload

    - name: Restore container registry filesystem
      command: "tar -C / -xpf {{ registry_backup_fqpn }} --overwrite var/lib/docker-distribution/"
      args:
        warn: false

    - name: Check home dir for CentOS
      block:

        - name: Check if home was backed up
          shell: "tar -tf {{ platform_backup_fqpn }} | grep -E '^home\\/'"
          args:
            warn: false
          register: home_dir_result

        - name: Restore home directory
          command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite home/"
          args:
            warn: false
          when: home_dir_result.rc == 0

      when: os_release == "centos"

    - name: Check home dir for Debian
      block:

        - name: Check if home was backed up
          shell: "tar -tf {{ platform_backup_fqpn }} | grep 'var/home/'"
          args:
            warn: false
          register: home_dir_result

        - name: Restore home directory
          command: "tar -C / -xpf {{ platform_backup_fqpn }} --overwrite var/home/"
          args:
            warn: false
          when: home_dir_result.rc == 0

      when: os_release == "debian"

    - name: Lookup controller host address
      command: "gethostip -d controller"
      register: host_lookup

    - name: Define controller host address
      set_fact:
        controller_address: "{{ host_lookup.stdout_lines[0] }}"

    - name: Configure controller host address
      command: "ip addr add {{ controller_address }} dev lo scope host"

    - name: Disable local registry authentication
      command: "sed -i '/auth:/,$d' /etc/docker-distribution/registry/config.yml"

    - name: Start docker registry service
      systemd:
        name: "{{ docker_registry_service }}"
        state: restarted

    - name: Start containerd service
      systemd:
        name: containerd
        state: restarted

    - name: Pull kubernetes local container images
      command: "crictl pull registry.local:9001/{{ item }}"
      loop:
        - k8s.gcr.io/kube-apiserver:v1.23.1
        - k8s.gcr.io/kube-scheduler:v1.23.1
        - k8s.gcr.io/kube-controller-manager:v1.23.1
        - k8s.gcr.io/coredns/coredns:v1.8.6

    # restore-more-data/tasks/main.yml#459
    # Set all the hosts including controller-0 to locked/disabled/offline state.
    # After the services are restarted, mtce will update controller-0 to
    # locked/disabled/online state. Setting controller-0 to offline state now
    # will ensure that keystone, sysinv and mtcAgent are indeed in-service after being restarted.
    - name: Set all the hosts to locked/disabled/offline state
      shell: >-
        psql -c "update i_host set administrative='locked', operational='disabled',
        availability='offline'" sysinv
      become_user: postgres

    # NOTE(outbrito): If I leave the task below like this, sm comes up as part of the restore and
    # brings drbd up once the node reboots, then I had to enable/start kubelet manually. I also had
    # to bounce drbd since after the snapshot restore, drbd doesn't get the restored data promptly.
    # I think there is some kind of caching involved.
    - name: Restore complete, set flags
      file:
        path: "{{ item }}"
        state: touch
      loop:
        - /var/run/.ansible_bootstrap  # bootstrap/prepare-env/tasks/main.yml#614
        - /etc/platform/.initial_k8s_config_complete  # bringup_kubemaster.yml#L429
        - /etc/platform/.initial_config_complete  # sm will restart after a while

@@ -4,30 +4,12 @@
#
# SPDX-License-Identifier: Apache-2.0
#
- hosts: all
  gather_facts: no

  vars_files:
    - vars/common/main.yml
    - host_vars/backup-restore/default.yml

  roles:
    - common/prepare-env
    - restore-platform/precheck
    - common/validate-target
    - restore-platform/prepare-env
    - restore-platform/restore-sw-patches

- name: Run bootstrap playbook with restore mode
  import_playbook: bootstrap.yml mode='restore'

- hosts: all
  gather_facts: no

  vars_files:
    - host_vars/bootstrap/default.yml
    - host_vars/backup-restore/default.yml

  roles:
    - common/prepare-env
    - { role: restore-platform/restore-more-data, become: yes }
- name: "Running restore playbook {{ restore_playbook }}"
  import_playbook: "{{ restore_playbook }}"
  vars:
    restore_mode: "old"
    restore_playbook: "{{ 'restore_optimized.yml' if restore_mode == 'optimized' else 'restore_platform_old.yml' }}"

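A note on how the dispatch above behaves: Ansible gives extra-vars the highest
precedence, so the play-level default of restore_mode: "old" keeps existing
invocations on the legacy playbook, while passing the variable on the command
line selects the optimized one. Example invocations (a sketch only; paths and
the other required variables are as in the usage section above):

    # default: restore_platform.yml imports restore_platform_old.yml
    ansible-playbook restore_platform.yml -e "backup_filename=${BACKUP_FILENAME}"

    # optimized: restore_platform.yml imports restore_optimized.yml
    ansible-playbook restore_platform.yml -e "backup_filename=${BACKUP_FILENAME} \
        registry_backup_filename=${REGISTRY_BACKUP_FILENAME} restore_mode=optimized"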
playbookconfig/src/playbooks/restore_platform_old.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
- hosts: all
  gather_facts: no

  vars_files:
    - vars/common/main.yml
    - host_vars/backup-restore/default.yml

  roles:
    - common/prepare-env
    - restore-platform/precheck
    - common/validate-target
    - backup-restore/validate-input
    - restore-platform/prepare-env
    - restore-platform/restore-sw-patches

- name: Run bootstrap playbook with restore mode
  import_playbook: bootstrap.yml mode='restore'

- hosts: all
  gather_facts: no

  vars_files:
    - host_vars/bootstrap/default.yml
    - host_vars/backup-restore/default.yml

  roles:
    - common/prepare-env
    - { role: restore-platform/restore-more-data, become: yes }

@@ -11,6 +11,7 @@
    - host_vars/backup-restore/default.yml

  roles:
    - restore-user-images/validate-input
    - { role: common/prepare-env }
    - { role: restore-user-images/prepare-env, become: yes }
    - { role: restore-user-images/restore-local-registry-images, become: yes,

@@ -1,12 +1,15 @@
---
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASK DESCRIPTION:
# These tasks restore the etcd database.

- set_fact:
    etcd_tmp_dir: "{{ staging_dir }}/etcd_tmp_dir"

- block:
    - name: Check if temporary etcd folder exists
      stat:
@@ -28,35 +31,29 @@

    - name: Extract etcd database backup to temporary folder
      command: >-
        tar -C {{ etcd_tmp_dir }} -xpf {{ restore_data_file }} --wildcards
        tar -C {{ etcd_tmp_dir }} -xpf {{ platform_backup_fqpn }} --wildcards
        --transform='s,.*/,,' '*/etcd-snapshot.db'
      args:
        warn: false

    - name: Remove old etcd database
      file:
        path: "/opt/etcd/{{ software_version }}/controller.etcd"
        state: absent

    - name: Restore etcd database from snapshot file to temp folder
      command: etcdctl snapshot restore etcd-snapshot.db --endpoints=controller:2379
      command: >-
        etcdctl snapshot restore etcd-snapshot.db --endpoints=controller:2379
        --data-dir /opt/etcd/{{ software_version }}/controller.etcd
      args:
        chdir: "{{ etcd_tmp_dir }}"
      environment:
        ETCDCTL_API: 3

    - name: Stop etcd
      service:
    - name: Enable etcd
      systemd:
        name: etcd
        state: stopped

    - name: Remove old etcd database
      file:
        path: "/opt/etcd/{{ software_version }}/controller.etcd/member"
        state: absent

    - name: Move etcd database from temp folder to persistent storage
      command: mv "{{ etcd_tmp_dir }}/default.etcd/member" "/opt/etcd/{{ software_version }}/controller.etcd/"

    - name: Start etcd
      service:
        name: etcd
        state: started
        enabled: yes

    - name: Remove temporary etcd folder
      file:

@@ -0,0 +1,3 @@
---
# The staging area to process the data from the backup tarball
staging_dir: /opt/backups

@@ -0,0 +1,83 @@
---
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASK DESCRIPTION:
# The tasks below restore the ldap database and configuration files.

- name: Set parameters for ldap different paths by OS
  set_fact:
    ldap_schema_path: "{{ '/etc/openldap/schema' if os_release == 'centos' else '/etc/ldap/schema' }}"

- name: Check if CentOS openldap configuration is included in the backup
  shell: "tar -tf {{ platform_backup_fqpn }} | grep -E 'etc\\/openldap\\/.*'"
  args:
    warn: false
  failed_when: false
  register: bkp_has_centos_ldap_config

- block:
    - block:
        - name: Restore openldap configuration
          command: tar -C / -xpf {{ platform_backup_fqpn }} --wildcards --overwrite etc/openldap/*
          args:
            warn: false

        # TODO (heitormatsui): remove when Centos -> Debian upgrade support become deprecated
        - block:
            - name: Copy openldap configuration to staging directory
              command: cp -Rf /etc/openldap/ {{ staging_dir }}/openldap
              args:
                warn: false

            - name: Replace Centos paths with Debian paths on files
              shell: >-
                find {{ staging_dir }}/openldap/schema/ -type f | xargs sed -i
                "s#/usr/lib64/openldap#/usr/lib/ldap#;
                s#/var/lib/openldap-data#/var/lib/ldap#;
                s#/etc/openldap#/etc/ldap#"
              args:
                warn: false

            - name: Copy /etc/openldap configuration to /etc/ldap if restoring on Debian
              command: cp -Rf {{ staging_dir }}/openldap/. /etc/ldap
          when: os_release == "debian"
      when: bkp_has_centos_ldap_config.rc == 0

    - block:
        - name: Check if Debian ldap configuration is included in the backup
          shell: "tar -tf {{ platform_backup_fqpn }} | grep -E 'etc\\/ldap\\/.*'"
          args:
            warn: false
          failed_when: false
          register: bkp_has_debian_ldap_config

        - name: Restore ldap configuration
          command: tar -C / -xpf {{ platform_backup_fqpn }} --wildcards --overwrite etc/ldap/*
          args:
            warn: false
          when: bkp_has_debian_ldap_config.rc == 0
      when: bkp_has_centos_ldap_config.rc != 0

    - name: Extract ldap.db to staging directory
      command: >-
        tar -C {{ staging_dir }} -xpf {{ platform_backup_fqpn }} --wildcards
        --transform='s,.*/,,' '*/ldap.db'
      args:
        warn: false

    - name: Restore ldap
      command: "slapadd -F {{ ldap_schema_path }} -l {{ staging_dir }}/ldap.db"
      register: slapadd_result
      failed_when: slapadd_result.rc != 0 and slapadd_result.rc != 1

  always:
    - name: Delete files from staging dir
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - "{{ staging_dir }}/ldap.db"
        - "{{ staging_dir }}/openldap"

@@ -0,0 +1,3 @@
---
# The staging area to process the data from the backup tarball
staging_dir: /opt/backups

@@ -0,0 +1,57 @@
---
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASK DESCRIPTION:
# The tasks below restore the postgres database schema and data.

- set_fact:
    postgres_staging_dir: "{{ staging_dir }}/postgres"

- block:
    - name: Create staging directory for postgres data
      file:
        path: "{{ postgres_staging_dir }}"
        state: directory
        recurse: yes
        owner: root
        group: root
        mode: 0755

    - name: Extract postgres db to staging directory
      command: >-
        tar -C {{ staging_dir }}/postgres -xpf {{ platform_backup_fqpn }}
        --wildcards --transform='s,.*/,,' '*/*\.postgreSql\.*'
      args:
        warn: false

    - name: Determine which postgresql database files exist
      stat:
        path: "{{ postgres_staging_dir }}/{{ item }}"
      with_items:
        - "postgres.postgreSql.config"
        - "postgres.postgreSql.data"
        - "template1.postgreSql.data"
        - "sysinv.postgreSql.data"
        - "keystone.postgreSql.data"
        - "fm.postgreSql.data"
        - "barbican.postgreSql.data"
        - "helmv2.postgreSql.data"
      register: pgfiles

    - name: Restore postgres db
      shell: "psql -f {{ item.stat.path }} {{ item.item.split('.')[0] }}"
      become_user: postgres
      with_items: "{{ pgfiles.results }}"
      when: item.stat.exists

    - import_tasks: restore-postgres-dc.yml

    - name: Remove postgres staging directory
      file:
        path: "{{ postgres_staging_dir }}"
        state: absent

  when: migrate_platform_data is undefined or not migrate_platform_data

@@ -0,0 +1,34 @@
---
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# SUB-TASK DESCRIPTION:
# The tasks below restore the postgres Distributed Cloud related data.

- name: Check if the system is a DC controller
  command: >-
    grep -i "distributed_cloud_role\s*=\s*systemcontroller"
    /etc/platform/platform.conf
  register: check_dc_controller
  failed_when: false

- name: Restore postgres for DC
  block:

    - name: Determine which postgresql database files exist
      stat:
        path: "{{ postgres_staging_dir }}/{{ item }}"
      with_items:
        - "dcmanager.postgreSql.data"
        - "dcorch.postgreSql.data"
      register: pgfiles_dc

    - name: Restore postgres db for DC systemcontroller
      shell: "psql -f {{ item.stat.path }} {{ item.item.split('.')[0] }}"
      become_user: postgres
      with_items: "{{ pgfiles_dc.results }}"
      when: item.stat.exists

  when: check_dc_controller.rc == 0

@@ -0,0 +1,3 @@
---
# The staging area to process the data from the backup tarball
staging_dir: /opt/backups

@@ -0,0 +1,39 @@
---
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
# This role stages the backup archives for later usage.

- name: Transfer backup tarballs to target if the file is off-box
  include_role:
    name: backup-restore/transfer-file
  when: on_box_data|bool == false

- name: Copy the backup tarballs to {{ target_backup_dir }} if the file is already on-box
  become: yes
  block:

    - copy:
        src: "{{ initial_backup_dir }}/{{ backup_filename }}"
        dest: "{{ target_backup_dir }}"
        remote_src: yes

    - copy:
        src: "{{ initial_backup_dir }}/{{ registry_backup_filename }}"
        dest: "{{ target_backup_dir }}"
        remote_src: yes
      when: registry_backup_filename is defined

  when: on_box_data|bool == true

- name: Set image platform backup fqpn
  set_fact:
    platform_backup_fqpn: "{{ target_backup_dir }}/{{ backup_filename }}"

- name: Set image registry backup fqpn
  set_fact:
    registry_backup_fqpn: "{{ target_backup_dir }}/{{ registry_backup_filename }}"
  when: registry_backup_filename is defined

@@ -18,6 +18,17 @@
    inspection_target: localhost
  when: on_box_data|bool == false

# roles/bootstrap/validate-config/tasks/main.yml#L877
# Note that due to Ansible mishandling of boolean values via extra-vars we are
# adding supplementary validation here.
# See: https://github.com/ansible/ansible/issues/17193
- name: Check for Ceph data wipe flag
  fail:
    msg: "wipe_ceph_osds is misconfigured. Valid value is either 'true' or 'false'"
  when: (not wipe_ceph_osds | type_debug == 'bool') and
        (wipe_ceph_osds != 'true') and
        (wipe_ceph_osds != 'false')

- block:
    - name: Fail if backup_filename is not defined or set
      fail:
@@ -53,7 +64,13 @@
      when: (on_box_data|bool == false)
    when: not backup_stat_result.stat.exists

    - name: Verify if registry image backup is provided

    - name: If optimized restore, verify if registry filesystem backup was provided
      fail:
        msg: For optimized restore, you must provide 'registry_backup_filename'
      when: restore_mode == 'optimized' and registry_backup_filename is not defined

    - name: Verify if image registry backup is provided
      block:
        - set_fact:
            registry_backup_initial_path: "{{ initial_backup_dir }}/{{ registry_backup_filename }}"
@@ -97,6 +114,6 @@
          when: (on_box_data|bool == false)
        when: not ssl_ca_certificate_stat_result.stat.exists

      when: ssl_ca_certificate_file is not none
      when: ssl_ca_certificate_file is defined and ssl_ca_certificate_file is not none

  delegate_to: "{{ inspection_target }}"

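The comment in the hunk above refers to a known Ansible behavior: values passed
as key=value extra-vars always arrive as strings, which is why the flag is also
compared against the literal strings 'true'/'false'. A minimal illustration
(hypothetical invocations, not part of this change):

    # key=value extra-vars are parsed as strings, so this passes the string "true"
    ansible-playbook restore_platform.yml -e wipe_ceph_osds=true

    # JSON-formatted extra-vars preserve the boolean type
    ansible-playbook restore_platform.yml -e '{"wipe_ceph_osds": true}'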
@@ -240,8 +240,21 @@
        - "{{ encryption_provider_config | regex_replace('^\\/', '') }}"
      become_user: root

    - name: Stop etcd
      service:
        name: etcd
        state: stopped

    - name: Restore etcd database
      import_tasks: restore_etcd.yml
      include_role:
        name: backup-restore/restore-etcd
      vars:
        platform_backup_fqpn: "{{ restore_data_file }}"

    - name: Start etcd
      service:
        name: etcd
        state: started

  when: mode == 'restore'

@@ -90,7 +90,7 @@
      dest: "{{ temp_ssl_ca_dir }}/{{ temp_ssl_ca_file }}"
    when: (on_box_data is defined) and (on_box_data|bool == false)

  when: ssl_ca_certificate_file is defined
  when: ssl_ca_certificate_file is defined and ssl_ca_certificate_file is not none

- name: Extract ssl_ca certificate from backup archive
  command: >-
@@ -101,7 +101,7 @@
  args:
    warn: false
  failed_when: false
  when: ssl_ca_certificate_file is not defined
  when: ssl_ca_certificate_file is not defined or ssl_ca_certificate_file is none

- name: Check that ssl_ca certificate exists
  stat:

@@ -14,19 +14,6 @@
# parameters in memory.
# 5. Create restore_in_progress flag.
#
# Note that due to Ansible mishandling of boolean values via extra-vars we are
# adding supplementary validation here
# See: https://github.com/ansible/ansible/issues/17193
- name: Check for Ceph data wipe flag
  fail:
    msg: "wipe_ceph_osds is misconfigured. Valid value is either 'true' or 'false'"
  when: (not wipe_ceph_osds | type_debug == 'bool') and
        (wipe_ceph_osds != 'true') and
        (wipe_ceph_osds != 'false')

- name: Perform generic user input validation for restore
  import_role:
    name: backup-restore/validate-input

- block:
    - name: Look for override backup file in the backup tarball
@@ -96,31 +83,9 @@
      set_fact:
        restore_data_file: "{{ target_backup_dir }}/{{ backup_filename }}"

    - name: Transfer backup tarballs to target if the file is off-box
    - name: Stage backup tarballs
      include_role:
        name: backup-restore/transfer-file
      when: on_box_data|bool == false

    - name: Copy the backup tarballs to {{ target_backup_dir }} if the file is already on-box
      block:

        - copy:
            src: "{{ initial_backup_dir }}/{{ backup_filename }}"
            dest: "{{ target_backup_dir }}"
            remote_src: yes

        - copy:
            src: "{{ initial_backup_dir }}/{{ registry_backup_filename }}"
            dest: "{{ target_backup_dir }}"
            remote_src: yes
          when: registry_backup_filename is defined

      when: on_box_data|bool == true

    - name: Set image registry backup fqpn
      set_fact:
        registry_backup_fqpn: "{{ target_backup_dir }}/{{ registry_backup_filename }}"
      when: registry_backup_filename is defined
        name: backup-restore/stage-backup-archives

    - name: Extract override file from backup tarball
      command: >

@@ -32,7 +32,6 @@

- name: Set parameters for ldap different paths by OS
  set_fact:
    ldap_schema_path: "{{ '/etc/openldap/schema' if os_release == 'centos' else '/etc/ldap/schema' }}"
    ldap_permdir: "{{ ldap_permdir_centos if os_release == 'centos' else ldap_permdir_debian }}"

# User postgres needs access files in this folder during restore
@@ -248,13 +247,6 @@
    warn: false
  when: migrate_platform_data is not defined or not migrate_platform_data

- name: Extract ldap.db to staging directory
  command: >-
    tar -C {{ staging_dir }} -xpf {{ restore_data_file }} --wildcards
    --transform='s,.*/,,' '*/ldap.db'
  args:
    warn: false

- name: Stop openldap service
  shell: "export SYSTEMCTL_SKIP_REDIRECT=1; /etc/init.d/openldap stop"

@@ -274,68 +266,14 @@

- name: Restore LDAP configuration
  block:
    - name: Check if CentOS openldap configuration is included in the backup
      shell: "tar -tf {{ restore_data_file }} | grep -E 'etc\\/openldap\\/.*'"
      args:
        warn: false
      failed_when: false
      register: bkp_has_centos_ldap_config

    - block:
        - name: Restore openldap configuration
          command: tar -C / -xpf {{ restore_data_file }} --wildcards --overwrite etc/openldap/*
          args:
            warn: false

        # TODO (heitormatsui): remove when Centos -> Debian upgrade support become deprecated
        - block:
            - name: Copy openldap configuration to staging directory
              command: cp -Rf /etc/openldap/ {{ staging_dir }}/openldap
              args:
                warn: false

            - name: Replace Centos paths with Debian paths on files
              shell: >-
                find {{ staging_dir }}/openldap/schema/ -type f | xargs sed -i
                "s#/usr/lib64/openldap#/usr/lib/ldap#;
                s#/var/lib/openldap-data#/var/lib/ldap#;
                s#/etc/openldap#/etc/ldap#"
              args:
                warn: false

            - name: Copy /etc/openldap configuration to /etc/ldap if restoring on Debian
              command: cp -Rf {{ staging_dir }}/openldap/. /etc/ldap
          when: os_release == "debian"
      when: bkp_has_centos_ldap_config.rc == 0

    - block:
        - name: Check if Debian ldap configuration is included in the backup
          shell: "tar -tf {{ restore_data_file }} | grep -E 'etc\\/ldap\\/.*'"
          args:
            warn: false
          failed_when: false
          register: bkp_has_debian_ldap_config

        - name: Restore ldap configuration
          command: tar -C / -xpf {{ restore_data_file }} --wildcards --overwrite etc/ldap/*
          args:
            warn: false
          when: bkp_has_debian_ldap_config.rc == 0
      when: bkp_has_centos_ldap_config.rc != 0

    - name: Restore ldap
      command: "slapadd -F {{ ldap_schema_path }} -l {{ staging_dir }}/ldap.db"
- name: Restore ldap data
  import_role:
    name: backup-restore/restore-ldap
  vars:
    platform_backup_fqpn: "{{ restore_data_file }}"

    - name: Start openldap service
      shell: "export SYSTEMCTL_SKIP_REDIRECT=1; /etc/init.d/openldap start"
  always:
    - name: Delete files from staging dir
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - "{{ staging_dir }}/ldap.db"
        - "{{ staging_dir }}/openldap"

# In order to determine the home dir pattern of the backed up data
# we only check var/home and use that to know how to extract.

@@ -511,50 +449,11 @@
    - sysinv-agent
    - "{{ 'barbican-api' if os_release == 'debian' else 'openstack-barbican-api' }}"

- block:
    - name: Create staging directory for postgres data
      file:
        path: "{{ staging_dir }}/postgres"
        state: directory
        recurse: yes
        owner: root
        group: root
        mode: 0755

    - name: Extract postgres db to staging directory
      command: >-
        tar -C {{ staging_dir }}/postgres -xpf {{ restore_data_file }}
        --wildcards --transform='s,.*/,,' '*/*\.postgreSql\.*'
      args:
        warn: false

    - name: Restore postgres db
      shell: "psql -f {{ item }} {{ (item|basename).split('.')[0] }}"
      become_user: postgres
      with_items:
        - "{{ staging_dir }}/postgres/postgres.postgreSql.config"
        - "{{ staging_dir }}/postgres/postgres.postgreSql.data"
        - "{{ staging_dir }}/postgres/template1.postgreSql.data"
        - "{{ staging_dir }}/postgres/sysinv.postgreSql.data"
        - "{{ staging_dir }}/postgres/keystone.postgreSql.data"
        - "{{ staging_dir }}/postgres/fm.postgreSql.data"
        - "{{ staging_dir }}/postgres/barbican.postgreSql.data"
        - "{{ staging_dir }}/postgres/helmv2.postgreSql.data"

    - name: Restore postgres db for DC systemcontroller
      shell: "psql -f {{ item }} {{ (item|basename).split('.')[0] }}"
      become_user: postgres
      with_items:
        - "{{ staging_dir }}/postgres/dcmanager.postgreSql.data"
        - "{{ staging_dir }}/postgres/dcorch.postgreSql.data"
      when: check_dc_controller.rc == 0

    - name: Remove postgres staging directory
      file:
        path: "{{ staging_dir }}/postgres"
        state: absent

  when: migrate_platform_data is not defined or not migrate_platform_data
- name: Restore Postgres
  import_role:
    name: backup-restore/restore-postgres
  vars:
    platform_backup_fqpn: "{{ restore_data_file }}"

- block:
    - name: Check if fernet keys directory exists in the backup tarball

@@ -6,16 +6,10 @@
#
# ROLE DESCRIPTION:
# This role performs the following tasks:
# 1. Validate user input.
# 2. Verify that the target is in the right state for images restore.
# 3. Transfer the backup tarball to the target if it is off-box, otherwise
# 1. Verify that the target is in the right state for images restore.
# 2. Transfer the backup tarball to the target if it is off-box, otherwise
#    copy it to the designated staging directory (/opt/platform-backup).
# 5. Create restore_in_progress flag.

- name: Perform generic user input validation for restore
  import_role:
    name: backup-restore/validate-input
  become: no
# 3. Create restore_in_progress flag.

- name: Check if restoring user images is already in progress
  stat:

@@ -0,0 +1,56 @@
---
#
# Copyright (c) 2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
# This role validates user input to the restore_user_images playbook
#
- name: Set default target where backup tarball inspection takes place
  set_fact:
    inspection_target: "{{ inventory_hostname }}"

# Set inspection target to Ansible control machine if the backup tarball
# is off-box.
- name: Update target if backup data are off-box
  set_fact:
    inspection_target: localhost
  when: on_box_data|bool == false

- block:
    - name: Fail if backup_filename is not defined or set
      fail:
        msg: "Mandatory configuration parameter backup_filename is not defined or set."
      when: backup_filename is not defined or backup_filename is none

    - name: Fail if the backup file is off-box and initial_backup_dir is not specified
      fail:
        msg: "Parameter initial_backup_dir must be specified if the backup tar file is off box."
      when: (initial_backup_dir is not defined or initial_backup_dir is none) and
            (on_box_data|bool == false)

    - name: Set the initial_backup_dir to /opt/platform-backup if not specified and backup file is on the host
      set_fact:
        initial_backup_dir: /opt/platform-backup
      when: (initial_backup_dir is not defined or initial_backup_dir is none) and
            (on_box_data|bool == true)

    - name: Check if backup file exists
      stat:
        path: "{{ initial_backup_dir }}/{{ backup_filename }}"
      register: backup_stat_result

    - block:
        - name: Fail if backup file does not exist on the target
          fail:
            msg: "Backup file {{ initial_backup_dir }}/{{ backup_filename }} does not exist on the target."
          when: (on_box_data|bool == true)

        - name: Fail if the backup file does not exist locally
          fail:
            msg: "Backup file {{ initial_backup_dir }}/{{ backup_filename }} does not exist on this machine."
          when: (on_box_data|bool == false)
      when: not backup_stat_result.stat.exists

  delegate_to: "{{ inspection_target }}"