Add HAcluster Ansible role
Adds the HAcluster Ansible role. This role contains a High Availability
clustering solution composed of Corosync, Pacemaker and Pacemaker Remote.

HAcluster is added as a helper role for Masakari, which requires it for
its host monitoring, allowing it to provide HA to instances on a failed
compute host.

Kolla hacluster images merged in [1].

[1] https://review.opendev.org/#/c/668765/

Change-Id: I91e5c1840ace8f567daf462c4eb3ec1f0c503823
Implements: blueprint ansible-pacemaker-support
Co-Authored-By: Radosław Piliszek <radoslaw.piliszek@gmail.com>
Co-Authored-By: Mark Goddard <mark@stackhpc.com>
parent 0b0dd35837
commit 9f578c85e0
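
For reviewers who want to exercise the change, a minimal sketch of the deployer-facing switches; the variable names come from this patch and from the existing Masakari support, and the values are illustrative only:

    # /etc/kolla/globals.yml (sketch)
    enable_hacluster: "yes"
    enable_masakari: "yes"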
@@ -599,6 +599,7 @@ enable_freezer: "no"
enable_gnocchi: "no"
enable_gnocchi_statsd: "no"
enable_grafana: "no"
enable_hacluster: "no"
enable_heat: "{{ enable_openstack_core | bool }}"
enable_horizon: "{{ enable_openstack_core | bool }}"
enable_horizon_blazar: "{{ enable_blazar | bool }}"
@@ -69,6 +69,12 @@ storage
[elasticsearch:children]
control

[hacluster:children]
control

[hacluster-remote:children]
compute

[haproxy:children]
network
@@ -93,6 +93,12 @@ storage
[elasticsearch:children]
control

[hacluster:children]
control

[hacluster-remote:children]
compute

[haproxy:children]
network
@@ -182,6 +182,7 @@
  - { name: "glance-tls-proxy", enabled: "{{ glance_enable_tls_backend | bool }}" }
  - { name: "gnocchi", enabled: "{{ enable_gnocchi | bool }}" }
  - { name: "grafana", enabled: "{{ enable_grafana | bool }}" }
  - { name: "hacluster", enabled: "{{ enable_hacluster | bool }}" }
  - { name: "haproxy", enabled: "{{ enable_haproxy | bool }}" }
  - { name: "heat", enabled: "{{ enable_heat | bool }}" }
  - { name: "horizon", enabled: "{{ enable_horizon | bool }}" }
@@ -0,0 +1,3 @@
"/var/log/kolla/hacluster/*.log"
{
}
ansible/roles/hacluster/defaults/main.yml (new file, 100 lines)
@@ -0,0 +1,100 @@
---
project_name: "hacluster"

hacluster_services:
  hacluster-corosync:
    container_name: "hacluster_corosync"
    group: "hacluster"
    enabled: true
    image: "{{ hacluster_corosync_image_full }}"
    volumes: "{{ hacluster_corosync_default_volumes + hacluster_corosync_extra_volumes }}"
    ipc_mode: "host"
    cap_add:
      - SYS_NICE
      - IPC_LOCK
      - NET_ADMIN
    dimensions: "{{ hacluster_corosync_dimensions }}"
  hacluster-pacemaker:
    container_name: "hacluster_pacemaker"
    group: "hacluster"
    enabled: true
    image: "{{ hacluster_pacemaker_image_full }}"
    environment:
      PCMK_logfile: /var/log/kolla/hacluster/pacemaker.log
      PCMK_debug: "{{ 'on' if openstack_logging_debug | bool else 'off' }}"
    volumes: "{{ hacluster_pacemaker_default_volumes + hacluster_pacemaker_extra_volumes }}"
    ipc_mode: "host"
    dimensions: "{{ hacluster_pacemaker_dimensions }}"
  hacluster-pacemaker-remote:
    container_name: "hacluster_pacemaker_remote"
    group: "hacluster-remote"
    enabled: true
    image: "{{ hacluster_pacemaker_remote_image_full }}"
    volumes: "{{ hacluster_pacemaker_remote_default_volumes + hacluster_pacemaker_remote_extra_volumes }}"
    ipc_mode: "host"
    dimensions: "{{ hacluster_pacemaker_remote_dimensions }}"

####################
# HAProxy
####################


####################
# Docker
####################

hacluster_tag: "{{ openstack_tag }}"

hacluster_corosync_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-hacluster-corosync"
hacluster_corosync_tag: "{{ openstack_tag }}"
hacluster_corosync_image_full: "{{ hacluster_corosync_image }}:{{ hacluster_corosync_tag }}"

hacluster_pacemaker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-hacluster-pacemaker"
hacluster_pacemaker_tag: "{{ openstack_tag }}"
hacluster_pacemaker_image_full: "{{ hacluster_pacemaker_image }}:{{ hacluster_pacemaker_tag }}"

hacluster_pacemaker_remote_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-hacluster-pacemaker-remote"
hacluster_pacemaker_remote_tag: "{{ openstack_tag }}"
hacluster_pacemaker_remote_image_full: "{{ hacluster_pacemaker_remote_image }}:{{ hacluster_pacemaker_remote_tag }}"

hacluster_corosync_dimensions: "{{ default_container_dimensions }}"
hacluster_pacemaker_dimensions: "{{ default_container_dimensions }}"
hacluster_pacemaker_remote_dimensions: "{{ default_container_dimensions }}"

hacluster_corosync_default_volumes:
  - "{{ node_config_directory }}/hacluster-corosync/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "{{ '/etc/timezone:/etc/timezone:ro' if kolla_base_distro in ['debian', 'ubuntu'] else '' }}"
  - "kolla_logs:/var/log/kolla/"
  - "hacluster_corosync:/var/lib/corosync"
hacluster_pacemaker_default_volumes:
  - "{{ node_config_directory }}/hacluster-pacemaker/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "{{ '/etc/timezone:/etc/timezone:ro' if kolla_base_distro in ['debian', 'ubuntu'] else '' }}"
  - "kolla_logs:/var/log/kolla/"
  - "hacluster_pacemaker:/var/lib/pacemaker"
hacluster_pacemaker_remote_default_volumes:
  - "{{ node_config_directory }}/hacluster-pacemaker-remote/:{{ container_config_directory }}/:ro"
  - "/etc/localtime:/etc/localtime:ro"
  - "{{ '/etc/timezone:/etc/timezone:ro' if kolla_base_distro in ['debian', 'ubuntu'] else '' }}"
  - "kolla_logs:/var/log/kolla/"
  - "hacluster_pacemaker_remote:/var/lib/pacemaker"

hacluster_extra_volumes: "{{ default_extra_volumes }}"
hacluster_corosync_extra_volumes: "{{ hacluster_extra_volumes }}"
hacluster_pacemaker_extra_volumes: "{{ hacluster_extra_volumes }}"
hacluster_pacemaker_remote_extra_volumes: "{{ hacluster_extra_volumes }}"


####################
# Corosync options
####################

# this is UDP port
hacluster_corosync_port: 5405


####################
# Pacemaker options
####################

# this is TCP port
hacluster_pacemaker_remote_port: 3121
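
These are ordinary role defaults, so deployers can override any of them from globals.yml like other Kolla-Ansible variables. A hedged sketch, assuming the usual kolla_docker dimensions keys such as mem_limit; the values are examples, not recommendations:

    # /etc/kolla/globals.yml (illustrative overrides)
    hacluster_corosync_tag: "{{ openstack_tag }}"   # or pin an explicit image tag here
    hacluster_pacemaker_dimensions:
      mem_limit: 1g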
ansible/roles/hacluster/handlers/main.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
- name: Restart hacluster-corosync container
  vars:
    service_name: "hacluster-corosync"
    service: "{{ hacluster_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    ipc_mode: "{{ service.ipc_mode }}"
    cap_add: "{{ service.cap_add }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart hacluster-pacemaker container
  vars:
    service_name: "hacluster-pacemaker"
    service: "{{ hacluster_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    environment: "{{ service.environment }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    ipc_mode: "{{ service.ipc_mode }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"

- name: Restart hacluster-pacemaker-remote container
  vars:
    service_name: "hacluster-pacemaker-remote"
    service: "{{ hacluster_services[service_name] }}"
  become: true
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    volumes: "{{ service.volumes|reject('equalto', '')|list }}"
    ipc_mode: "{{ service.ipc_mode }}"
    dimensions: "{{ service.dimensions }}"
  when:
    - kolla_action != "config"
ansible/roles/hacluster/tasks/bootstrap.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
---
- name: Ensure config directories exist
  file:
    path: "{{ node_custom_config }}/{{ item }}"
    state: directory
  delegate_to: localhost
  changed_when: False
  check_mode: no
  run_once: True
  with_items:
    - hacluster-corosync
    - hacluster-pacemaker

- name: Check if Corosync authkey file exists
  stat:
    path: "{{ node_custom_config }}/hacluster-corosync/authkey"
  delegate_to: localhost
  run_once: True
  register: hacluster_corosync_authkey_file

- name: Check if Pacemaker authkey file exists
  stat:
    path: "{{ node_custom_config }}/hacluster-pacemaker/authkey"
  delegate_to: localhost
  run_once: True
  register: hacluster_pacemaker_authkey_file

- name: Generating Corosync authkey file
  command: "dd if=/dev/urandom of={{ node_custom_config }}/hacluster-corosync/authkey bs=4096 count=1"
  delegate_to: localhost
  changed_when: False
  check_mode: no
  run_once: True
  when: not hacluster_corosync_authkey_file.stat.exists

- name: Generating Pacemaker authkey file
  command: "dd if=/dev/urandom of={{ node_custom_config }}/hacluster-pacemaker/authkey bs=4096 count=1"
  delegate_to: localhost
  changed_when: False
  check_mode: no
  run_once: True
  when: not hacluster_pacemaker_authkey_file.stat.exists
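
The dd tasks above only run when no authkey exists under the node_custom_config directory, so operators who prefer to manage the keys themselves can pre-create the files before deploying. A hedged sketch of a one-off play doing exactly that; the play and the local files/ paths are illustrative and not part of this patch (node_custom_config defaults to /etc/kolla/config in kolla-ansible):

    # Illustrative only: pre-seed auth keys so the "Generating ..." tasks are skipped.
    - hosts: localhost
      gather_facts: false
      vars:
        node_custom_config: /etc/kolla/config
      tasks:
        - name: Ensure the custom config directories exist
          file:
            path: "{{ node_custom_config }}/{{ item }}"
            state: directory
          loop:
            - hacluster-corosync
            - hacluster-pacemaker
        - name: Copy operator-managed Corosync authkey
          copy:
            src: files/corosync-authkey    # hypothetical local file
            dest: "{{ node_custom_config }}/hacluster-corosync/authkey"
            mode: "0600"
        - name: Copy operator-managed Pacemaker authkey
          copy:
            src: files/pacemaker-authkey   # hypothetical local file
            dest: "{{ node_custom_config }}/hacluster-pacemaker/authkey"
            mode: "0600"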
ansible/roles/hacluster/tasks/bootstrap_service.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: Ensure stonith is disabled
  vars:
    service: "{{ hacluster_services['hacluster-pacemaker'] }}"
  command: docker exec {{ service.container_name }} crm_attribute --type crm_config --name stonith-enabled --update false
  run_once: true
  become: true
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool

- name: Ensure remote node is added
  vars:
    pacemaker_service: "{{ hacluster_services['hacluster-pacemaker'] }}"
    pacemaker_remote_service: "{{ hacluster_services['hacluster-pacemaker-remote'] }}"
  shell: >
    docker exec {{ pacemaker_service.container_name }}
    cibadmin --modify --scope resources -X '
    <resources>
    <primitive id="{{ ansible_hostname }}" class="ocf" provider="pacemaker" type="remote">
    <instance_attributes id="{{ ansible_hostname }}-instance_attributes">
    <nvpair id="{{ ansible_hostname }}-instance_attributes-server" name="server" value="{{ 'api' | kolla_address }}"/>
    </instance_attributes>
    <operations>
    <op id="{{ ansible_hostname }}-monitor" name="monitor" interval="60" timeout="30"/>
    </operations>
    </primitive>
    </resources>
    '
  become: true
  delegate_to: "{{ groups[pacemaker_service.group][0] }}"
  when:
    - inventory_hostname in groups[pacemaker_remote_service.group]
    - pacemaker_remote_service.enabled | bool
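
The cibadmin call registers each compute as a Pacemaker remote resource from the first full cluster member. A hedged sketch of an extra verification task one could run afterwards; crm_mon --one-shot is the same probe the CI test later in this change relies on, and the container name comes from the role defaults:

    # Illustrative only: inspect cluster membership from a controller.
    - name: Show cluster status from the pacemaker container
      become: true
      command: docker exec hacluster_pacemaker crm_mon --one-shot
      register: crm_mon_status
      changed_when: false

    - name: Print the cluster status
      debug:
        msg: "{{ crm_mon_status.stdout_lines }}"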
ansible/roles/hacluster/tasks/check-containers.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
---
- name: Check hacluster containers
  become: true
  kolla_docker:
    action: "compare_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image | default(omit) }}"
    volumes: "{{ service.volumes | default(omit) }}"
    dimensions: "{{ service.dimensions | default(omit) }}"
    volumes_from: "{{ service.volumes_from | default(omit) }}"
    privileged: "{{ service.privileged | default(omit) }}"
    cap_add: "{{ service.cap_add | default(omit) }}"
    environment: "{{ service.environment | default(omit) }}"
    ipc_mode: "{{ service.ipc_mode | default(omit) }}"
    pid_mode: "{{ service.pid_mode | default(omit) }}"
    security_opt: "{{ service.security_opt | default(omit) }}"
    labels: "{{ service.labels | default(omit) }}"
    command: "{{ service.command | default(omit) }}"
  vars:
    service_name: "{{ item.key }}"
    service: "{{ item.value }}"
  with_dict: "{{ hacluster_services | select_services_enabled_and_mapped_to_host }}"
  notify:
    - "Restart {{ service_name }} container"
ansible/roles/hacluster/tasks/check.yml (new file, 1 line)
@@ -0,0 +1 @@
---
ansible/roles/hacluster/tasks/config.yml (new file, 96 lines)
@@ -0,0 +1,96 @@
---
- name: Ensuring config directories exist
  become: true
  file:
    path: "{{ node_config_directory }}/{{ item.key }}"
    state: "directory"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
    mode: "0770"
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ hacluster_services }}"

- name: Copying over config.json files for services
  become: true
  template:
    src: "{{ item.key }}.json.j2"
    dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
    mode: "0660"
  register: config_jsons
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ hacluster_services }}"
  notify:
    - "Restart {{ item.key }} container"

- name: Copying over corosync.conf into hacluster-corosync
  vars:
    service: "{{ hacluster_services['hacluster-corosync'] }}"
  template:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/hacluster-corosync/corosync.conf"
    mode: "0660"
  become: true
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_first_found:
    - "{{ node_custom_config }}/hacluster-corosync/{{ inventory_hostname }}/corosync.conf"
    - "{{ node_custom_config }}/hacluster-corosync/corosync.conf"
    - "hacluster_corosync.conf.j2"
  notify:
    - Restart hacluster-corosync container

- name: Copying over Corosync authkey file
  vars:
    service: "{{ hacluster_services['hacluster-corosync'] }}"
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/hacluster-corosync/authkey"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_first_found:
    - "{{ node_custom_config }}/hacluster-corosync/{{ inventory_hostname }}/authkey"
    - "{{ node_custom_config }}/hacluster-corosync/authkey"
  notify:
    - Restart hacluster-corosync container

- name: Copying over Pacemaker authkey file
  vars:
    service: "{{ hacluster_services['hacluster-pacemaker'] }}"
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/hacluster-pacemaker/authkey"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_first_found:
    - "{{ node_custom_config }}/hacluster-pacemaker/{{ inventory_hostname }}/authkey"
    - "{{ node_custom_config }}/hacluster-pacemaker/authkey"
  notify:
    - Restart hacluster-pacemaker container

- name: Copying over Pacemaker authkey file into hacluster-pacemaker-remote
  vars:
    service: "{{ hacluster_services['hacluster-pacemaker-remote'] }}"
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/hacluster-pacemaker-remote/authkey"
    mode: "0600"
  become: true
  when:
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_first_found:
    - "{{ node_custom_config }}/hacluster-pacemaker/{{ inventory_hostname }}/authkey"
    - "{{ node_custom_config }}/hacluster-pacemaker/authkey"
  notify:
    - Restart hacluster-pacemaker-remote container
ansible/roles/hacluster/tasks/deploy-containers.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
- import_tasks: check-containers.yml
ansible/roles/hacluster/tasks/deploy.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
- import_tasks: bootstrap.yml

- import_tasks: config.yml

- import_tasks: check-containers.yml

- name: Flush handlers
  meta: flush_handlers

- import_tasks: bootstrap_service.yml
ansible/roles/hacluster/tasks/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
- include_tasks: "{{ kolla_action }}.yml"
ansible/roles/hacluster/tasks/precheck.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
---
- name: Get container facts
  become: true
  kolla_container_facts:
    name:
      - hacluster_pacemaker_remote
  register: container_facts

# NOTE(yoctozepto): Corosync runs over UDP so one cannot use wait_for to check
# for it being up or down (TCP-only). In fact, such prechecks should only really
# check if the port is taken already by the host and not contact it.

# NOTE(yoctozepto): The below is a slight simplification because
# pacemaker_remoted always listens on all addresses (wildcard listen).
- name: Check free port for Pacemaker Remote
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ hacluster_pacemaker_remote_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['hacluster_pacemaker_remote'] is not defined
    - inventory_hostname in groups['hacluster-remote']
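
As the note explains, wait_for cannot probe a UDP listener, so the Corosync port is not prechecked here. If such a host-local check were wanted, a hedged sketch along these lines might work; it assumes hacluster_corosync is also added to the kolla_container_facts query above, and it is not part of this patch:

    # Illustrative only: fail when something already binds the Corosync UDP port.
    - name: Check that the Corosync UDP port is free
      command: "ss -H -l -u -n sport = :{{ hacluster_corosync_port }}"
      register: corosync_port_check
      changed_when: false
      failed_when: corosync_port_check.stdout | length > 0
      when: container_facts['hacluster_corosync'] is not defined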
ansible/roles/hacluster/tasks/pull.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
- name: Pulling hacluster images
  become: true
  kolla_docker:
    action: "pull_image"
    common_options: "{{ docker_common_options }}"
    image: "{{ item.value.image }}"
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ hacluster_services }}"
ansible/roles/hacluster/tasks/reconfigure.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
- import_tasks: deploy.yml
ansible/roles/hacluster/tasks/stop.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- import_role:
    role: service-stop
  vars:
    project_services: "{{ hacluster_services }}"
    service_name: "{{ project_name }}"
ansible/roles/hacluster/tasks/upgrade.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
- import_tasks: deploy.yml
ansible/roles/hacluster/templates/hacluster-corosync.json.j2 (new file, 17 lines)
@@ -0,0 +1,17 @@
{
    "command": "/usr/sbin/corosync -f",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/corosync.conf",
            "dest": "/etc/corosync/corosync.conf",
            "owner": "root",
            "perm": "0400"
        },
        {
            "source": "{{ container_config_directory }}/authkey",
            "dest": "/etc/corosync/authkey",
            "owner": "root",
            "perm": "0400"
        }
    ]
}
@@ -0,0 +1,11 @@
{
    "command": "/usr/sbin/pacemaker_remoted -l /var/log/kolla/hacluster/pacemaker-remoted.log{% if openstack_logging_debug | bool %} -VV{% endif %} -p {{ hacluster_pacemaker_remote_port }}",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/authkey",
            "dest": "/etc/pacemaker/authkey",
            "owner": "root",
            "perm": "0400"
        }
    ]
}
@@ -0,0 +1,11 @@
{
    "command": "/usr/sbin/pacemakerd -f",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/authkey",
            "dest": "/etc/pacemaker/authkey",
            "owner": "hacluster:haclient",
            "perm": "0400"
        }
    ]
}
ansible/roles/hacluster/templates/hacluster_corosync.conf.j2 (new file, 36 lines)
@@ -0,0 +1,36 @@
totem {
    version: 2
    cluster_name: kolla-hacluster
    crypto_cipher: aes256
    crypto_hash: sha384
    secauth: yes
    transport: knet
    # NOTE(yoctozepto): despite the name, this controls knet recv port
    mcastport: {{ hacluster_corosync_port }}
}

nodelist {
{% for host in groups['hacluster'] | sort %}
    node {
        ring0_addr: {{ 'api' | kolla_address(host) }}
        name: {{ hostvars[host]['ansible_hostname'] }}
        nodeid: {{ loop.index }}
    }
{% endfor %}
}

quorum {
    provider: corosync_votequorum
{% if groups['hacluster'] | length == 2 %}
    two_node: 1
{% endif %}
}

logging {
    debug: {{ 'on' if openstack_logging_debug | bool else 'off' }}
    to_logfile: yes
    logfile: /var/log/kolla/hacluster/corosync.log
    to_stderr: no
    to_syslog: no
    timestamp: on
}
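
Since mcastport here is effectively the knet receive port, moving the cluster to a different UDP port only requires overriding the role default; a hedged globals.yml sketch (the variable is defined in the role defaults above, the value is just an example):

    # /etc/kolla/globals.yml (illustrative)
    hacluster_corosync_port: 5415   # must be free as UDP on every hacluster host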
@@ -35,6 +35,7 @@
        - enable_gnocchi_{{ enable_gnocchi | bool }}
        - enable_grafana_{{ enable_grafana | bool }}
        - enable_haproxy_{{ enable_haproxy | bool }}
        - enable_hacluster_{{ enable_hacluster | bool }}
        - enable_heat_{{ enable_heat | bool }}
        - enable_horizon_{{ enable_horizon | bool }}
        - enable_influxdb_{{ enable_influxdb | bool }}
@@ -731,6 +732,18 @@
        tags: kuryr,
        when: enable_kuryr | bool }

- name: Apply role hacluster
  gather_facts: false
  hosts:
    - hacluster
    - hacluster-remote
    - '&enable_hacluster_True'
  serial: '{{ kolla_serial|default("0") }}'
  roles:
    - { role: hacluster,
        tags: hacluster,
        when: enable_hacluster | bool }

- name: Apply role heat
  gather_facts: false
  hosts:
@@ -256,6 +256,7 @@
# These roles are required for Kolla to be operational; however, a savvy deployer
# could disable some of these required roles and run their own services.
#enable_glance: "{{ enable_openstack_core | bool }}"
#enable_hacluster: "no"
#enable_haproxy: "yes"
#enable_keepalived: "{{ enable_haproxy | bool }}"
#enable_keystone: "{{ enable_openstack_core | bool }}"
@@ -0,0 +1,9 @@
---
features:
  - |
    Adds the HAcluster Ansible role. This role contains a High Availability
    clustering solution composed of Corosync, Pacemaker and Pacemaker Remote.

    HAcluster is added as a helper role for Masakari, which requires it for
    its host monitoring, allowing it to provide HA to instances on a failed
    compute host.
@@ -78,7 +78,7 @@ function prepare_images {
        GATE_IMAGES+=",^octavia"
    fi
    if [[ $SCENARIO == "masakari" ]]; then
-       GATE_IMAGES+=",^masakari"
+       GATE_IMAGES+=",^masakari-,^hacluster-"
    fi

    if [[ $SCENARIO == "swift" ]]; then
@@ -106,6 +106,7 @@ ironic_dnsmasq_dhcp_range: "10.42.0.2,10.42.0.254"

{% if scenario == "masakari" %}
enable_masakari: "yes"
enable_hacluster: "yes"
{% endif %}

{% if scenario == "cells" %}
@@ -125,6 +125,32 @@ storage
[elasticsearch:children]
control

# NOTE(yoctozepto): Until we are able to isolate network namespaces in k-a,
# we are forced to separate remotes from full members.
# This is not as bad as it sounds, because it would be enforced in
# non-containerised environments anyway.
#[hacluster:children]
#control
[hacluster]
{% for host in hostvars %}
{% if 'ternary' not in host %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }}
{% endif %}
{% endfor %}

# NOTE(yoctozepto): Until we are able to isolate network namespaces in k-a,
# we are forced to separate remotes from full members.
# This is not as bad as it sounds, because it would be enforced in
# non-containerised environments anyway.
#[hacluster-remote:children]
#compute
[hacluster-remote]
{% for host in hostvars %}
{% if 'ternary' in host %}
{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user=kolla ansible_ssh_private_key_file={{ ansible_env.HOME ~ '/.ssh/id_rsa_kolla' }}
{% endif %}
{% endfor %}

[haproxy:children]
network
@@ -7,6 +7,52 @@ set -o pipefail
# Enable unbuffered output for Ansible in Jenkins.
export PYTHONUNBUFFERED=1

function test_hacluster_logged {
    local cluster_failure
    cluster_failure=0

    # NOTE(yoctozepto): repeated -V in commands below is used to get 'debug'
    # output; the right amount differs between command sets; the next level is
    # 'trace' which is overly verbose; PCMK_debug=no is used to revert the env
    # var setting from the container which would cause these commands to log up
    # to 'trace' (likely a pacemaker bug)

    if ! sudo docker exec hacluster_pacemaker cibadmin -VVVVVV --query --local; then
        cluster_failure=1
    fi

    local mon_output

    if ! mon_output=$(sudo docker exec -e PCMK_debug=no hacluster_pacemaker crm_mon -VVVVV --one-shot); then
        cluster_failure=1
    fi

    if ! sudo docker exec -e PCMK_debug=no hacluster_pacemaker crm_verify -VVVVV --live-check; then
        cluster_failure=1
    fi

    # NOTE(yoctozepto): crm_mon output should include:
    # * Online: [ primary secondary ]
    # * RemoteOnline: [ ternary1 ternary2 ]

    if ! echo "$mon_output" | grep 'Online: \[ primary secondary \]'; then
        echo 'Full members missing' >&2
        cluster_failure=1
    fi

    if ! echo "$mon_output" | grep 'RemoteOnline: \[ ternary1 ternary2 \]'; then
        echo 'Remote members missing' >&2
        cluster_failure=1
    fi

    if [[ $cluster_failure -eq 1 ]]; then
        echo "HAcluster failed"
        return 1
    else
        echo "HAcluster healthy"
    fi
}

function test_masakari_logged {
    # Source OpenStack credentials
    . /etc/kolla/admin-openrc.sh
@@ -14,23 +60,14 @@ function test_masakari_logged {
    # Activate virtualenv to access Masakari client
    . ~/openstackclient-venv/bin/activate

-   # Get the first Nova compute
-   if ! HYPERVISOR=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | head -n1); then
-       echo "Unable to get Nova hypervisor list"
-       return 1
-   fi
-
    # Create Masakari segment
    if ! openstack segment create test_segment auto COMPUTE; then
        echo "Unable to create Masakari segment"
        return 1
    fi

-   # Add Nova compute to Masakari segment
-   if ! openstack segment host create $HYPERVISOR COMPUTE SSH test_segment; then
-       echo "Unable to add Nova hypervisor to Masakari segment"
-       return 1
-   fi
+   openstack segment host create ternary1 COMPUTE SSH test_segment
+   openstack segment host create ternary2 COMPUTE SSH test_segment

    # Delete Masakari segment
    if ! openstack segment delete test_segment; then
@@ -44,6 +81,7 @@ function test_masakari_logged {

function test_masakari {
    echo "Testing Masakari"
    test_hacluster_logged > /tmp/logs/ansible/test-hacluster 2>&1
    test_masakari_logged > /tmp/logs/ansible/test-masakari 2>&1
    result=$?
    if [[ $result != 0 ]]; then
@@ -150,6 +150,7 @@
    voting: false
    files:
      - ^ansible/roles/masakari/
      - ^ansible/roles/hacluster/
      - ^tests/test-masakari.sh
      - ^tests/test-dashboard.sh
    vars:
@@ -295,7 +295,7 @@
- job:
    name: kolla-ansible-ubuntu-source-masakari
    parent: kolla-ansible-masakari-base
-   nodeset: kolla-ansible-focal
+   nodeset: kolla-ansible-focal-masakari
    vars:
      base_distro: ubuntu
      install_type: source
@@ -303,7 +303,7 @@
- job:
    name: kolla-ansible-centos8s-source-masakari
    parent: kolla-ansible-masakari-base
-   nodeset: kolla-ansible-centos8s
+   nodeset: kolla-ansible-centos8s-masakari
    vars:
      base_distro: centos
      install_type: source
@@ -154,3 +154,27 @@
      - secondary3
      - secondary4
      - secondary5

- nodeset:
    name: kolla-ansible-focal-masakari
    nodes:
      - name: primary
        label: ubuntu-focal
      - name: secondary
        label: ubuntu-focal
      - name: ternary1
        label: ubuntu-focal
      - name: ternary2
        label: ubuntu-focal

- nodeset:
    name: kolla-ansible-centos8s-masakari
    nodes:
      - name: primary
        label: centos-8-stream
      - name: secondary
        label: centos-8-stream
      - name: ternary1
        label: centos-8-stream
      - name: ternary2
        label: centos-8-stream