tripleo-heat-templates/extraconfig/services/openshift-master.yaml
Martin André bb1a1209ac Rework the generated openshift-ansible playbook
The `prerequisites.yml` playbook should only be explicitly run on
initial deployment to prepare the nodes. It is already included in the
scaleup playbooks for the new nodes so there is no need to include it
again. Re-running the `prerequisites.yml` playbook reconfigures the
container runtime and may cause outage, it is supposed to be run only
once.

Make update and upgrade playbooks exclusive. There is no need to run
both of them.

Add comments to clarify the intent of each playbook.

Change-Id: I30278360fcc1ffa9bd7ce7cb77d023629fb6fa47
Closes-Bug: #1804790
2019-01-10 09:02:34 +01:00

538 lines
24 KiB
YAML

# NOTE(review): indentation was lost in this copy of the file; structure below
# is reconstructed to canonical heat-template layout — verify against upstream
# tripleo-heat-templates extraconfig/services/openshift-master.yaml.
heat_template_version: rocky

description: External tasks definition for OpenShift

parameters:
  # Top-level stack action, injected by TripleO; gates CREATE-only steps
  # (e.g. the prerequisites playbook) vs UPDATE-only scale-up logic below.
  StackAction:
    type: string
    description: >
      Heat action on performed top-level stack. Note StackUpdateType is
      set to UPGRADE when a major-version upgrade is in progress.
    constraints:
      - allowed_values: ['CREATE', 'UPDATE']
  RoleNetIpMap:
    default: {}
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: >-
      Mapping of service_name -> network name. Typically set via
      parameter_defaults in the resource registry. This mapping
      overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: >-
      Mapping of service endpoint -> protocol. Typically set via
      parameter_defaults in the resource registry.
    type: json
  # Paths to the openshift-ansible playbooks invoked from the generated
  # top-level playbook (see external_deploy_tasks in outputs).
  OpenShiftAnsiblePlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml'
    description: Path to OpenShift-Ansible deploy playbook.
    type: string
  OpenShiftPrerequisitesPlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
    description: Path to OpenShift-Ansible prerequisites playbook.
    type: string
  OpenShiftMasterScaleupPlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-master/scaleup.yml'
    description: Path to OpenShift-Ansible master scale-up playbook.
    type: string
  OpenShiftEtcdScaleupPlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-etcd/scaleup.yml'
    description: Path to OpenShift-Ansible etcd scale-up playbook.
    type: string
  OpenShiftWorkerScaleupPlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml'
    description: Path to OpenShift-Ansible node scale-up playbook.
    type: string
  OpenShiftGlobalVariables:
    default: {}
    description: Global Ansible variables for OpenShift-Ansible installer.
    type: json
  OpenShiftMasterNodeVars:
    default: {}
    description: OpenShift node vars specific for the master nodes
    type: json
  OpenShiftNodeGroupName:
    default: node-config-all-in-one
    description: The group the nodes belong to.
    type: string
    tags:
      - role_specific
  DockerInsecureRegistryAddress:
    description: >-
      Optional. The IP Address and Port of an insecure docker namespace
      that will be configured in /etc/sysconfig/docker. The value can be
      multiple addresses separated by commas.
    type: comma_delimited_list
    default: []
  DockerOpenShiftAnsibleImage:
    description: Openshift-ansible container image.
    type: string
  DockerOpenShiftControlPlaneImage:
    description: Control Plane container image for openshift.
    type: string
  DockerOpenShiftCockpitImage:
    description: Cockpit container image for openshift
    type: string
  DockerOpenShiftNodeImage:
    description: Node container image for openshift
    type: string
  DockerOpenShiftEtcdImage:
    description: etcd container image for openshift
    type: string
# NOTE(review): indentation reconstructed (lost in this copy); verify the
# map_replace nesting against upstream tripleo-heat-templates.
resources:

  # Shared OpenShift node configuration pulled in by every OpenShift role.
  OpenShiftNode:
    type: ./openshift-node.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Resolve the effective OpenShiftNodeGroupName for this role: a value set
  # in role-specific RoleParameters overrides the top-level parameter.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
              - OpenShiftNodeGroupName: OpenShiftNodeGroupName
              - values: {get_param: [RoleParameters]}
          - values:
              OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
# NOTE(review): all indentation below is reconstructed — it was stripped from
# this copy of the file. The heat-level nesting follows standard TripleO
# service-template layout; the interior indentation of Jinja-templated
# `content: |` strings (generated inventories/playbooks) could not be
# recovered from the flattened text and MUST be verified against upstream
# tripleo-heat-templates before use.
outputs:
  role_data:
    description: Role data for the Openshift Service
    value:
      service_name: openshift_master
      config_settings:
        map_merge:
          - get_attr: [OpenShiftNode, role_data, config_settings]
          - tripleo::keepalived::virtual_router_id_base: 100
      upgrade_tasks: []
      step_config: ''
      external_deploy_tasks:
        list_concat:
          - get_attr: [OpenShiftNode, role_data, external_deploy_tasks]
          - - name: openshift_master step 1
              when: step == '1'
              tags: openshift
              block:
                # NOTE(mandre) Remove inventory directory as it may contain
                # obsolete files that can mess up the current deployment
                - name: remove inventory directory if it exists
                  file:
                    path: "{{playbook_dir}}/openshift/inventory"
                    state: absent
                - name: create openshift inventory directory
                  file:
                    path: "{{playbook_dir}}/openshift/inventory"
                    state: directory
            - name: openshift_master step 3
              when: step == '3'
              tags: openshift
              block:
                - name: set role facts for generating inventory
                  set_fact:
                    tripleo_role_name: {get_param: RoleName}
                    tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
                - name: set openshift global vars fact
                  set_fact:
                    openshift_global_vars:
                      map_merge:
                        - openshift_release: '3.11'
                          openshift_version: '3.11'
                          # Derive the image tag from the control plane image ref
                          openshift_image_tag:
                            yaql:
                              expression:
                                $.data.image.rightSplit(":", 1)[1]
                              data:
                                image: {get_param: DockerOpenShiftControlPlaneImage}
                          openshift_enable_excluders: false
                          openshift_deployment_type: "{{tripleo_openshift_deployment_type}}"
                          openshift_use_dnsmasq: true
                          openshift_use_external_openvswitch: true
                          openshift_docker_selinux_enabled: false
                          # Disable services we're not using for now
                          openshift_enable_service_catalog: false
                          template_service_broker_install: false
                          containerized: true
                          # Needed for containerized deployment
                          skip_version: true
                          # Fatal and Errors only
                          debug_level: 0
                          # Local Registry
                          oreg_url:
                            yaql:
                              expression:
                                $.data.image.replace("-control-plane:", "-${component}:")
                              data:
                                image: {get_param: DockerOpenShiftControlPlaneImage}
                          etcd_image: {get_param: DockerOpenShiftEtcdImage}
                          osm_etcd_image: {get_param: DockerOpenShiftEtcdImage}
                          osm_image: {get_param: DockerOpenShiftControlPlaneImage}
                          osn_image: {get_param: DockerOpenShiftNodeImage}
                          openshift_cockpit_deployer_image: {get_param: DockerOpenShiftCockpitImage}
                          openshift_docker_insecure_registries: {get_param: DockerInsecureRegistryAddress}
                          openshift_master_bootstrap_auto_approve: true
                          osm_controller_args: {"experimental-cluster-signing-duration": ["20m"]}
                        # Operator-supplied globals override the defaults above
                        - {get_param: OpenShiftGlobalVariables}
                    tripleo_stack_action: {get_param: StackAction}
                    openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
                    openshift_master_cluster_vars:
                      openshift_master_cluster_hostname: {get_param: [EndpointMap, OpenshiftInternal, host]}
                      openshift_master_cluster_public_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
                # NOTE(flaper87): Check if the node service is running in the
                # openshift nodes so we can flag the node as new later on.
                - name: Check if node service is running
                  command: "systemctl is-active --quiet {{tripleo_openshift_service_type}}-node"
                  register: node_services
                  delegate_to: "{{item}}"
                  with_items: "{{ groups[tripleo_role_name] | default([]) }}"
                  failed_when: false
                # We consider new_node all the nodes that exited with a non-zero
                # status in the previous task *IF* this is a stack update.
                # Openshift-ansible expects nodes to be in the new_nodes group for
                # scale up operation only.
                - set_fact:
                    nodes:
                      - new_node: "{{tripleo_stack_action == 'UPDATE' and node_services.results | selectattr('item', 'equalto', item) | selectattr('rc', 'greaterthan', 0) | list | count > 0}}"
                        hostname: "{{item}}"
                  register: all_master_nodes
                  with_items: "{{groups[tripleo_role_name] | default([]) }}"
                - set_fact:
                    master_nodes: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', False) | list}}"
                    new_master_nodes: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}}"
                # NOTE(review): indentation inside the inventory template below
                # is reconstructed — confirm the rendered file against upstream.
                - name: generate openshift inventory for openshift_master service
                  copy:
                    dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
                    content: |
                      {% if (master_nodes + new_master_nodes) | count > 0%}
                      all:
                        children:
                          masters:
                            hosts:
                      {% for host in master_nodes -%}
                              {{host.hostname}}:
                      {% endfor %}
                            vars:
                              {{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
                      {% if new_master_nodes | count > 0 -%}
                          new_masters:
                            hosts:
                      {% for host in new_master_nodes -%}
                              {{host.hostname}}:
                      {% endfor %}
                            vars:
                              {{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
                      {% endif %}
                      {% endif %}
                - name: generate openshift inventory for groups
                  copy:
                    dest: "{{playbook_dir}}/openshift/inventory/groups.yml"
                    content: |
                      all:
                        children:
                          etcd:
                            children:
                              masters: {}
                          OSEv3:
                            children:
                              masters: {}
                              etcd: {}
                              nodes: {}
                              new_masters: {}
                              new_etcd: {}
                              new_nodes: {}
                      {% if groups['openshift_glusterfs'] | default([]) -%}
                              glusterfs: {}
                              glusterfs_registry: {}
                      {%- endif %}
                # On multi-node layouts merge the cluster VIP hostnames into the
                # global vars; the all-in-one group needs no cluster hostnames.
                - name: combine cluster setting
                  set_fact:
                    openshift_global_vars: "{{ openshift_master_cluster_vars | combine(openshift_global_vars) }}"
                  when: 'tripleo_node_group_name != "node-config-all-in-one"'
                - name: generate openshift global vars
                  copy:
                    dest: "{{playbook_dir}}/openshift/global_vars.yml"
                    content: "{{openshift_global_vars|to_nice_yaml}}"
                - name: set openshift ansible playbook paths
                  set_fact:
                    openshift_ansible_playbook_path: {get_param: OpenShiftAnsiblePlaybook}
                    openshift_prerequisites_playbook_path: {get_param: OpenShiftPrerequisitesPlaybook}
                    openshift_master_scaleup_playbook_path: {get_param: OpenShiftMasterScaleupPlaybook}
                    openshift_etcd_scaleup_playbook_path: {get_param: OpenShiftEtcdScaleupPlaybook}
                    openshift_worker_scaleup_playbook_path: {get_param: OpenShiftWorkerScaleupPlaybook}
                    openshift_ansible_image: {get_param: DockerOpenShiftAnsibleImage}
                # NOTE(flaper87): We'll use openshift_ansible_scaleup_playbook_path
                # if there are new master or new worker nodes and we are doing an
                # UPDATE. For all the other cases, we shall use the deploy playbook.
                - name: generate openshift playbook
                  copy:
                    dest: "{{playbook_dir}}/openshift/playbook.yml"
                    content: |
                      # NOTE(flaper87): The NetworkManager setup has been moved
                      # into openshift-ansible but it's not been released yet.
                      # This code will go away as soon as an rpm with the required
                      # roles hits the repo.
                      - name: OpenShift networking preparation
                        hosts: all
                        tasks:
                          - name: install NetworkManager
                            package:
                              name: NetworkManager
                              state: present
                          - name: generate nm dispatcher script
                            copy:
                              dest: "/etc/NetworkManager/dispatcher.d/99-os-net-config-origin-dns.sh"
                              owner: root
                              mode: 0755
                              content: >-
                                #!/bin/bash -x

                                DEVS=$(nmcli device | grep unmanaged | awk '{print $1}')

                                for dev in $DEVS;
                                do
                                  temp="${dev%\"}"
                                  temp="${temp#\"}"
                                  export DEVICE_IFACE=$temp

                                  /etc/NetworkManager/dispatcher.d/99-origin-dns.sh $DEVICE_IFACE up
                                done
                          - name: Enable NetworkManager
                            service:
                              name: NetworkManager
                              state: restarted
                              enabled: yes
                      {% if tripleo_stack_action == 'CREATE' %}
                      # Prerequisites playbook is explicitly needed only for
                      # initial install
                      - include: "{{openshift_prerequisites_playbook_path}}"
                      - include: "{{openshift_ansible_playbook_path}}"
                      {% elif tripleo_stack_action == 'UPDATE' %}
                      {% if has_new_nodes %}
                      - name: Restart dnsmasq to pick up new nodes
                        hosts: all
                        tasks:
                          - name: Restart dnsmasq
                            service:
                              name: dnsmasq
                              state: restarted
                      {% if new_master_nodes | count > 0 %}
                      # Scale up nodes (including masters)
                      - include: "{{openshift_master_scaleup_playbook_path}}"
                      {% else %}
                      # Scale up workers/infra nodes
                      - include: "{{openshift_worker_scaleup_playbook_path}}"
                      {% endif %}
                      {% endif %}
                      # Re-run the deploy playbook to apply potential change
                      # changes to existing nodes
                      - include: "{{openshift_ansible_playbook_path}}"
                      {% endif %}
                # Reuse a caller-provided command if one exists, otherwise run
                # the containerized openshift-ansible wrapper.
                - name: set openshift command
                  set_fact:
                    openshift_command: >-
                      {%- if openshift_command is defined -%}
                      {{openshift_command}}
                      {%- else -%}
                      sudo /usr/bin/tripleo-deploy-openshift
                      --config-download-dir {{playbook_dir}}
                      --image {{openshift_ansible_image}}
                      {%- endif -%}
                - name: print openshift command
                  debug:
                    var: openshift_command
                - name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
                  shell: |
                    {{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
                    exit ${PIPESTATUS[0]}
                # NOTE(mandre) Scale up the etcd node in a separate ansible run
                # because osa expects a different inventory for etcd scale up.
                # The newly added nodes are not new anymore from the point of
                # view of osa and need to be moved from new_masters and
                # new_nodes group to masters and nodes groups respectively. In
                # addition they need to be added to new_etcd groups.
                - when: tripleo_stack_action == 'UPDATE' and new_master_nodes | count > 0
                  block:
                    - name: generate updated openshift inventory for openshift_master service
                      copy:
                        dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
                        content: |
                          {% if (master_nodes + new_master_nodes) | count > 0%}
                          all:
                            children:
                              masters:
                                hosts:
                          {% for host in (master_nodes + new_master_nodes) -%}
                                  {{host.hostname}}:
                          {% endfor %}
                                vars:
                                  {{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
                          {% if new_master_nodes | count > 0 -%}
                              new_etcd:
                                hosts:
                          {% for host in new_master_nodes -%}
                                  {{host.hostname}}:
                          {% endfor %}
                                vars:
                                  {{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
                          {% endif %}
                          {% endif %}
                    - name: generate updated openshift inventory for {{tripleo_role_name}} role groups
                      copy:
                        dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_groups.yml"
                        content: |
                          {% if (master_nodes + new_master_nodes) | count > 0%}
                          all:
                            children:
                              nodes:
                                hosts:
                          {% for host in (master_nodes + new_master_nodes) -%}
                                  {{host.hostname}}:
                          {% endfor %}
                          {% endif %}
                    # NOTE(review): this task was originally named "generate
                    # openshift validation playbook" — a copy-paste of the later
                    # validation task's name; it actually writes the etcd
                    # scale-up playbook.
                    - name: generate openshift etcd scaleup playbook
                      copy:
                        dest: "{{playbook_dir}}/openshift/playbook.yml"
                        content: |
                          # Scale up etcd nodes
                          - include: "{{openshift_etcd_scaleup_playbook_path}}"
                    - name: print openshift command
                      debug:
                        var: openshift_command
                    - name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook-etcd.log)
                      shell: |
                        {{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook-etcd.log
                        exit ${PIPESTATUS[0]}
                # Overwrite playbook.yml with post-deploy sanity checks and run
                # it: oc status, router/registry replicas, node readiness.
                - name: generate openshift validation playbook
                  copy:
                    dest: "{{playbook_dir}}/openshift/playbook.yml"
                    content: |
                      - name: Simple validation OpenShift is actually deployed
                        hosts: masters
                        tasks:
                          - name: Check oc status
                            command: oc status --suggest
                            register: oc_status
                            become: true
                          - name: Register failure if oc status fails
                            command: echo true
                            register: oc_status_failed
                            when: '"fail" in oc_status.stdout'
                          - debug:
                              var: oc_status.stdout_lines
                          - name: Check oc get dc/router
                            command: "oc get dc/router -o jsonpath='{.status.readyReplicas}'"
                            register: oc_get_router
                            become: true
                          - name: Register failure if oc get dc/router fails
                            command: echo true
                            register: oc_get_router_failed
                            when: 'oc_get_router.stdout|int < 1'
                          - debug:
                              var: oc_get_router.stdout
                          - name: Check oc get dc/docker-registry
                            command: "oc get dc/docker-registry -o jsonpath='{.status.readyReplicas}'"
                            register: oc_get_registry
                            become: true
                          - name: Register failure if oc get dc/docker-registry fails
                            command: echo true
                            register: oc_get_registry_failed
                            when: 'oc_get_registry.stdout|int < 1'
                          - debug:
                              var: oc_get_registry.stdout
                          - name: Check oc get nodes
                            command: oc get nodes --all-namespaces
                            register: oc_get_nodes
                            become: true
                          - name: Register failure if oc get nodes fails
                            command: echo true
                            register: oc_get_nodes_failed
                            when: '"NotReady" in oc_get_nodes.stdout'
                          - debug:
                              var: oc_get_nodes.stdout_lines
                          - name: Fail the playbook if any validations failed
                            fail:
                            when: >
                              oc_status_failed.changed or
                              oc_get_nodes_failed.changed or
                              oc_get_router_failed.changed or
                              oc_get_registry_failed.changed
                - name: print openshift command
                  debug:
                    var: openshift_command
                - name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook-validation.log)
                  shell: |
                    {{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook-validation.log
                    exit ${PIPESTATUS[0]}