# tripleo-heat-templates/deployment/openshift/openshift-master-baremetal-...
heat_template_version: rocky
description: External tasks definition for OpenShift
parameters:
StackAction:
type: string
description: >
      Heat action performed on the top-level stack. Note StackUpdateType is
set to UPGRADE when a major-version upgrade is in progress.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
RoleNetIpMap:
default: {}
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
OpenShiftAnsiblePath:
default: '/usr/share/ansible/openshift-ansible/'
description: Path to OpenShift-Ansible.
type: string
OpenShiftGlobalVariables:
default: {}
description: Global Ansible variables for OpenShift-Ansible installer.
type: json
OpenShiftMasterNodeVars:
default: {}
description: OpenShift node vars specific for the master nodes
type: json
OpenShiftNodeGroupName:
default: node-config-all-in-one
description: The group the nodes belong to.
type: string
tags:
- role_specific
  DockerInsecureRegistryAddress:
    description: Optional. The IP address and port of an insecure docker
                 registry that will be configured in /etc/sysconfig/docker.
                 The value can be multiple addresses separated by commas.
    type: comma_delimited_list
    default: []
  ContainerOpenShiftAnsibleImage:
    description: openshift-ansible container image.
    type: string
  ContainerOpenShiftControlPlaneImage:
    description: Control plane container image for OpenShift.
    type: string
  ContainerOpenShiftCockpitImage:
    description: Cockpit container image for OpenShift.
    type: string
  ContainerOpenShiftNodeImage:
    description: Node container image for OpenShift.
    type: string
  ContainerOpenShiftEtcdImage:
    description: etcd container image for OpenShift.
    type: string
  ContainerOpenShiftAnsibleServiceBrokerImage:
    description: Ansible Service Broker container image for OpenShift.
    type: string
  ContainerOpenShiftConsoleImage:
    description: Console container image for OpenShift.
    type: string
  ContainerOpenShiftPrometheusNodeExporterImage:
    description: Prometheus node exporter container image for OpenShift.
    type: string
  ContainerOpenShiftKubeRbacProxyImage:
    description: kube-rbac-proxy container image for OpenShift.
    type: string
  ContainerOpenShiftClusterMonitorOperatorImage:
    description: Cluster monitoring operator container image for OpenShift.
    type: string
  ContainerOpenShiftConfigmapReloaderImage:
    description: ConfigMap reloader container image for OpenShift.
    type: string
  ContainerOpenShiftPrometheusOperatorImage:
    description: Prometheus operator container image for OpenShift.
    type: string
  ContainerOpenShiftPrometheusConfigReloaderImage:
    description: Prometheus config reloader container image for OpenShift.
    type: string
  ContainerOpenShiftPrometheusImage:
    description: Prometheus container image for OpenShift.
    type: string
  ContainerOpenShiftPrometheusAlertmanagerImage:
    description: Prometheus Alertmanager container image for OpenShift.
    type: string
  ContainerOpenShiftOauthProxyImage:
    description: OAuth proxy container image for OpenShift.
    type: string
  ContainerOpenShiftKubeStateMetricsImage:
    description: kube-state-metrics container image for OpenShift.
    type: string
  ContainerOpenShiftGrafanaImage:
    description: Grafana container image for OpenShift.
    type: string
resources:
OpenShiftNode:
type: ./openshift-node.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
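  # Resolve the node group name for this role: a role-specific value in
  # RoleParameters takes precedence, otherwise the global
  # OpenShiftNodeGroupName parameter is used.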
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- OpenShiftNodeGroupName: OpenShiftNodeGroupName
- values: {get_param: [RoleParameters]}
- values:
OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
outputs:
role_data:
    description: Role data for the OpenShift master service
value:
service_name: openshift_master
config_settings:
map_merge:
- get_attr: [OpenShiftNode, role_data, config_settings]
- tripleo::keepalived::virtual_router_id_base: 100
upgrade_tasks: []
step_config: ''
external_deploy_tasks:
list_concat:
- get_attr: [OpenShiftNode, role_data, external_deploy_tasks]
- - name: openshift_master step 1
when: step|int == 1
tags: openshift
become: true
block:
# NOTE(mandre) Remove inventory directory as it may contain
# obsolete files that can mess up the current deployment
- name: remove inventory directory if it exists
file:
path: "{{playbook_dir}}/openshift/inventory"
state: absent
- name: remove global_gluster_vars
file:
path: "{{playbook_dir}}/openshift/global_gluster_vars.yml"
state: absent
- name: create openshift inventory directory
file:
path: "{{playbook_dir}}/openshift/inventory"
state: directory
owner: "{{ ansible_user }}"
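            # Step 3 renders the openshift-ansible inventory, variables and
            # playbook under {{playbook_dir}}/openshift/, then runs the
            # installer through tripleo-deploy-openshift.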
- name: openshift_master step 3
when: step|int == 3
tags: openshift
block:
- name: set role facts for generating inventory
set_fact:
tripleo_role_name: {get_param: RoleName}
tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
- name: set openshift global vars fact
set_fact:
openshift_global_vars:
map_merge:
- openshift_release: '3.11'
openshift_version: '3.11'
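                          # Extract the tag portion of the control plane image
                          # reference, e.g. (hypothetical image)
                          # "registry.local/ose-control-plane:v3.11" -> "v3.11".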
openshift_image_tag:
yaql:
expression:
$.data.image.rightSplit(":", 1)[1]
data:
image: {get_param: ContainerOpenShiftControlPlaneImage}
openshift_deployment_type: "{{tripleo_openshift_deployment_type}}"
openshift_use_external_openvswitch: true
openshift_master_bootstrap_auto_approve: true
# Local Registry
openshift_docker_insecure_registries: {get_param: DockerInsecureRegistryAddress}
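                          # oreg_url keeps the literal ${component} placeholder;
                          # openshift-ansible expands it for each component image,
                          # e.g. "...-control-plane:v3.11" -> "...-${component}:v3.11".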
oreg_url:
yaql:
expression:
$.data.image.replace("-control-plane:", "-${component}:")
data:
image: {get_param: ContainerOpenShiftControlPlaneImage}
etcd_image: {get_param: ContainerOpenShiftEtcdImage}
osm_etcd_image: {get_param: ContainerOpenShiftEtcdImage}
osm_image: {get_param: ContainerOpenShiftControlPlaneImage}
osn_image: {get_param: ContainerOpenShiftNodeImage}
openshift_cockpit_deployer_image: {get_param: ContainerOpenShiftCockpitImage}
ansible_service_broker_image: {get_param: ContainerOpenShiftAnsibleServiceBrokerImage}
openshift_console_image_name: {get_param: ContainerOpenShiftConsoleImage}
openshift_cluster_monitoring_operator_image: {get_param: ContainerOpenShiftClusterMonitorOperatorImage}
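                          # The *_repo variables below take an image reference
                          # without its tag, so rightSplit(":", 1)[0] strips
                          # everything after the last ":".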
openshift_cluster_monitoring_operator_node_exporter_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusNodeExporterImage}
openshift_cluster_monitoring_operator_kube_rbac_proxy_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftKubeRbacProxyImage}
openshift_cluster_monitoring_operator_configmap_reloader_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftConfigmapReloaderImage}
openshift_cluster_monitoring_operator_prometheus_operator_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusOperatorImage}
openshift_cluster_monitoring_operator_prometheus_reloader_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusConfigReloaderImage}
openshift_cluster_monitoring_operator_prometheus_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusImage}
openshift_cluster_monitoring_operator_alertmanager_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusAlertmanagerImage}
openshift_cluster_monitoring_operator_proxy_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftOauthProxyImage}
openshift_cluster_monitoring_operator_kube_state_metrics_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftKubeStateMetricsImage}
openshift_cluster_monitoring_operator_grafana_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftGrafanaImage}
- {get_param: OpenShiftGlobalVariables}
tripleo_stack_action: {get_param: StackAction}
openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
openshift_master_cluster_vars:
openshift_master_cluster_hostname: {get_param: [EndpointMap, OpenshiftInternal, host]}
openshift_master_cluster_public_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
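                # The generated inventory lists existing role nodes as masters
                # and, on scale-up, the newly added ones as new_masters so
                # openshift-ansible can tell them apart.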
- name: generate openshift inventory for openshift_master service
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
content: |
                      {% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0 %}
all:
children:
masters:
hosts:
{% for host in role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% if new_role_nodes[tripleo_role_name] | count > 0 -%}
new_masters:
hosts:
{% for host in new_role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% endif %}
{% endif %}
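                # groups.yml wires the role groups into the topology
                # openshift-ansible expects: etcd co-located on the masters and
                # everything aggregated under OSEv3.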
- name: generate openshift inventory for groups
copy:
dest: "{{playbook_dir}}/openshift/inventory/groups.yml"
content: |
all:
children:
etcd:
children:
masters: {}
OSEv3:
children:
masters: {}
etcd: {}
nodes: {}
new_masters: {}
new_etcd: {}
new_nodes: {}
{% if groups['openshift_glusterfs'] | default([]) -%}
glusterfs: {}
glusterfs_registry: {}
{%- endif %}
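                # combine() gives precedence to its argument, so values in
                # openshift_global_vars override the generated cluster
                # hostnames. All-in-one nodes skip this, as a single node
                # needs no cluster hostnames.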
                - name: combine cluster settings
set_fact:
openshift_global_vars: "{{ openshift_master_cluster_vars | combine(openshift_global_vars) }}"
when: 'tripleo_node_group_name != "node-config-all-in-one"'
- name: generate openshift global vars
copy:
dest: "{{playbook_dir}}/openshift/global_vars.yml"
content: "{{openshift_global_vars|to_nice_yaml}}"
- name: set openshift ansible path
set_fact:
openshift_ansible_path: {get_param: OpenShiftAnsiblePath}
- name: set openshift ansible playbook paths
set_fact:
openshift_ansible_deploy_playbook_path: "{{ openshift_ansible_path }}/playbooks/deploy_cluster.yml"
openshift_prerequisites_playbook_path: "{{ openshift_ansible_path }}/playbooks/prerequisites.yml"
openshift_master_scaleup_playbook_path: "{{ openshift_ansible_path }}/playbooks/openshift-master/scaleup.yml"
openshift_etcd_scaleup_playbook_path: "{{ openshift_ansible_path }}/playbooks/openshift-etcd/scaleup.yml"
openshift_worker_scaleup_playbook_path: "{{ openshift_ansible_path }}/playbooks/openshift-node/scaleup.yml"
openshift_ansible_image: {get_param: ContainerOpenShiftAnsibleImage}
# NOTE(flaper87): We'll use openshift_ansible_scaleup_playbook_path
# if there are new master or new worker nodes and we are doing an
# UPDATE. For all the other cases, we shall use the deploy playbook.
- name: generate openshift playbook
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
# NOTE(flaper87): The NetworkManager setup has been moved
# into openshift-ansible but it's not been released yet.
# This code will go away as soon as an rpm with the required
# roles hits the repo.
- name: OpenShift networking preparation
hosts: all
tasks:
- name: install NetworkManager
package:
name: NetworkManager
state: present
- name: generate nm dispatcher script
copy:
dest: "/etc/NetworkManager/dispatcher.d/99-os-net-config-origin-dns.sh"
owner: root
mode: 0755
                              content: |-
#!/bin/bash -x
DEVS=$(nmcli device | grep unmanaged | awk '{print $1}')
for dev in $DEVS;
do
temp="${dev%\"}"
temp="${temp#\"}"
export DEVICE_IFACE=$temp
/etc/NetworkManager/dispatcher.d/99-origin-dns.sh $DEVICE_IFACE up
done
- name: Enable NetworkManager
service:
name: NetworkManager
state: restarted
enabled: yes
{% if tripleo_stack_action == 'CREATE' %}
# Prerequisites playbook is explicitly needed only for
# initial install
- import_playbook: "{{openshift_prerequisites_playbook_path}}"
- import_playbook: "{{openshift_ansible_deploy_playbook_path}}"
{% elif tripleo_stack_action == 'UPDATE' %}
{% if has_new_nodes %}
- name: Restart dnsmasq to pick up new nodes
hosts: all
tasks:
- name: Restart dnsmasq
service:
name: dnsmasq
state: restarted
{% if new_role_nodes[tripleo_role_name] | count > 0 %}
# Scale up nodes (including masters)
- import_playbook: "{{openshift_master_scaleup_playbook_path}}"
{% else %}
# Scale up workers/infra nodes
- import_playbook: "{{openshift_worker_scaleup_playbook_path}}"
{% endif %}
{% endif %}
{% endif %}
- name: set openshift command
set_fact:
openshift_command: >-
{%- if openshift_command is defined -%}
{{openshift_command}}
{%- else -%}
sudo /usr/bin/tripleo-deploy-openshift
--config-download-dir {{playbook_dir}}
--image {{openshift_ansible_image}}
{%- endif -%}
- name: print openshift command
debug:
var: openshift_command
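                # Pipe through tee to keep a live log; PIPESTATUS[0] preserves
                # the installer's exit code, which tee would otherwise mask.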
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
exit ${PIPESTATUS[0]}
# NOTE(mandre) Scale up the etcd node in a separate ansible run
# because osa expects a different inventory for etcd scale up.
# The newly added nodes are not new anymore from the point of
# view of osa and need to be moved from new_masters and
# new_nodes group to masters and nodes groups respectively. In
# addition they need to be added to new_etcd groups.
- when: tripleo_stack_action == 'UPDATE' and new_role_nodes[tripleo_role_name] | count > 0
block:
- name: generate updated openshift inventory for openshift_master service
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
content: |
                          {% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0 %}
all:
children:
masters:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% if new_role_nodes[tripleo_role_name] | count > 0 -%}
new_etcd:
hosts:
{% for host in new_role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% endif %}
{% endif %}
- name: generate updated openshift inventory for {{tripleo_role_name}} role groups
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_groups.yml"
content: |
                          {% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0 %}
all:
children:
nodes:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
{% endif %}
- name: generate openshift playbook for etcd scaleup
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
# Scale up etcd nodes
- import_playbook: "{{openshift_etcd_scaleup_playbook_path}}"
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook-etcd.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook-etcd.log
exit ${PIPESTATUS[0]}
- name: generate post-deployment inventory for master nodes
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
content: |
{% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0%}
all:
children:
masters:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% endif %}
- when: tripleo_stack_action == 'UPDATE'
block:
- name: generate openshift playbook
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
                          # Re-run the deploy playbook to apply potential
                          # changes to existing nodes
- import_playbook: "{{openshift_ansible_deploy_playbook_path}}"
- name: Restart masters
hosts: masters
serial: 1
tasks:
- import_role:
name: "{{ openshift_ansible_path }}/roles/openshift_control_plane"
tasks_from: restart.yml
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
exit ${PIPESTATUS[0]}
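                # Validation pattern: each "Register failure" task below runs
                # `echo true` only when its check failed, so its .changed flag
                # doubles as a failure marker for the final fail task.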
- name: generate openshift validation playbook
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
- name: Simple validation OpenShift is actually deployed
hosts: masters
tasks:
- name: Check oc status
command: oc status --suggest
register: oc_status
become: true
- name: Register failure if oc status fails
command: echo true
register: oc_status_failed
when: '"fail" in oc_status.stdout'
- debug:
var: oc_status.stdout_lines
- name: Check oc get dc/router
command: "oc get dc/router -o jsonpath='{.status.readyReplicas}'"
register: oc_get_router
become: true
- name: Register failure if oc get dc/router fails
command: echo true
register: oc_get_router_failed
when: 'oc_get_router.stdout|int < 1'
- debug:
var: oc_get_router.stdout
- name: Check oc get dc/docker-registry
command: "oc get dc/docker-registry -o jsonpath='{.status.readyReplicas}'"
register: oc_get_registry
become: true
- name: Register failure if oc get dc/docker-registry fails
command: echo true
register: oc_get_registry_failed
when: 'oc_get_registry.stdout|int < 1'
- debug:
var: oc_get_registry.stdout
- name: Check oc get nodes
command: oc get nodes --all-namespaces
register: oc_get_nodes
become: true
- name: Register failure if oc get nodes fails
command: echo true
register: oc_get_nodes_failed
when: '"NotReady" in oc_get_nodes.stdout'
- debug:
var: oc_get_nodes.stdout_lines
- name: Fail the playbook if any validations failed
fail:
when: >
oc_status_failed.changed or
oc_get_nodes_failed.changed or
oc_get_router_failed.changed or
oc_get_registry_failed.changed
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook-validation.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook-validation.log
exit ${PIPESTATUS[0]}