tripleo-heat-templates/extraconfig/services/openshift-master.yaml
Carlos Camacho 44ef2a3ec1 Change template names to rocky
The new master branch should point now to rocky.

So, HOT templates should specify that they might contain features
for rocky release [1]

Also, this submission updates the YAML validation to use only the latest
heat_version alias. There are cases in which we will need to set
the version for specific templates, i.e. mixed versions, so a variable
is added to assign specific templates to specific heat_version
aliases, avoiding the introduction of errors by bulk-replacing the
old version in new releases.

[1]: https://docs.openstack.org/heat/latest/template_guide/hot_spec.html#rocky
Change-Id: Ib17526d9cc453516d99d4659ee5fa51a5aa7fb4b
2018-05-09 08:28:42 +02:00

263 lines
11 KiB
YAML

heat_template_version: rocky

description: External tasks definition for OpenShift

parameters:
  RoleNetIpMap:
    default: {}
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  OpenShiftGlobalVariables:
    default: {}
    description: Global Ansible variables for OpenShift-Ansible installer.
    type: json
  OpenShiftAnsiblePlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml'
    description: Path to OpenShift-Ansible playbook.
    type: string
  OpenShiftMasterNodeVars:
    default: {}
    description: OpenShift node vars specific for the master nodes
    type: json
  OpenShiftWorkerNodeVars:
    default: {}
    description: OpenShift node vars specific for the worker nodes
    type: json
  OpenShiftGlusterDisks:
    default:
      - /dev/vdb
      - /dev/vdc
      - /dev/vdd
    description: List of disks for openshift_glusterfs service to use
    type: comma_delimited_list
    # role_specific: may be overridden per role via RoleParameters (resolved
    # by the RoleParametersValue resource below).
    tags:
      - role_specific
resources:

  # Resolve the effective value of role-specific parameters: a value supplied
  # in RoleParameters takes precedence over the flat top-level parameter.
  # The inner map_replace maps the key to its RoleParameters override (if
  # any); the outer map_replace substitutes the top-level parameter value for
  # keys that were not overridden.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
              - OpenShiftGlusterDisks: OpenShiftGlusterDisks
              - values: {get_param: [RoleParameters]}
          - values:
              OpenShiftGlusterDisks: {get_param: OpenShiftGlusterDisks}
outputs:
  role_data:
    description: Role data for the Openshift Service
    value:
      service_name: openshift_master
      config_settings:
        tripleo.openshift_master.firewall_rules:
          '200 openshift-master api':
            dport: 6443
            proto: tcp
          '200 openshift-master etcd':
            dport:
              - 2379
              - 2380
            proto: tcp
      upgrade_tasks: []
      step_config: ''
      # Ansible tasks run on the undercloud during the external deploy phase.
      # They generate an openshift-ansible inventory/vars/playbook under
      # {{playbook_dir}}/openshift and then run openshift-ansible.
      external_deploy_tasks:
        - name: openshift_master step 2
          when: step == '2'
          block:
            - name: create openshift temp dirs
              file:
                path: "{{item}}"
                state: directory
              with_items:
                - "{{playbook_dir}}/openshift"

            - name: set openshift global vars fact
              set_fact:
                openshift_global_vars: {get_param: OpenShiftGlobalVariables}
                openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
                openshift_worker_node_vars: {get_param: OpenShiftWorkerNodeVars}
                # role-specific override resolved by RoleParametersValue
                openshift_gluster_disks: {get_attr: [RoleParametersValue, value, OpenShiftGlusterDisks]}

            # The inventory is a Jinja template rendered by the copy task; the
            # indent(6)/indent(8) filters must match the indentation of the
            # host-variable level in the generated YAML.
            - name: generate openshift inventory
              copy:
                dest: "{{playbook_dir}}/openshift/inventory.yml"
                content: |
                  masters:
                    hosts:
                      {% for host in groups['openshift_master'] -%}
                      {{ hostvars.raw_get(host)['ansible_hostname'] }}:
                        ansible_user: {{ hostvars.raw_get(host)['ansible_user'] | default(hostvars.raw_get(host)['ansible_ssh_user']) | default('root') }}
                        ansible_host: {{ hostvars.raw_get(host)['ansible_host'] | default(host) }}
                        ansible_become: true
                        etcd_ip: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_ip: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_public_ip: {{hostvars.raw_get(host)['external_ip'] | default(hostvars.raw_get(host)['ctlplane_ip'])}}
                        openshift_hostname: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_public_hostname: {{hostvars.raw_get(host)['external_ip'] | default(hostvars.raw_get(host)['ctlplane_ip'])}}
                        {% if openshift_master_node_vars -%}
                        {{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
                        {%- endif %}
                      {% endfor %}
                  nodes:
                    hosts:
                      {% for host in groups['openshift_worker'] -%}
                      {{ hostvars.raw_get(host)['ansible_hostname'] }}:
                        ansible_user: {{ hostvars.raw_get(host)['ansible_user'] | default(hostvars.raw_get(host)['ansible_ssh_user']) | default('root') }}
                        ansible_host: {{ hostvars.raw_get(host)['ansible_host'] | default(host) }}
                        ansible_become: true
                        etcd_ip: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_ip: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_public_ip: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_hostname: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_public_hostname: {{hostvars.raw_get(host)['ctlplane_ip']}}
                        openshift_schedulable: true
                        openshift_node_labels:
                          region: 'infra'
                          zone: 'default'
                        {% if openshift_worker_node_vars -%}
                        {{openshift_worker_node_vars | to_nice_yaml() | indent(6)}}
                        {%- endif %}
                      {% endfor %}
                  {% if groups['openshift_glusterfs'] | default([]) %}
                  glusterfs:
                    hosts:
                      {% for host in groups['openshift_glusterfs'] | default([]) -%}
                      {{ hostvars.raw_get(host)['ansible_hostname'] }}:
                        glusterfs_ip: {{hostvars.raw_get(host)['storage_ip']}}
                        glusterfs_devices:
                          {{openshift_gluster_disks | to_nice_yaml() | indent(8) }}
                        ansible_user: {{ hostvars.raw_get(host)['ansible_user'] | default(hostvars.raw_get(host)['ansible_ssh_user']) | default('root') }}
                        ansible_host: {{ hostvars.raw_get(host)['ansible_host'] | default(host) }}
                        ansible_become: true
                        openshift_schedulable: true
                      {% endfor %}
                  {% endif %}
                  etcd:
                    children:
                      masters: {}
                  OSEv3:
                    children:
                      masters: {}
                      nodes: {}
                      {% if groups['openshift_glusterfs'] | default([]) %}glusterfs: {}{% endif %}

            - name: generate openshift global defaults
              copy:
                dest: "{{playbook_dir}}/openshift/global_defaults.yml"
                content: |
                  containerized: true
                  openshift_master_cluster_method: native
                  openshift_use_dnsmasq: true
                  openshift_use_external_openvswitch: true

            - name: generate openshift global vars
              copy:
                dest: "{{playbook_dir}}/openshift/global_vars.yml"
                content: "{{openshift_global_vars|to_nice_yaml}}"

            - name: set openshift ansible playbook path
              set_fact:
                openshift_ansible_playbook_path: {get_param: OpenShiftAnsiblePlaybook}

            - name: generate openshift playbook
              copy:
                dest: "{{playbook_dir}}/openshift/playbook.yml"
                content: |
                  # NOTE(flaper87): The NetworkManager setup has been moved
                  # into openshift-ansible but it's not been released yet.
                  # This code will go away as soon as an rpm with the required
                  # roles hits the repo.
                  - name: OpenShift networking preparation
                    hosts: all
                    tasks:
                      - name: install NetworkManager
                        package:
                          name: NetworkManager
                          state: present
                      - name: generate nm dispatcher script
                        copy:
                          dest: "/etc/NetworkManager/dispatcher.d/99-os-net-config-origin-dns.sh"
                          owner: root
                          # Quoted: a bare 0755 is parsed by YAML 1.1 loaders as
                          # the integer 493 and yields the wrong file mode.
                          mode: '0755'
                          # Literal block (|) so the generated script keeps one
                          # command per line; a folded scalar would join lines.
                          content: |
                            #!/bin/bash -x
                            DEVS=$(jq '.network_config[] | {name}.name' /etc/os-net-config/config.json)
                            # NOTE(flaper87): We should, eventually, set the name on the vlan objects
                            # inside the os-net-config config.json file and use that.
                            VLANS=$(jq '.network_config[] | .members // [] | del( .[] | select( .type != "vlan" ) ) | .[].type + (.[].vlan_id|tostring)' /etc/os-net-config/config.json)
                            for dev in $DEVS $VLANS;
                            do
                              temp="${dev%\"}"
                              temp="${temp#\"}"
                              export DEVICE_IFACE=$temp
                              /etc/NetworkManager/dispatcher.d/99-origin-dns.sh $DEVICE_IFACE up
                            done
                      - name: Enable NetworkManager
                        service:
                          name: NetworkManager
                          state: restarted
                          enabled: yes
                  # NOTE(review): 'include' is deprecated in Ansible 2.4+ in
                  # favor of import_playbook/include_tasks; kept as-is because
                  # import_playbook resolves variables differently — confirm
                  # the target Ansible version before changing.
                  - include: "{{openshift_ansible_playbook_path}}"

            # Allow operators to pre-set openshift_command; otherwise build
            # the ansible-playbook invocation from the generated artifacts.
            - name: set openshift command
              set_fact:
                openshift_command: >-
                  {%- if openshift_command is defined -%}
                  {{openshift_command}}
                  {%- else -%}
                  ANSIBLE_HOST_KEY_CHECKING=False
                  ansible-playbook
                  -i '{{playbook_dir}}/openshift/inventory.yml'
                  --extra-vars '@{{playbook_dir}}/openshift/global_defaults.yml'
                  --extra-vars '@{{playbook_dir}}/openshift/global_vars.yml'
                  '{{playbook_dir}}/openshift/playbook.yml'
                  {%- endif -%}

            - name: print openshift command
              debug:
                var: openshift_command

            # tee duplicates output to a log file; exit with the playbook's
            # status, not tee's, via PIPESTATUS.
            - name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
              shell: |
                {{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
                exit ${PIPESTATUS[0]}