heat_template_version: rocky

description: External tasks definition for OpenShift

parameters:
  StackAction:
    type: string
    description: >
      Heat action performed on the top-level stack. Note StackUpdateType is
      set to UPGRADE when a major-version upgrade is in progress.
    constraints:
      - allowed_values: ['CREATE', 'UPDATE']
  RoleNetIpMap:
    default: {}
    type: json
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  OpenShiftGlobalVariables:
    default: {}
    description: Global Ansible variables for the OpenShift-Ansible installer.
    type: json
  OpenShiftAnsiblePlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml'
    description: Path to the OpenShift-Ansible deploy playbook.
    type: string
  OpenShiftMasterScaleupPlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-master/scaleup.yml'
    description: Path to the OpenShift-Ansible master scaleup playbook.
    type: string
  OpenShiftUpgradePlaybook:
    default: '/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml'
    description: Path to the OpenShift-Ansible upgrade playbook.
    type: string
  OpenShiftMasterNodeVars:
    default: {}
    description: OpenShift node vars specific for the master nodes
    type: json
  OpenShiftWorkerNodeVars:
    default: {}
    description: OpenShift node vars specific for the worker nodes
    type: json
  DockerInsecureRegistryAddress:
    description: Optional. The IP address and port of an insecure docker
                 namespace that will be configured in /etc/sysconfig/docker.
                 The value can be multiple addresses separated by commas.
    type: comma_delimited_list
    default: []
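  # NOTE: a minimal sketch of how the registry and image parameters below are
  # typically wired up via parameter_defaults in an environment file. The
  # registry address and image tags here are illustrative assumptions, not
  # shipped defaults:
  #
  #   parameter_defaults:
  #     DockerInsecureRegistryAddress: ['192.168.24.1:8787']
  #     DockerOpenShiftBaseImage: '192.168.24.1:8787/openshift/origin:v3.9.0'
  #     DockerOpenShiftNodeImage: '192.168.24.1:8787/openshift/node:v3.9.0'
  #     DockerOpenShiftEtcdImage: '192.168.24.1:8787/rhel7/etcd:latest'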
  DockerOpenShiftBaseImage:
    description: Base container image for OpenShift
    type: string
  DockerOpenShiftCockpitImage:
    description: Cockpit container image for OpenShift
    type: string
  DockerOpenShiftNodeImage:
    description: Node container image for OpenShift
    type: string
  DockerOpenShiftEtcdImage:
    description: etcd container image for OpenShift
    type: string

outputs:
  role_data:
    description: Role data for the OpenShift master service
    value:
      service_name: openshift_master
      config_settings:
        tripleo.openshift_master.firewall_rules:
          '200 openshift-master api':
            dport: 6443
            proto: tcp
          '200 openshift-master etcd':
            dport:
              - 2379
              - 2380
            proto: tcp
      upgrade_tasks: []
      step_config: ''
      external_deploy_tasks:
        - name: openshift_master step 2
          when: step == '2'
          tags: openshift
          block:
            - name: create openshift temp dirs
              file:
                path: "{{item}}"
                state: directory
              with_items:
                - "{{playbook_dir}}/openshift/inventory"

            - name: set openshift global vars fact
              set_fact:
                openshift_global_vars:
                  map_merge:
                    - openshift_release: '3.9'
                      openshift_version: '3.9.0'
                      openshift_image_tag:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1)[1]
                          data:
                            image: {get_param: DockerOpenShiftBaseImage}
                      openshift_enable_excluders: false
                      openshift_deployment_type: origin
                      openshift_use_external_openvswitch: true
                      openshift_docker_selinux_enabled: false
                      # Disable services we're not using for now
                      openshift_enable_service_catalog: false
                      template_service_broker_install: false
                      # Needed for containerized deployment
                      skip_version: true
                      # Fatal and Errors only
                      debug_level: 0
                      openshift_master_cluster_method: native
                      openshift_master_cluster_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
                      openshift_master_cluster_public_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
                      # Local Registry
                      openshift_examples_modify_imagestreams: true
                      oreg_url:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1).join("-${component}:")
                          data:
                            image: {get_param: DockerOpenShiftBaseImage}
                      etcd_image: {get_param: DockerOpenShiftEtcdImage}
                      osm_etcd_image: {get_param: DockerOpenShiftEtcdImage}
                      osm_image:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1)[0]
                          data:
                            image: {get_param: DockerOpenShiftBaseImage}
                      osn_image:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1)[0]
                          data:
                            image: {get_param: DockerOpenShiftNodeImage}
                      openshift_cockpit_deployer_prefix:
                        yaql:
                          expression: $.data.image.rightSplit("/", 1)[0] + "/"
                          data:
                            image: {get_param: DockerOpenShiftCockpitImage}
                      openshift_cockpit_deployer_basename:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1)[0].rightSplit("/", 1)[1]
                          data:
                            image: {get_param: DockerOpenShiftCockpitImage}
                      openshift_cockpit_deployer_version:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1)[1]
                          data:
                            image: {get_param: DockerOpenShiftCockpitImage}
                      openshift_web_console_prefix:
                        yaql:
                          expression: $.data.image.rightSplit(":", 1)[0] + "-"
                          data:
                            image: {get_param: DockerOpenShiftBaseImage}
                      openshift_docker_additional_registries: {get_param: DockerInsecureRegistryAddress}
                    - {get_param: OpenShiftGlobalVariables}
                tripleo_role_name: {get_param: RoleName}
                tripleo_stack_action: {get_param: StackAction}
                openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
                openshift_worker_node_vars: {get_param: OpenShiftWorkerNodeVars}
                openshift_master_network: {get_param: [ServiceNetMap, OpenshiftMasterNetwork]}
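            # NOTE: the yaql rightSplit expressions above carve the registry
            # prefix, base name and tag out of a full image reference. As an
            # assumed, illustrative example, with DockerOpenShiftBaseImage set
            # to 'registry.example.com/openshift/origin:v3.9.0':
            #
            #   rightSplit(":", 1)[0]       -> 'registry.example.com/openshift/origin'
            #   rightSplit(":", 1)[1]       -> 'v3.9.0' (openshift_image_tag)
            #   rightSplit("/", 1)[0] + "/" -> 'registry.example.com/openshift/'
            #   rightSplit(":", 1).join("-${component}:")
            #     -> 'registry.example.com/openshift/origin-${component}:v3.9.0' (oreg_url)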
            # NOTE(flaper87): Check whether origin-node is running on the
            # openshift nodes so we can flag the node as new later on.
            # The shell below never fails: docker inspect exits with 1 when
            # the container doesn't exist, in which case we fall through to
            # echo "false". Perhaps we could use failed_when instead of
            # chaining commands. Future improvement.
            - name: Check if origin-node is running
              become: true
              shell: >
                docker inspect atomic-enterprise-master-api > /dev/null 2>&1 ||
                docker inspect origin-master-api > /dev/null 2>&1 ||
                echo "false"
              register: origin_nodes
              delegate_to: "{{item}}"
              with_items: "{{ groups[tripleo_role_name] | default([]) }}"

            # NOTE(flaper87): Create all the node objects now, as yaml dicts,
            # instead of formatting everything as part of a template.
            # We consider new_node every node whose check above printed
            # "false", meaning no master container was found on it.
            #
            # Future Improvement: Use hostvars[] syntax instead of raw_get
            # to reduce verbosity.
            - set_fact:
                nodes:
                  - new_node: "{{origin_nodes.results | selectattr('item', 'equalto', item) | selectattr('stdout', 'equalto', 'false') | list | count > 0}}"
                    hostname: "{{item}}"
                    ansible_user: "{{ hostvars[item]['ansible_user'] | default(hostvars[item]['ansible_ssh_user']) | default('root') }}"
                    ansible_host: "{{ hostvars[item]['ansible_host'] | default(item) }}"
                    ansible_become: true
                    etcd_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
                    openshift_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
                    openshift_master_bind_addr: "{{hostvars[item][openshift_master_network + '_ip']}}"
                    openshift_public_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
                    openshift_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
                    openshift_public_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
              register: all_master_nodes
              with_items: "{{ groups[tripleo_role_name] | default([]) }}"

            - set_fact:
                master_nodes: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | list}}"
                new_masters: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}}"

            # NOTE(flaper87): Every master node will be in the masters group,
            # but only new master nodes will be in the new_masters section,
            # which is created only if there are nodes to add. We'll add
            # `new_masters` to the OSEv3 group regardless, to simplify the
            # implementation: Ansible will ignore the section if it doesn't
            # exist or if it's empty.
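            # NOTE: a sketch of the shape of the facts built above, assuming a
            # hypothetical, pre-existing master 'overcloud-master-0' reachable
            # as heat-admin with 192.0.2.10 on the openshift_master_network:
            #
            #   master_nodes:
            #     - new_node: false
            #       hostname: overcloud-master-0
            #       ansible_user: heat-admin
            #       ansible_host: 192.0.2.10
            #       ansible_become: true
            #       etcd_ip: 192.0.2.10
            #       openshift_ip: 192.0.2.10
            #       ...
            #   new_masters: []   # empty, since no node printed "false"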
            - name: generate openshift inventory for openshift_master service
              copy:
                dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
                content: |
                  {% if master_nodes | count > 0 %}
                  masters:
                    hosts:
                  {% for host in master_nodes %}
                      {{host.hostname}}:
                        {{host | combine(openshift_master_node_vars) | to_nice_yaml() | indent(6)}}
                  {% endfor %}
                  {% endif %}

                  {% if new_masters | count > 0 %}
                  new_masters:
                    hosts:
                  {% for host in new_masters %}
                      {{host.hostname}}:
                        {{host | combine(openshift_master_node_vars) | to_nice_yaml() | indent(6)}}
                  {% endfor %}

                  new_etcd:
                    children:
                      new_masters: {}
                  {% endif %}

                  etcd:
                    children:
                      masters: {}

                  OSEv3:
                    children:
                      masters: {}
                      nodes: {}
                      new_masters: {}
                      new_nodes: {}
                      {% if groups['openshift_glusterfs'] | default([]) %}glusterfs: {}{% endif %}

            - name: generate openshift global defaults
              copy:
                dest: "{{playbook_dir}}/openshift/global_defaults.yml"
                content: |
                  containerized: true
                  openshift_master_cluster_method: native
                  openshift_use_dnsmasq: true
                  openshift_use_external_openvswitch: true

            - name: generate openshift global vars
              copy:
                dest: "{{playbook_dir}}/openshift/global_vars.yml"
                content: "{{openshift_global_vars|to_nice_yaml}}"

            - name: set openshift ansible playbook paths
              set_fact:
                openshift_ansible_playbook_path: {get_param: OpenShiftAnsiblePlaybook}
                openshift_master_scaleup_playbook_path: {get_param: OpenShiftMasterScaleupPlaybook}
                openshift_upgrade_playbook_path: {get_param: OpenShiftUpgradePlaybook}

            # NOTE(flaper87): We'll use the scaleup playbook paths if there
            # are new master or new worker nodes and we are doing an UPDATE.
            # For all the other cases, we shall use the deploy playbook.
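            # NOTE: a rough summary (assumed) of how the conditionals in the
            # generated playbook below compose; the includes are not mutually
            # exclusive:
            #   CREATE                       -> deploy_cluster.yml only
            #   UPDATE + new masters         -> master scaleup playbook
            #   UPDATE + new workers         -> worker scaleup playbook
            #   UPDATE + openshift_upgrade   -> upgrade playbook
            #   UPDATE + no new nodes at all -> deploy_cluster.yml (re-run)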
            - name: generate openshift playbook
              copy:
                dest: "{{playbook_dir}}/openshift/playbook.yml"
                content: |
                  # NOTE(flaper87): The NetworkManager setup has been moved
                  # into openshift-ansible but it hasn't been released yet.
                  # This code will go away as soon as an rpm with the required
                  # roles hits the repo.
                  - name: OpenShift networking preparation
                    hosts: all
                    tasks:
                      - name: install NetworkManager
                        package:
                          name: NetworkManager
                          state: present

                      - name: generate nm dispatcher script
                        copy:
                          dest: "/etc/NetworkManager/dispatcher.d/99-os-net-config-origin-dns.sh"
                          owner: root
                          mode: 0755
                          content: >-
                            #!/bin/bash -x

                            DEVS=$(nmcli device | grep unmanaged | awk '{print $1}')

                            for dev in $DEVS;
                            do
                              temp="${dev%\"}"
                              temp="${temp#\"}"
                              export DEVICE_IFACE=$temp

                              /etc/NetworkManager/dispatcher.d/99-origin-dns.sh $DEVICE_IFACE up
                            done

                      - name: Enable NetworkManager
                        service:
                          name: NetworkManager
                          state: restarted
                          enabled: yes

                  {% if tripleo_stack_action == 'UPDATE' and new_masters | count > 0 %}
                  - include: "{{openshift_master_scaleup_playbook_path}}"
                  {% endif %}

                  {% if tripleo_stack_action == 'UPDATE' and new_nodes | default([]) | count > 0 %}
                  - include: "{{openshift_worker_scaleup_playbook_path}}"
                  {% endif %}

                  {% if tripleo_stack_action == 'UPDATE' and openshift_upgrade | default(false) %}
                  - include: "{{openshift_upgrade_playbook_path}}"
                  {% endif %}

                  {% if tripleo_stack_action == 'CREATE' or (tripleo_stack_action == 'UPDATE' and (new_masters + new_nodes | default([])) | count == 0) %}
                  - include: "{{openshift_ansible_playbook_path}}"
                  {% endif %}

            - name: set openshift command
              set_fact:
                openshift_command: >-
                  {%- if openshift_command is defined -%}
                  {{openshift_command}}
                  {%- else -%}
                  ANSIBLE_HOST_KEY_CHECKING=False
                  ansible-playbook
                  -i '{{playbook_dir}}/openshift/inventory'
                  --extra-vars '@{{playbook_dir}}/openshift/global_defaults.yml'
                  --extra-vars '@{{playbook_dir}}/openshift/global_vars.yml'
                  '{{playbook_dir}}/openshift/playbook.yml'
                  {%- endif -%}

            - name: print openshift command
              debug:
                var: openshift_command

            - name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
              shell: |
                {{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
                exit ${PIPESTATUS[0]}

      external_upgrade_tasks:
        - name: set OpenShift upgrade facts
          tags: openshift
          set_fact:
            openshift_upgrade: true
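# NOTE: for reference, the openshift_command assembled above expands to
# something like the following once config-download resolves
# {{playbook_dir}}; the working directory shown is an illustrative
# assumption, not a guaranteed path:
#
#   ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook \
#     -i '/var/lib/mistral/overcloud/openshift/inventory' \
#     --extra-vars '@/var/lib/mistral/overcloud/openshift/global_defaults.yml' \
#     --extra-vars '@/var/lib/mistral/overcloud/openshift/global_vars.yml' \
#     '/var/lib/mistral/overcloud/openshift/playbook.yml'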