Removal of OpenShift deployed by TripleO support

Support for OpenShift deployed by TripleO has been removed in a downstream
version of Stein, which makes the upstream support difficult to maintain.
OpenShift can be deployed using OpenShift-Ansible, and
users who want to deploy OpenShift 3.11 onto bare metal nodes can
still do so using openshift-ansible directly. The provisioning of
the operating system on bare metal can be done with OpenStack Ironic on
the overcloud, or with deployed-servers, achieving the
same result.

Change-Id: I6a73f7f22dda69bef324ffdaecdcd6be693c1257
This commit is contained in:
Emilien Macchi 2019-08-06 11:30:17 -04:00
parent 29a499d665
commit c845595ba3
25 changed files with 11 additions and 1952 deletions

View File

@ -68,8 +68,6 @@ and should be executed according to the following table:
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| - | scn000 | scn001 | scn002 | scn003 | scn004 | scn006 | scn007 | scn009 | scn010 | non-ha | ovh-ha |
+================+========+========+========+========+========+========+========+========+========+========+========+
| openshift | | | | | | | | X | | | |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| keystone | X | X | X | X | X | X | X | | X | X | X |
+----------------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| glance | | rbd | swift | file | rgw | file | file | | rbd | file | file |

View File

@ -1,35 +0,0 @@
resource_registry:
OS::TripleO::Services::DisableUnbound: disable-unbound.yaml
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Services::OpenShift::Master: ../../deployment/openshift/openshift-master-baremetal-ansible.yaml
OS::TripleO::Services::OpenShift::Worker: ../../deployment/openshift/openshift-master-baremetal-ansible.yaml
OS::TripleO::Services::OpenShift::Infra: ../../deployment/openshift/openshift-infra-baremetal-ansible.yaml
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::DisableUnbound
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
# NOTE(mandre) In all-in-one HAproxy conflicts with the one openshift deploys
# - OS::TripleO::Services::HAproxy
# - OS::TripleO::Services::Keepalived
- OS::TripleO::Services::OpenShift::Master
- OS::TripleO::Services::OpenShift::Worker
- OS::TripleO::Services::OpenShift::Infra
Debug: true
OpenShiftNodeGroupName: 'node-config-all-in-one'
OpenShiftGlobalVariables:
# NOTE(flaper87): Needed for the gate
openshift_disable_check: package_availability,package_version,disk_availability,docker_storage,memory_availability

View File

@ -1,137 +0,0 @@
heat_template_version: rocky
description: External tasks definition for OpenShift
parameters:
RoleNetIpMap:
default: {}
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
OpenShiftGlusterNodeVars:
default: {}
description: OpenShift node vars specific for the gluster nodes
type: json
OpenShiftGlusterDisks:
default:
- /dev/vdb
description: List of disks for openshift_glusterfs service to use
type: comma_delimited_list
tags:
- role_specific
OpenShiftNodeGroupName:
default: node-config-all-in-one
description: The group the nodes belong to.
type: string
tags:
- role_specific
ContainerOpenShiftGlusterFSImage:
description: Container image to use for GlusterFS pod
type: string
ContainerOpenShiftGlusterFSBlockImage:
description: Container image to use for glusterblock-provisioner pod
type: string
ContainerOpenShiftGlusterFSHeketiImage:
description: Container image to use for heketi pods
type: string
resources:
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- OpenShiftGlusterDisks: OpenShiftGlusterDisks
OpenShiftNodeGroupName: OpenShiftNodeGroupName
- values: {get_param: [RoleParameters]}
- values:
OpenShiftGlusterDisks: {get_param: OpenShiftGlusterDisks}
OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
outputs:
role_data:
description: Role data for the Openshift Service
value:
# This service template essentially tags the nodes that we want
# as cns. The actual installation is performed in
# openshift-master service template.
service_name: openshift_glusterfs
upgrade_tasks: []
step_config: ''
external_deploy_tasks:
- name: openshift_cns step 2 Generate Inventory
when: step|int == 2
block:
- name: set openshift global vars fact
set_fact:
openshift_gluster_disks: {get_attr: [RoleParametersValue, value, OpenShiftGlusterDisks]}
tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
tripleo_role_name: {get_param: RoleName}
- name: set openshift gluster global vars fact
set_fact:
openshift_gluster_global_vars:
map_merge:
- openshift_storage_glusterfs_storageclass_default: true
openshift_hosted_registry_storage_kind: glusterfs
- {get_param: OpenShiftGlusterNodeVars}
- openshift_storage_glusterfs_image: {get_param: ContainerOpenShiftGlusterFSImage}
openshift_storage_glusterfs_block_image: {get_param: ContainerOpenShiftGlusterFSBlockImage}
openshift_storage_glusterfs_heketi_image: {get_param: ContainerOpenShiftGlusterFSHeketiImage}
- name: generate openshift gluster global vars
copy:
dest: "{{playbook_dir}}/openshift/global_gluster_vars.yml"
content: "{{openshift_gluster_global_vars|to_nice_yaml}}"
- name: generate openshift inventory for Role
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_glusterfs.yml"
content: |
{% if tripleo_node_group_name == "node-config-infra" or tripleo_node_group_name == 'node-config-all-in-one' -%}
glusterfs_registry:
hosts:
{% for host in groups[tripleo_role_name] | default([]) -%}
{{ hostvars.raw_get(host)['ansible_hostname'] }}:
glusterfs_ip: {{hostvars.raw_get(host)['storage_ip']}}
glusterfs_devices:
{{openshift_gluster_disks | to_nice_yaml() | indent(8) }}
{% endfor %}
{% endif %}
{% if tripleo_node_group_name != "node-config-infra" or tripleo_node_group_name == 'node-config-all-in-one'-%}
glusterfs:
hosts:
{% for host in groups[tripleo_role_name] | default([]) -%}
{{ hostvars.raw_get(host)['ansible_hostname'] }}:
glusterfs_ip: {{hostvars.raw_get(host)['storage_ip']}}
glusterfs_devices:
{{openshift_gluster_disks | to_nice_yaml() | indent(8) }}
{% endfor %}
{% endif %}

View File

@ -1,82 +0,0 @@
heat_template_version: rocky
description: External tasks definition for OpenShift
parameters:
RoleNetIpMap:
default: {}
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
# TODO(mandre) This is unused. Remove it or make it OpenShiftNodeVars
OpenShiftWorkerNodeVars:
default: {}
description: OpenShift node vars specific for the worker nodes
type: json
resources:
OpenShiftWorker:
type: ./openshift-worker-baremetal-ansible.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Openshift Service
value:
service_name: openshift_infra
config_settings: {get_attr: [OpenShiftWorker, role_data, config_settings]}
service_config_settings:
haproxy:
tripleo::openshift_infra::haproxy_endpoints:
openshift-router-http:
base_service_name: openshift_infra
public_virtual_ip: "%{hiera('public_virtual_ip')}"
internal_ip: "%{hiera('openshift_infra_vip')}"
service_port: 80
listen_options:
balance: 'source'
member_options: [ 'check', 'inter 2000', 'rise 2', 'fall 5' ]
haproxy_listen_bind_param: ['transparent']
openshift-router-https:
base_service_name: openshift_infra
public_virtual_ip: "%{hiera('public_virtual_ip')}"
internal_ip: "%{hiera('openshift_infra_vip')}"
service_port: 443
listen_options:
balance: 'source'
member_options: [ 'check', 'inter 2000', 'rise 2', 'fall 5' ]
haproxy_listen_bind_param: ['transparent']
upgrade_tasks: []
step_config: ''
external_deploy_tasks:
- get_attr: [OpenShiftWorker, role_data, external_deploy_tasks]

View File

@ -1,635 +0,0 @@
heat_template_version: rocky
description: External tasks definition for OpenShift
parameters:
StackAction:
type: string
description: >
Heat action on performed top-level stack. Note StackUpdateType is
set to UPGRADE when a major-version upgrade is in progress.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
RoleNetIpMap:
default: {}
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
OpenShiftAnsiblePath:
default: '/usr/share/ansible/openshift-ansible/'
description: Path to OpenShift-Ansible.
type: string
OpenShiftGlobalVariables:
default: {}
description: Global Ansible variables for OpenShift-Ansible installer.
type: json
OpenShiftMasterNodeVars:
default: {}
description: OpenShift node vars specific for the master nodes
type: json
OpenShiftNodeGroupName:
default: node-config-all-in-one
description: The group the nodes belong to.
type: string
tags:
- role_specific
DockerInsecureRegistryAddress:
description: Optional. The IP Address and Port of an insecure docker
namespace that will be configured in /etc/sysconfig/docker.
The value can be multiple addresses separated by commas.
type: comma_delimited_list
default: []
ContainerOpenShiftAnsibleImage:
description: Openshift-ansible container image.
type: string
ContainerOpenShiftControlPlaneImage:
description: Control Plane container image for openshift.
type: string
ContainerOpenShiftCockpitImage:
description: Cockpit container image for openshift
type: string
ContainerOpenShiftNodeImage:
description: Node container image for openshift
type: string
ContainerOpenShiftEtcdImage:
description: etcd container image for openshift
type: string
ContainerOpenShiftAnsibleServiceBrokerImage:
description: Ansible Service Broker image for openshift
type: string
ContainerOpenShiftConsoleImage:
description: console container image for openshift
type: string
ContainerOpenShiftPrometheusNodeExporterImage:
description: prometheus node exporter container image for openshift
type: string
ContainerOpenShiftKubeRbacProxyImage:
description: kube rbac proxy container image for openshift
type: string
ContainerOpenShiftClusterMonitorOperatorImage:
description: cluster monitoring operator container image for openshift
type: string
ContainerOpenShiftConfigmapReloaderImage:
description: configmap reloader container image for openshift
type: string
ContainerOpenShiftPrometheusOperatorImage:
description: prometheus operator container image for openshift
type: string
ContainerOpenShiftPrometheusConfigReloaderImage:
description: prometheus config reloader container image for openshift
type: string
ContainerOpenShiftPrometheusImage:
description: prometheus container image for openshift
type: string
ContainerOpenShiftPrometheusAlertmanagerImage:
description: prometheus alertmanager container image for openshift
type: string
ContainerOpenShiftOauthProxyImage:
description: oauth proxy container image for openshift
type: string
ContainerOpenShiftKubeStateMetricsImage:
description: kube state metrics container image for openshift
type: string
ContainerOpenShiftGrafanaImage:
description: grafana container image for openshift
type: string
resources:
OpenShiftNode:
type: ./openshift-node-baremetal-ansible.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- OpenShiftNodeGroupName: OpenShiftNodeGroupName
- values: {get_param: [RoleParameters]}
- values:
OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
outputs:
role_data:
description: Role data for the Openshift Service
value:
service_name: openshift_master
config_settings:
map_merge:
- get_attr: [OpenShiftNode, role_data, config_settings]
- tripleo::keepalived::virtual_router_id_base: 100
upgrade_tasks: []
step_config: ''
external_deploy_tasks:
list_concat:
- get_attr: [OpenShiftNode, role_data, external_deploy_tasks]
- - name: openshift_master step 1
when: step|int == 1
tags: openshift
become: true
block:
# NOTE(mandre) Remove inventory directory as it may contain
# obsolete files that can mess up the current deployment
- name: remove inventory directory if it exists
file:
path: "{{playbook_dir}}/openshift/inventory"
state: absent
- name: remove global_gluster_vars
file:
path: "{{playbook_dir}}/openshift/global_gluster_vars.yml"
state: absent
- name: create openshift inventory directory
file:
path: "{{playbook_dir}}/openshift/inventory"
state: directory
owner: "{{ ansible_user }}"
- name: openshift_master step 3
when: step|int == 3
tags: openshift
block:
- name: set role facts for generating inventory
set_fact:
tripleo_role_name: {get_param: RoleName}
tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
- name: set openshift global vars fact
set_fact:
openshift_global_vars:
map_merge:
- openshift_release: '3.11'
openshift_version: '3.11'
openshift_image_tag:
yaql:
expression:
$.data.image.rightSplit(":", 1)[1]
data:
image: {get_param: ContainerOpenShiftControlPlaneImage}
openshift_deployment_type: "{{tripleo_openshift_deployment_type}}"
openshift_use_external_openvswitch: true
openshift_master_bootstrap_auto_approve: true
# Local Registry
openshift_docker_insecure_registries: {get_param: DockerInsecureRegistryAddress}
oreg_url:
yaql:
expression:
$.data.image.replace("-control-plane:", "-${component}:")
data:
image: {get_param: ContainerOpenShiftControlPlaneImage}
etcd_image: {get_param: ContainerOpenShiftEtcdImage}
osm_etcd_image: {get_param: ContainerOpenShiftEtcdImage}
osm_image: {get_param: ContainerOpenShiftControlPlaneImage}
osn_image: {get_param: ContainerOpenShiftNodeImage}
openshift_cockpit_deployer_image: {get_param: ContainerOpenShiftCockpitImage}
ansible_service_broker_image: {get_param: ContainerOpenShiftAnsibleServiceBrokerImage}
openshift_console_image_name: {get_param: ContainerOpenShiftConsoleImage}
openshift_cluster_monitoring_operator_image: {get_param: ContainerOpenShiftClusterMonitorOperatorImage}
openshift_cluster_monitoring_operator_node_exporter_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusNodeExporterImage}
openshift_cluster_monitoring_operator_kube_rbac_proxy_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftKubeRbacProxyImage}
openshift_cluster_monitoring_operator_configmap_reloader_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftConfigmapReloaderImage}
openshift_cluster_monitoring_operator_prometheus_operator_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusOperatorImage}
openshift_cluster_monitoring_operator_prometheus_reloader_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusConfigReloaderImage}
openshift_cluster_monitoring_operator_prometheus_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusImage}
openshift_cluster_monitoring_operator_alertmanager_repo:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftPrometheusAlertmanagerImage}
openshift_cluster_monitoring_operator_proxy_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftOauthProxyImage}
openshift_cluster_monitoring_operator_kube_state_metrics_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftKubeStateMetricsImage}
openshift_cluster_monitoring_operator_grafana_image:
yaql:
expression:
$.data.image.rightSplit(":", 1)[0]
data:
image: {get_param: ContainerOpenShiftGrafanaImage}
- {get_param: OpenShiftGlobalVariables}
tripleo_stack_action: {get_param: StackAction}
openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
openshift_master_cluster_vars:
openshift_master_cluster_hostname: {get_param: [EndpointMap, OpenshiftInternal, host]}
openshift_master_cluster_public_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
- name: generate openshift inventory for openshift_master service
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
content: |
{% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0%}
all:
children:
masters:
hosts:
{% for host in role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% if new_role_nodes[tripleo_role_name] | count > 0 -%}
new_masters:
hosts:
{% for host in new_role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% endif %}
{% endif %}
- name: generate openshift inventory for groups
copy:
dest: "{{playbook_dir}}/openshift/inventory/groups.yml"
content: |
all:
children:
etcd:
children:
masters: {}
OSEv3:
children:
masters: {}
etcd: {}
nodes: {}
new_masters: {}
new_etcd: {}
new_nodes: {}
{% if groups['openshift_glusterfs'] | default([]) -%}
glusterfs: {}
glusterfs_registry: {}
{%- endif %}
- name: combine cluster setting
set_fact:
openshift_global_vars: "{{ openshift_master_cluster_vars | combine(openshift_global_vars) }}"
when: 'tripleo_node_group_name != "node-config-all-in-one"'
- name: generate openshift global vars
copy:
dest: "{{playbook_dir}}/openshift/global_vars.yml"
content: "{{openshift_global_vars|to_nice_yaml}}"
- name: set openshift ansible path
set_fact:
openshift_ansible_path: {get_param: OpenShiftAnsiblePath}
- name: set openshift ansible playbook paths
set_fact:
openshift_ansible_deploy_playbook_path: "{{ openshift_ansible_path }}/playbooks/deploy_cluster.yml"
openshift_prerequisites_playbook_path: "{{ openshift_ansible_path }}/playbooks/prerequisites.yml"
openshift_master_scaleup_playbook_path: "{{ openshift_ansible_path }}/playbooks/openshift-master/scaleup.yml"
openshift_etcd_scaleup_playbook_path: "{{ openshift_ansible_path }}/playbooks/openshift-etcd/scaleup.yml"
openshift_worker_scaleup_playbook_path: "{{ openshift_ansible_path }}/playbooks/openshift-node/scaleup.yml"
openshift_ansible_image: {get_param: ContainerOpenShiftAnsibleImage}
# NOTE(flaper87): We'll use openshift_ansible_scaleup_playbook_path
# if there are new master or new worker nodes and we are doing an
# UPDATE. For all the other cases, we shall use the deploy playbook.
- name: generate openshift playbook
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
# NOTE(flaper87): The NetworkManager setup has been moved
# into openshift-ansible but it's not been released yet.
# This code will go away as soon as an rpm with the required
# roles hits the repo.
- name: OpenShift networking preparation
hosts: all
tasks:
- name: install NetworkManager
package:
name: NetworkManager
state: present
- name: generate nm dispatcher script
copy:
dest: "/etc/NetworkManager/dispatcher.d/99-os-net-config-origin-dns.sh"
owner: root
mode: 0755
content: >-
#!/bin/bash -x
DEVS=$(nmcli device | grep unmanaged | awk '{print $1}')
for dev in $DEVS;
do
temp="${dev%\"}"
temp="${temp#\"}"
export DEVICE_IFACE=$temp
/etc/NetworkManager/dispatcher.d/99-origin-dns.sh $DEVICE_IFACE up
done
- name: Enable NetworkManager
service:
name: NetworkManager
state: restarted
enabled: yes
{% if tripleo_stack_action == 'CREATE' %}
# Prerequisites playbook is explicitly needed only for
# initial install
- import_playbook: "{{openshift_prerequisites_playbook_path}}"
- import_playbook: "{{openshift_ansible_deploy_playbook_path}}"
{% elif tripleo_stack_action == 'UPDATE' %}
{% if has_new_nodes %}
- name: Restart dnsmasq to pick up new nodes
hosts: all
tasks:
- name: Restart dnsmasq
service:
name: dnsmasq
state: restarted
{% if new_role_nodes[tripleo_role_name] | count > 0 %}
# Scale up nodes (including masters)
- import_playbook: "{{openshift_master_scaleup_playbook_path}}"
{% else %}
# Scale up workers/infra nodes
- import_playbook: "{{openshift_worker_scaleup_playbook_path}}"
{% endif %}
{% endif %}
{% endif %}
- name: set openshift command
set_fact:
openshift_command: >-
{%- if openshift_command is defined -%}
{{openshift_command}}
{%- else -%}
sudo /usr/bin/tripleo-deploy-openshift
--config-download-dir {{playbook_dir}}
--image {{openshift_ansible_image}}
{%- endif -%}
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
exit ${PIPESTATUS[0]}
# NOTE(mandre) Scale up the etcd node in a separate ansible run
# because osa expects a different inventory for etcd scale up.
# The newly added nodes are not new anymore from the point of
# view of osa and need to be moved from new_masters and
# new_nodes group to masters and nodes groups respectively. In
# addition they need to be added to new_etcd groups.
- when: tripleo_stack_action == 'UPDATE' and new_role_nodes[tripleo_role_name] | count > 0
block:
- name: generate updated openshift inventory for openshift_master service
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
content: |
{% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0%}
all:
children:
masters:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% if new_role_nodes[tripleo_role_name] | count > 0 -%}
new_etcd:
hosts:
{% for host in new_role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% endif %}
{% endif %}
- name: generate updated openshift inventory for {{tripleo_role_name}} role groups
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_groups.yml"
content: |
{% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0%}
all:
children:
nodes:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
{% endif %}
- name: generate openshift playbook for etcd scaleup
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
# Scale up etcd nodes
- import_playbook: "{{openshift_etcd_scaleup_playbook_path}}"
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook-etcd.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook-etcd.log
exit ${PIPESTATUS[0]}
- name: generate post-deployment inventory for master nodes
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
content: |
{% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0%}
all:
children:
masters:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
vars:
{{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
{% endif %}
- when: tripleo_stack_action == 'UPDATE'
block:
- name: generate openshift playbook
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
# Re-run the deploy playbook to apply potential change
# changes to existing nodes
- import_playbook: "{{openshift_ansible_deploy_playbook_path}}"
- name: Restart masters
hosts: masters
serial: 1
tasks:
- import_role:
name: "{{ openshift_ansible_path }}/roles/openshift_control_plane"
tasks_from: restart.yml
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
exit ${PIPESTATUS[0]}
- name: generate openshift validation playbook
copy:
dest: "{{playbook_dir}}/openshift/playbook.yml"
content: |
- name: Simple validation OpenShift is actually deployed
hosts: masters
tasks:
- name: Check oc status
command: oc status --suggest
register: oc_status
become: true
- name: Register failure if oc status fails
command: echo true
register: oc_status_failed
when: '"fail" in oc_status.stdout'
- debug:
var: oc_status.stdout_lines
- name: Check oc get dc/router
command: "oc get dc/router -o jsonpath='{.status.readyReplicas}'"
register: oc_get_router
become: true
- name: Register failure if oc get dc/router fails
command: echo true
register: oc_get_router_failed
when: 'oc_get_router.stdout|int < 1'
- debug:
var: oc_get_router.stdout
- name: Check oc get dc/docker-registry
command: "oc get dc/docker-registry -o jsonpath='{.status.readyReplicas}'"
register: oc_get_registry
become: true
- name: Register failure if oc get dc/docker-registry fails
command: echo true
register: oc_get_registry_failed
when: 'oc_get_registry.stdout|int < 1'
- debug:
var: oc_get_registry.stdout
- name: Check oc get nodes
command: oc get nodes --all-namespaces
register: oc_get_nodes
become: true
- name: Register failure if oc get nodes fails
command: echo true
register: oc_get_nodes_failed
when: '"NotReady" in oc_get_nodes.stdout'
- debug:
var: oc_get_nodes.stdout_lines
- name: Fail the playbook if any validations failed
fail:
when: >
oc_status_failed.changed or
oc_get_nodes_failed.changed or
oc_get_router_failed.changed or
oc_get_registry_failed.changed
- name: print openshift command
debug:
var: openshift_command
- name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook-validation.log)
shell: |
{{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook-validation.log
exit ${PIPESTATUS[0]}

View File

@ -1,195 +0,0 @@
heat_template_version: rocky
description: External tasks definition for OpenShift
parameters:
StackAction:
type: string
description: >
Heat action on performed top-level stack. Note StackUpdateType is
set to UPGRADE when a major-version upgrade is in progress.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
RoleNetIpMap:
default: {}
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
OpenShiftNodeGroupName:
default: node-config-all-in-one
description: The group the nodes belong to.
type: string
tags:
- role_specific
OpenShiftDeploymentType:
default: 'origin'
description: The OpenShift-Ansible deployment type.
type: string
constraints:
- allowed_values: ['origin', 'openshift-enterprise']
resources:
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- OpenShiftNodeGroupName: OpenShiftNodeGroupName
- values: {get_param: [RoleParameters]}
- values:
OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
outputs:
role_data:
description: Role data for the Openshift Service
value:
service_name: openshift_node
config_settings: {}
upgrade_tasks: []
step_config: ''
external_deploy_tasks:
- name: openshift_node step 2
when: step|int == 2
tags: openshift
block:
- name: set role facts for generating inventory
set_fact:
tripleo_role_name: {get_param: RoleName}
tripleo_stack_action: {get_param: StackAction}
tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
openshift_master_network: {get_param: [ServiceNetMap, OpenshiftMasterNetwork]}
tripleo_openshift_deployment_type: {get_param: OpenShiftDeploymentType}
- set_fact:
tripleo_openshift_service_type: >-
{%- if tripleo_openshift_deployment_type == 'origin' -%}
origin
{%- else -%}
atomic-openshift
{%- endif -%}
# NOTE(flaper87): Check if the node service is running in the
# openshift nodes so we can flag the node as new later on.
- name: Check if node service is running
command: "systemctl is-active --quiet {{tripleo_openshift_service_type}}-node"
register: node_services
delegate_to: "{{item}}"
with_items: "{{ groups[tripleo_role_name] | default([]) }}"
failed_when: false
# NOTE(flaper87): Create all the nodes objects now, as yaml dicts,
# instead of formatting everything as part of a template.
# We consider new_node all the nodes that exited with a non-zero
# status in the previous task *IF* this is a stack update.
# Openshift-ansible expects nodes to be in the new_nodes group for
# scale up operation only.
- set_fact:
nodes:
- new_node: "{{ tripleo_stack_action == 'UPDATE' and node_services.results | selectattr('item', 'equalto', item) | selectattr('rc', 'greaterthan', 0) | list | count > 0 }}"
hostname: "{{item}}"
ansible_user: "{{ hostvars[item]['ansible_user'] | default(hostvars[item]['ansible_ssh_user']) | default('root') }}"
ansible_host: "{{ hostvars[item]['ansible_host'] | default(item) }}"
ansible_become: true
openshift_node_group_name: '{{tripleo_node_group_name}}'
etcd_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
openshift_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
openshift_master_bind_addr: "{{hostvars[item][openshift_master_network + '_ip']}}"
openshift_public_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
openshift_public_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
register: all_role_nodes
with_items: "{{groups[tripleo_role_name] | default([]) }}"
- set_fact:
role_nodes: "{{ role_nodes|default({}) | combine( {tripleo_role_name: all_role_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', False) | list}) }}"
new_role_nodes: "{{ new_role_nodes|default({}) | combine( {tripleo_role_name: all_role_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}) }}"
- set_fact:
has_new_nodes: "{{ (has_new_nodes | default(False)) or new_role_nodes[tripleo_role_name] | count > 0 }}"
- name: generate openshift inventory for {{tripleo_role_name}} role hosts
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_hosts.yml"
content: |
{% if role_nodes[tripleo_role_name] | count > 0%}
all:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{{host | to_nice_yaml() | indent(6)}}
{% endfor %}
{% endif %}
- name: generate openshift inventory for {{tripleo_role_name}} role groups
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_groups.yml"
content: |
{% if role_nodes[tripleo_role_name] | count > 0%}
all:
children:
{% if role_nodes[tripleo_role_name] | count > 0 -%}
nodes:
hosts:
{% for host in role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
{% endif %}
{% if new_role_nodes[tripleo_role_name] | count > 0 -%}
new_nodes:
hosts:
{% for host in new_role_nodes[tripleo_role_name] -%}
{{host.hostname}}:
{% endfor %}
{% endif %}
{% endif %}
- name: openshift_node step 4
when: step|int == 4
tags: openshift
block:
- name: set role facts for generating inventory
set_fact:
tripleo_role_name: {get_param: RoleName}
- name: generate post-deployment inventory for {{tripleo_role_name}} role groups
copy:
dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_groups.yml"
content: |
{% if (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) | count > 0%}
all:
children:
nodes:
hosts:
{% for host in (role_nodes[tripleo_role_name] + new_role_nodes[tripleo_role_name]) -%}
{{host.hostname}}:
{% endfor %}
{% endif %}

View File

@ -1,61 +0,0 @@
heat_template_version: rocky
description: External tasks definition for OpenShift
parameters:
RoleNetIpMap:
default: {}
type: json
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
# TODO(mandre) This is unused. Remove it or make it OpenShiftNodeVars
OpenShiftWorkerNodeVars:
default: {}
description: OpenShift node vars specific for the worker nodes
type: json
resources:
OpenShiftNode:
type: ./openshift-node-baremetal-ansible.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Openshift Service
value:
service_name: openshift_worker
config_settings: {get_attr: [OpenShiftNode, role_data, config_settings]}
upgrade_tasks: []
step_config: ''
external_deploy_tasks:
- get_attr: [OpenShiftNode, role_data, external_deploy_tasks]

View File

@ -1,2 +0,0 @@
resource_registry:
OS::TripleO::Services::OpenShift::GlusterFS: ../deployment/openshift/openshift-cns-baremetal-ansible.yaml

View File

@ -1,7 +0,0 @@
resource_registry:
OS::TripleO::Services::HAproxy: ../deployment/haproxy/haproxy-container-puppet.yaml
OS::TripleO::Services::Keepalived: ../deployment/keepalived/keepalived-container-puppet.yaml
OS::TripleO::Services::OpenShift::Infra: ../deployment/openshift/openshift-infra-baremetal-ansible.yaml
OS::TripleO::Services::OpenShift::Master: ../deployment/openshift/openshift-master-baremetal-ansible.yaml
OS::TripleO::Services::OpenShift::Worker: ../deployment/openshift/openshift-worker-baremetal-ansible.yaml
OS::TripleO::Services::Podman: ../deployment/podman/podman-baremetal-ansible.yaml

View File

@ -88,12 +88,6 @@ parameter_defaults:
OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
OpenDaylightAdmin: {protocol: http, port: '8081', host: IP_ADDRESS}
OpenDaylightInternal: {protocol: http, port: '8081', host: IP_ADDRESS}
OpenshiftAdmin: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftInternal: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftPublic: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftRouterAdmin: {protocol: http, port: '80', host: IP_ADDRESS}
OpenshiftRouterInternal: {protocol: http, port: '80', host: IP_ADDRESS}
OpenshiftRouterPublic: {protocol: http, port: '80', host: IP_ADDRESS}
OvnDbInternal: {protocol: tcp, port: '6642', host: IP_ADDRESS}
PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}

View File

@ -84,12 +84,6 @@ parameter_defaults:
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
OpenDaylightAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenDaylightInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenshiftAdmin: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftInternal: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftPublic: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftRouterAdmin: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterInternal: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OvnDbInternal: {protocol: tcp, port: '6642', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}

View File

@ -84,12 +84,6 @@ parameter_defaults:
OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
OpenDaylightAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenDaylightInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenshiftAdmin: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftInternal: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftPublic: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftRouterAdmin: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterInternal: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OvnDbInternal: {protocol: tcp, port: '6642', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}

View File

@ -84,12 +84,6 @@ parameter_defaults:
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
OpenDaylightAdmin: {protocol: 'https', port: '8081', host: 'CLOUDNAME'}
OpenDaylightInternal: {protocol: 'https', port: '8081', host: 'CLOUDNAME'}
OpenshiftAdmin: {protocol: 'https', port: '8443', host: 'CLOUDNAME'}
OpenshiftInternal: {protocol: 'https', port: '8443', host: 'CLOUDNAME'}
OpenshiftPublic: {protocol: 'https', port: '8443', host: 'CLOUDNAME'}
OpenshiftRouterAdmin: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
OpenshiftRouterInternal: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
OpenshiftRouterPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
OvnDbInternal: {protocol: tcp, port: '6642', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}

View File

@ -264,24 +264,6 @@ NovaVNCProxy:
net_param: NovaApi
port: 6080
Openshift:
Internal:
net_param: OpenshiftMaster
Public:
net_param: Public
Admin:
net_param: OpenshiftMaster
port: 8443
OpenshiftRouter:
Internal:
net_param: OpenshiftInfra
Public:
net_param: Public
Admin:
net_param: OpenshiftInfra
port: 80
Swift:
Internal:
net_param: SwiftProxy

View File

@ -88,12 +88,6 @@ parameters:
OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
OpenDaylightAdmin: {protocol: http, port: '8081', host: IP_ADDRESS}
OpenDaylightInternal: {protocol: http, port: '8081', host: IP_ADDRESS}
OpenshiftAdmin: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftInternal: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftPublic: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftRouterAdmin: {protocol: http, port: '80', host: IP_ADDRESS}
OpenshiftRouterInternal: {protocol: http, port: '80', host: IP_ADDRESS}
OpenshiftRouterPublic: {protocol: http, port: '80', host: IP_ADDRESS}
OvnDbInternal: {protocol: tcp, port: '6642', host: IP_ADDRESS}
PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}
@ -6804,492 +6798,6 @@ outputs:
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenDaylightInternal, port]
OpenshiftAdmin:
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftMasterNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
get_param: [EndpointMap, OpenshiftAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
port:
get_param: [EndpointMap, OpenshiftAdmin, port]
protocol:
get_param: [EndpointMap, OpenshiftAdmin, protocol]
uri:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftAdmin, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftMasterNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftAdmin, port]
uri_no_suffix:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftAdmin, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftMasterNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftAdmin, port]
OpenshiftInternal:
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftMasterNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
get_param: [EndpointMap, OpenshiftInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
port:
get_param: [EndpointMap, OpenshiftInternal, port]
protocol:
get_param: [EndpointMap, OpenshiftInternal, protocol]
uri:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftInternal, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftMasterNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftInternal, port]
uri_no_suffix:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftInternal, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftMasterNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftMasterNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftInternal, port]
OpenshiftPublic:
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
get_param: [EndpointMap, OpenshiftPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- get_param: [ServiceNetMap, PublicNetwork]
port:
get_param: [EndpointMap, OpenshiftPublic, port]
protocol:
get_param: [EndpointMap, OpenshiftPublic, protocol]
uri:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftPublic, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftPublic, port]
uri_no_suffix:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftPublic, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftPublic, port]
OpenshiftRouterAdmin:
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftInfraNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
port:
get_param: [EndpointMap, OpenshiftRouterAdmin, port]
protocol:
get_param: [EndpointMap, OpenshiftRouterAdmin, protocol]
uri:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftRouterAdmin, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftInfraNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftRouterAdmin, port]
uri_no_suffix:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftRouterAdmin, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftInfraNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftRouterAdmin, port]
OpenshiftRouterInternal:
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftInfraNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
port:
get_param: [EndpointMap, OpenshiftRouterInternal, port]
protocol:
get_param: [EndpointMap, OpenshiftRouterInternal, protocol]
uri:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftRouterInternal, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftInfraNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftRouterInternal, port]
uri_no_suffix:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftRouterInternal, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, OpenshiftInfraNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, OpenshiftInfraNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftRouterInternal, port]
OpenshiftRouterPublic:
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- get_param: [ServiceNetMap, PublicNetwork]
port:
get_param: [EndpointMap, OpenshiftRouterPublic, port]
protocol:
get_param: [EndpointMap, OpenshiftRouterPublic, protocol]
uri:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftRouterPublic, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftRouterPublic, port]
uri_no_suffix:
make_url:
scheme:
get_param: [EndpointMap, OpenshiftRouterPublic, protocol]
host:
str_replace:
template:
get_param: [EndpointMap, OpenshiftRouterPublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
port:
get_param: [EndpointMap, OpenshiftRouterPublic, port]
OvnDbInternal:
host:
str_replace:

View File

@ -92,8 +92,6 @@ parameters:
DesignateApiNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
BINDNetwork: {{ _service_nets.get('external', 'ctlplane') }}
EtcdNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
OpenshiftMasterNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
OpenshiftInfraNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
# HaproxyNetwork currently only controls the haproxy.stats network binding
HaproxyNetwork: ctlplane
# We special-case the default ResolveNetwork for the Ceph roles

View File

@ -1,75 +0,0 @@
# List of networks, used for j2 templating of enabled networks
#
# Supported values:
#
# name: Name of the network (mandatory)
# name_lower: lowercase version of name used for filenames
# (optional, defaults to name.lower())
# service_net_map_replace: if name_lower is set to a custom name this should be set
# to original default (optional). This field is only necessary when
# changing the default network names, not when adding a new custom network.
# enabled: Is the network enabled (optional, defaults to true)
# NOTE: False will use noop.yaml for unused legacy networks to support upgrades.
# vlan: vlan for the network (optional)
# vip: Enable creation of a virtual IP on this network
# ip_subnet: IP/CIDR, e.g. '192.168.24.0/24' or '2001:db8:fd00:1000::/64'
# (optional, may use parameter defaults instead)
# allocation_pools: IP range list e.g. [{'start':'10.0.0.4', 'end':'10.0.0.250'}]
# gateway_ip: gateway for the network (optional, may use parameter defaults)
# routes: Optional, list of networks that should be routed via network gateway.
# Example: [{'destination':'10.0.0.0/16', 'nexthop':'10.0.0.1'}]
# A single /16 supernet route could be used for 255 smaller /24 subnets.
# ipv6_subnet: Optional, sets default IPv6 subnet if IPv4 is already defined.
# ipv6_allocation_pools: Set default IPv6 allocation pools if IPv4 allocation pools
# are already defined.
# gateway_ipv6: Set an IPv6 gateway if IPv4 gateway already defined.
# ipv6: If ip_subnet not defined, this specifies that the network is IPv6-only.
# NOTE: IP-related values set parameter defaults in templates, may be overridden,
# either by operators, or e.g in environments/network-isolation-v6.yaml where we
# set some default IPv6 addresses.
# compat_name: for existing stack you may need to override the default
# transformation for the resource's name.
#
# Example:
# - name Example
# vip: false
# ip_subnet: '10.0.2.0/24'
# allocation_pools: [{'start': '10.0.2.4', 'end': '10.0.2.250'}]
# gateway_ip: '10.0.2.254'
#
# To support backward compatibility, two versions of the network definitions
# will be created, network/<network>.yaml and network/<network>_v6.yaml. Only
# one of these files may be used in the deployment at a time, since the
# parameters used for configuration are the same in both files. In the
# future, this behavior may be changed to create only one file for custom
# networks. You may specify IPv6 addresses for ip_subnet, allocation_pools,
# and gateway_ip if no IPv4 addresses are used for a custom network, or set
# ipv6: true, and the network/<network>.yaml file will be configured as IPv6.
#
# For configuring both IPv4 and IPv6 on the same interface, use two separate
# networks, and then assign both IPs to the same interface in a custom NIC
# configuration templates.
#
# The ordering of the networks below will determine the order in which NICs
# are assigned in the network/config/multiple-nics templates, beginning with
# NIC2, Control Plane is always NIC1.
- name: Storage
vip: true
vlan: 30
name_lower: storage
ip_subnet: '172.16.1.0/24'
allocation_pools: [{'start': '172.16.1.4', 'end': '172.16.1.250'}]
- name: InternalApi
name_lower: internal_api
vip: true
vlan: 20
ip_subnet: '172.16.2.0/24'
allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}]
- name: External
vip: true
name_lower: external
vlan: 10
ip_subnet: '10.0.0.0/24'
allocation_pools: [{'start': '10.0.0.4', 'end': '10.0.0.250'}]
gateway_ip: '10.0.0.1'

View File

@ -0,0 +1,11 @@
---
other:
- |
    OpenShift deployed by TripleO support has been removed in a downstream
    version of Stein, which makes the upstream support difficult to maintain.
OpenShift can be deployed using OpenShift-Ansible and
users who desire to deploy OpenShift 3.11 onto bare metal nodes can
still do so using openshift-ansible directly. The provisioning of
the Operating System on baremetal can be done with OpenStack Ironic on
the Overcloud or also can be done with deployed-servers, achieving the
same result.

View File

@ -1,42 +0,0 @@
###############################################################################
# Role: OpenShiftAllInOne #
###############################################################################
- name: OpenShiftAllInOne
description: |
OpenShiftAllInOne role
CountDefault: 1
RoleParametersDefault:
OpenShiftNodeGroupName: 'node-config-all-in-one'
DockerSkipUpdateReconfiguration: true
tags:
- primary
- controller
- openshift
networks:
External:
subnet: external_subnet
InternalApi:
subnet: internal_api_subnet
Storage:
subnet: storage_subnet
# For systems with both IPv4 and IPv6, you may specify a gateway network for
# each, such as ['ControlPlane', 'External']
default_route_networks: ['External']
update_serial: 25
ServicesDefault:
- OS::TripleO::Services::BootParams
- OS::TripleO::Services::Docker
- OS::TripleO::Services::OpenShift::GlusterFS
- OS::TripleO::Services::OpenShift::Infra
- OS::TripleO::Services::OpenShift::Master
- OS::TripleO::Services::OpenShift::Worker
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::TripleoPackages
# NOTE(mandre) In all-in-one HAproxy conflicts with the one openshift deploys
# - OS::TripleO::Services::HAproxy
# - OS::TripleO::Services::Keepalived

View File

@ -1,31 +0,0 @@
###############################################################################
# Role: OpenShiftInfra #
###############################################################################
- name: OpenShiftInfra
description: |
OpenShiftInfra role, a specialized worker that only runs infra pods.
CountDefault: 1
RoleParametersDefault:
OpenShiftNodeGroupName: 'node-config-infra'
DockerSkipUpdateReconfiguration: true
tags:
- openshift
networks:
InternalApi:
subnet: internal_api_subnet
Storage:
subnet: storage_subnet
# For systems with both IPv4 and IPv6, you may specify a gateway network for
# each, such as ['ControlPlane', 'External']
default_route_networks: ['ControlPlane']
update_serial: 25
ServicesDefault:
- OS::TripleO::Services::BootParams
- OS::TripleO::Services::Docker
- OS::TripleO::Services::OpenShift::GlusterFS
- OS::TripleO::Services::OpenShift::Infra
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::TripleoFirewall

View File

@ -1,38 +0,0 @@
###############################################################################
# Role: OpenShiftMaster #
###############################################################################
- name: OpenShiftMaster
description: |
OpenShiftMaster role
CountDefault: 1
RoleParametersDefault:
OpenShiftNodeGroupName: 'node-config-master'
DockerSkipUpdateReconfiguration: true
tags:
- primary
- controller
- openshift
networks:
External:
subnet: external_subnet
InternalApi:
subnet: internal_api_subnet
Storage:
subnet: storage_subnet
# For systems with both IPv4 and IPv6, you may specify a gateway network for
# each, such as ['ControlPlane', 'External']
default_route_networks: ['External']
update_serial: 25
ServicesDefault:
- OS::TripleO::Services::BootParams
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Docker
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::OpenShift::Master
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::TripleoPackages

View File

@ -1,31 +0,0 @@
###############################################################################
# Role: OpenShiftWorker #
###############################################################################
- name: OpenShiftWorker
description: |
OpenShiftWorker role
CountDefault: 1
RoleParametersDefault:
OpenShiftNodeGroupName: 'node-config-compute'
DockerSkipUpdateReconfiguration: true
tags:
- openshift
networks:
InternalApi:
subnet: internal_api_subnet
Storage:
subnet: storage_subnet
# For systems with both IPv4 and IPv6, you may specify a gateway network for
# each, such as ['ControlPlane', 'External']
default_route_networks: ['ControlPlane']
update_serial: 25
ServicesDefault:
- OS::TripleO::Services::BootParams
- OS::TripleO::Services::Docker
- OS::TripleO::Services::OpenShift::GlusterFS
- OS::TripleO::Services::OpenShift::Worker
- OS::TripleO::Services::Podman
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::TripleoFirewall

View File

@ -192,12 +192,6 @@ environments:
OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
OpenDaylightAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenDaylightInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenshiftAdmin: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftInternal: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftPublic: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftRouterAdmin: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterInternal: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OvnDbInternal: {protocol: tcp, port: '6642', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
@ -309,12 +303,6 @@ environments:
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
OpenDaylightAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenDaylightInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
OpenshiftAdmin: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftInternal: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftPublic: {protocol: 'http', port: '8443', host: 'IP_ADDRESS'}
OpenshiftRouterAdmin: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterInternal: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OpenshiftRouterPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
OvnDbInternal: {protocol: tcp, port: '6642', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
@ -426,12 +414,6 @@ environments:
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
OpenDaylightAdmin: {protocol: 'https', port: '8081', host: 'CLOUDNAME'}
OpenDaylightInternal: {protocol: 'https', port: '8081', host: 'CLOUDNAME'}
OpenshiftAdmin: {protocol: 'https', port: '8443', host: 'CLOUDNAME'}
OpenshiftInternal: {protocol: 'https', port: '8443', host: 'CLOUDNAME'}
OpenshiftPublic: {protocol: 'https', port: '8443', host: 'CLOUDNAME'}
OpenshiftRouterAdmin: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
OpenshiftRouterInternal: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
OpenshiftRouterPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
OvnDbInternal: {protocol: tcp, port: '6642', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
@ -553,12 +535,6 @@ environments:
OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
OpenDaylightAdmin: {protocol: http, port: '8081', host: IP_ADDRESS}
OpenDaylightInternal: {protocol: http, port: '8081', host: IP_ADDRESS}
OpenshiftAdmin: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftInternal: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftPublic: {protocol: http, port: '8443', host: IP_ADDRESS}
OpenshiftRouterAdmin: {protocol: http, port: '80', host: IP_ADDRESS}
OpenshiftRouterInternal: {protocol: http, port: '80', host: IP_ADDRESS}
OpenshiftRouterPublic: {protocol: http, port: '80', host: IP_ADDRESS}
OvnDbInternal: {protocol: tcp, port: '6642', host: IP_ADDRESS}
PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}

View File

@ -1130,11 +1130,8 @@ def validate(filename, param_map):
# NOTE(hjensas): The routed network data example is very different ...
# We need to develop a more advanced validator, probably using a schema
# definition instead.
# NOTE(mandre): Same goes for the openshift network data where it
# contains only a subset of the overcloud networks.
if (filename.startswith('./network_data_') and
not filename.endswith(('routed.yaml',
'openshift.yaml',
'undercloud.yaml'))):
result = validate_network_data_file(filename)
retval |= result

View File

@ -75,22 +75,6 @@
- ^deployed-server/.*$
- ^common/.*$
- zuul.d/*
- tripleo-ci-centos-7-scenario009-multinode-oooq-container:
dependencies: *deps_unit_lint
files:
- ^coe/openshift.*$
- ^environments/openshift.*$
- ^deployment/openshift/.*$
- ^(deployment|docker|puppet)/.*haproxy.*$
- ^(deployment|docker|puppet)/.*keepalived.*$
- ^(deployment|docker|puppet)/.*mistral.*$
- ci/environments/scenario009-multinode.yaml
- ^ci/common/.*$
- ci/pingtests/scenario009-multinode.yaml
- ^network/endpoints/.*$
- ^deployed-server/.*$
- ^common/.*$
- zuul.d/*
- tripleo-ci-centos-7-scenario010-multinode-oooq-container:
dependencies: *deps_unit_lint
files: