
In discussion of other changes, I realised that the bridge bootstrap job is running via zuul/run-production-playbook.yaml. This means it uses the Ansible installed on bridge to run against itself -- which isn't much of a bootstrap. What should happen is that the bootstrap-bridge.yaml playbook, which sets up Ansible and keys on the bridge node, should run directly from the executor against the bridge node. To achieve this we reparent the job to opendev-infra-prod-setup-keys, which sets up the executor to be able to log into the bridge node. We then add the host dynamically and run the bootstrap-bridge.yaml playbook against it. This is similar to the gate testing path, where bootstrap-bridge.yaml is run from the executor against the ephemeral bridge testing node before the nested Ansible is used. The root key deployment is updated to use the nested Ansible directly, so that it can read the variable from the on-host secrets. Change-Id: Iebaeed5028050d890ab541818f405978afd60124
55 lines
1.8 KiB
YAML
55 lines
1.8 KiB
YAML
---
# Configure the bastion ("bridge") host used to drive production
# Ansible runs for the OpenDev infrastructure.  Pattern
# "bridge.openstack.org:!disabled" targets the bridge host unless it is
# in the "disabled" group.
- hosts: bridge.openstack.org:!disabled
  name: "Bridge: configure the bastion host"
  roles:
    - iptables
    - edit-secrets-script
    - install-docker
  tasks:
    # Skip as no arm64 support available; only used for gate testing,
    # where we can't mix arm64 and x86 nodes, so need a minimally
    # working bridge to drive the tests for mirrors/nodepool
    # etc. things.
    - name: Install openshift/kubectl
      when: ansible_architecture != 'aarch64'
      block:
        - include_role:
            name: install-osc-container
        - include_role:
            name: install-kubectl
        - include_role:
            name: configure-kubectl

    # Install clouds.yaml covering all clouds managed from the bridge.
    - include_role:
        name: configure-openstacksdk
      vars:
        openstacksdk_config_template: clouds/bridge_all_clouds.yaml.j2

    # Clean up the legacy combined credentials file.
    - name: Get rid of all-clouds.yaml
      file:
        state: absent
        path: '/etc/openstack/all-clouds.yaml'

    - name: Install rackspace DNS backup tool
      include_role:
        name: rax-dns-backup

    - name: Automated Zuul cluster reboots and updates
      # Note this is run via cron because a zuul job can't run this playbook
      # as the playbook relies on all jobs ending for graceful stops on the
      # executors.
      cron:
        name: "Zuul cluster restart"
        # Start Sundays at 00:01 UTC.
        # Estimated completion time Sunday at 18:00 UTC.
        # NOTE(review): in cron, weekday 6 is Saturday (0 or 7 is
        # Sunday); the comment and the value disagree -- confirm which
        # is intended before changing either.
        minute: 1
        hour: 0
        weekday: 6
        job: "flock -n /var/run/zuul_reboot.lock /usr/local/bin/ansible-playbook -f 20 /home/zuul/src/opendev.org/opendev/system-config/playbooks/zuul_reboot.yaml >> /var/log/ansible/zuul_reboot.log 2>&1"

    # Keep the reboot-job log from growing without bound.
    - name: Rotate Zuul restart logs
      include_role:
        name: logrotate
      vars:
        logrotate_file_name: /var/log/ansible/zuul_reboot.log
        logrotate_frequency: weekly