9e2d9f6aef
Currently "openstack" command on bridge doesn't work, because we need cinder client pinned to an older version for RAX support. The upstream container uses the latest versions of everything and it fails to parse the "volume_api_version: 2" pin for RAX in the config file. In general, the version of openstackclient we can probably most likely rely on to work is the one from the launch-node virtualenv. It also means we just have one place to manage a broadly-compatible version, instead of trying to manage versions in separate containers, etc. This converts the /usr/local/bin/openstack command from calling into the container, to calling into the launch venv. Change-Id: I604d5c17268a8219d51d432ba21feeb2e752a693
67 lines
2.4 KiB
YAML
- hosts: bastion:!disabled
  name: "Bridge: configure the bastion host"
  roles:
    - iptables
    - edit-secrets-script
    - install-docker
  tasks:
    # Skip as no arm64 support is available; this is only used for gate
    # testing, where we can't mix arm64 and x86 nodes, so we need a
    # minimally working bridge to drive the tests for mirrors, nodepool,
    # etc.
    - name: Install openshift/kubectl
      when: ansible_architecture != 'aarch64'
      block:
        - include_role:
            name: install-kubectl
        - include_role:
            name: configure-kubectl

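    # Render the combined cloud configuration for the SDK/CLI tooling.
    # We assume the configure-openstacksdk role writes this template out
    # to the standard /etc/openstack/clouds.yaml location.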
    - include_role:
        name: configure-openstacksdk
      vars:
        openstacksdk_config_template: clouds/bridge_all_clouds.yaml.j2

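    # all-clouds.yaml was the earlier combined configuration file; with
    # every cloud now rendered via the template above it is presumably
    # redundant, hence this cleanup.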
    - name: Get rid of all-clouds.yaml
      file:
        state: absent
        path: '/etc/openstack/all-clouds.yaml'

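    # Back up the DNS zones hosted in Rackspace Cloud DNS; the schedule
    # and destination details live in the rax-dns-backup role itself.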
    - name: Install rackspace DNS backup tool
      include_role:
        name: rax-dns-backup

# NOTE: we have hard-coded the active bridge here because we only want
# to install this on the currently active production bridge that will
# execute this reboot cycle (we don't have two bastion hosts usually,
# but if we are bootstrapping a new one there may be a period where
# both have credentials). For testing we also allow it to install on
# the system-config-run host -- but it will not have the credentials
# to actually do anything there if it does fire.
- hosts: bridge01.opendev.org:bridge99.opendev.org:!disabled
  name: Install reboot jobs
  tasks:
    - name: Automated Zuul cluster reboots and updates
      # Note this is run via cron because a zuul job can't run this
      # playbook, as the playbook relies on all jobs ending for graceful
      # stops on the executors.
      cron:
        name: "Zuul cluster restart"
        # Start Saturdays at 00:01 UTC (weekday 6 is Saturday in cron
        # numbering; 0 is Sunday).
        # Estimated completion time Sunday at 18:00 UTC.
        minute: 1
        hour: 0
        weekday: 6
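        # flock -n is non-blocking: if a previous week's restart is
        # somehow still holding the lock, this invocation exits
        # immediately rather than starting a second concurrent run.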
job: "flock -n /var/run/zuul_reboot.lock /usr/local/bin/ansible-playbook -f 20 /home/zuul/src/opendev.org/opendev/system-config/playbooks/zuul_reboot.yaml >> /var/log/ansible/zuul_reboot.log 2>&1"
|
|
|
|
    - name: Rotate Zuul restart logs
      include_role:
        name: logrotate
      vars:
        logrotate_file_name: /var/log/ansible/zuul_reboot.log
        logrotate_frequency: weekly

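    # install-launch-node builds the launch virtualenv with the
    # broadly-compatible client set described in the commit message;
    # /usr/local/bin/openstack now execs into this venv.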
    - name: Install node launcher
      include_role:
        name: install-launch-node