project-config/nodepool/nl04.openstack.org.yaml
Ian Wienand 19e7cf09d9 Disable OVH BHS1 region
This reverts commit 756a8f43f7c5458fb7b4c831e6c1ef65918dc6f6, which
re-enabled OVH BHS1 after maintenance.  I strongly suspect that
maintenance has something to do with the issues ...

It appears that VMs in BHS1 cannot communicate with the mirror.

From a sample host (158.69.64.62) to mirror01.bhs1.ovh.openstack.org:

---
 root@ubuntu-bionic-ovh-bhs1-0002154210:~# ip addr
 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
 2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether fa:16:3e:1b:4b:32 brd ff:ff:ff:ff:ff:ff
    inet 158.69.64.62/19 brd 158.69.95.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe1b:4b32/64 scope link
       valid_lft forever preferred_lft forever

 root@ubuntu-bionic-ovh-bhs1-0002154210:~# traceroute -n mirror01.bhs1.ovh.openstack.org
 traceroute to mirror01.bhs1.ovh.openstack.org (158.69.80.87), 30 hops max, 60 byte packets
  1  158.69.64.62  2140.650 ms !H  2140.627 ms !H  2140.615 ms !H

 root@ubuntu-bionic-ovh-bhs1-0002154210:~# ping mirror01.bhs1.ovh.openstack.org
 PING mirror01.bhs1.ovh.openstack.org (158.69.80.87) 56(84) bytes of data.
 From ubuntu-bionic-ovh-bhs1-0002154210 (158.69.64.62) icmp_seq=1 Destination Host Unreachable
 From ubuntu-bionic-ovh-bhs1-0002154210 (158.69.64.62) icmp_seq=2 Destination Host Unreachable
 From ubuntu-bionic-ovh-bhs1-0002154210 (158.69.64.62) icmp_seq=3 Destination Host Unreachable
 --- mirror01.bhs1.ovh.openstack.org ping statistics ---
 4 packets transmitted, 0 received, +3 errors, 100% packet loss, time 3049ms
---

However, *external* access to the mirror host and all other hosts
seems fine.  Note that the mirror (158.69.80.87) sits inside the
instance's own 158.69.64.0/19 subnet, and the "!H" (host unreachable)
responses above come from the instance's own address, which suggests
layer-2 neighbour resolution is failing inside the tenant network.
It appears to be an internal OVH BHS1 networking issue.

I have raised ticket #9721374795 with OVH about this issue.  It needs
to be escalated, so it is currently pending (further details should
come to infra-root@openstack.org).

In the meantime, all jobs in the region are failing.  Disable it
until we have a solution, by zeroing the pool's max-servers as
sketched below.
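
A minimal sketch of the effective change (the full launcher config
follows below):

---
 providers:
   - name: ovh-bhs1
     pools:
       - name: main
         # 0 means nodepool launches no new nodes in this pool: BHS1
         # drops out of rotation without its configuration being
         # deleted, so re-enabling later is a one-line change.
         max-servers: 0
---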

Change-Id: I748ca1c10d98cc2d7acf2e1821d4d0f886db86eb
2018-09-20 15:55:45 +10:00


elements-dir: /etc/nodepool/elements
images-dir: /opt/nodepool_dib

zookeeper-servers:
  - host: nodepool.openstack.org
    port: 2181

# NOTE(pabelanger): To avoid race conditions between multiple launchers, only
# nl01.o.o will manage min-ready of our labels. If nl01.o.o is ever disabled,
# another launcher will need to assume this logic (see the commented
# sketch after the label list below).
labels:
  - name: centos-7
  - name: debian-stretch
  - name: fedora-28
  - name: gentoo-17-0-systemd
  - name: opensuse-423
  - name: opensuse-150
  - name: opensuse-tumbleweed
  - name: ubuntu-bionic
  - name: ubuntu-trusty
  - name: ubuntu-xenial
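
# A hedged sketch, assuming nl01.openstack.org's launcher config (not
# shown here) is where min-ready lives: there, a label entry would carry
# a min-ready count, e.g.
#
#   labels:
#     - name: ubuntu-bionic
#       min-ready: 1
#
# whereas the bare names above only declare which labels this launcher
# may satisfy.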
providers:
  - name: ovh-bhs1
    region-name: 'BHS1'
    cloud: ovh
    boot-timeout: 120
    launch-timeout: 600
    rate: 0.1
    diskimages: &provider_diskimages
      - name: centos-7
        config-drive: true
      - name: debian-stretch
        config-drive: true
      - name: fedora-28
        config-drive: true
      - name: gentoo-17-0-systemd
        config-drive: true
      - name: opensuse-423
        config-drive: true
      - name: opensuse-150
        config-drive: true
      - name: opensuse-tumbleweed
        config-drive: true
      - name: ubuntu-bionic
        config-drive: true
      - name: ubuntu-trusty
        config-drive: true
      - name: ubuntu-xenial
        config-drive: true
    pools:
      - name: main
        max-servers: 0
        labels: &provider_pools_labels
          - name: centos-7
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: centos-7
            key-name: infra-root-keys-2018-06-15
          - name: debian-stretch
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: debian-stretch
            key-name: infra-root-keys-2018-06-15
          - name: fedora-28
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: fedora-28
            key-name: infra-root-keys-2018-06-15
          - name: gentoo-17-0-systemd
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: gentoo-17-0-systemd
            key-name: infra-root-keys-2018-06-15
          - name: opensuse-423
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: opensuse-423
            key-name: infra-root-keys-2018-06-15
          - name: opensuse-150
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: opensuse-150
            key-name: infra-root-keys-2018-06-15
          - name: opensuse-tumbleweed
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: opensuse-tumbleweed
            key-name: infra-root-keys-2018-06-15
          - name: ubuntu-bionic
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: ubuntu-bionic
            key-name: infra-root-keys-2018-06-15
          - name: ubuntu-trusty
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: ubuntu-trusty
            key-name: infra-root-keys-2018-06-15
          - name: ubuntu-xenial
            min-ram: 8000
            flavor-name: ssd-osFoundation-3
            diskimage: ubuntu-xenial
            key-name: infra-root-keys-2018-06-15
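
  # Note on reuse: the &provider_diskimages and &provider_pools_labels
  # anchors defined under ovh-bhs1 are consumed by ovh-gra1 below via
  # *provider_diskimages and *provider_pools_labels. YAML expands each
  # alias to the full list it names, so both regions share identical
  # diskimage and label definitions; only per-provider settings such as
  # region-name and max-servers differ.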
  - name: ovh-gra1
    region-name: 'GRA1'
    cloud: ovh
    boot-timeout: 120
    launch-timeout: 600
    rate: 0.1
    diskimages: *provider_diskimages
    pools:
      - name: main
        max-servers: 79
        labels: *provider_pools_labels
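
# The top-level diskimages list below only names the images this
# launcher consumes; the full build definitions (elements, release,
# environment) are assumed to live in the nodepool-builder configs
# alongside this file (e.g. the nb0x.openstack.org.yaml files), which
# are not shown here.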
diskimages:
  - name: centos-7
  - name: debian-stretch
  - name: fedora-28
  - name: gentoo-17-0-systemd
  - name: opensuse-423
  - name: opensuse-150
  - name: opensuse-tumbleweed
  - name: ubuntu-bionic
  - name: ubuntu-trusty
  - name: ubuntu-xenial