Don't hardcode OC user name for various checks.

During OC upgrade we check whether the UC is configured with SSL
and, if so, verify that the OC nodes can access it.
The SSH user for those checks is hardcoded to 'heat-admin', which
doesn't work for pre-provisioned scenarios.

Change-Id: I9e22373a7be48e707b393023cac24ece94546228
(cherry picked from commit bb6980d388)
commit 8dfdb40d1d
parent 2d32930020
Author: Yurii Prokulevych
Date:   2018-06-04 14:38:08 +02:00
Committed by: Lukas Bezdicka
6 changed files with 25 additions and 16 deletions
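
Every file touched below applies the same fallback: the SSH user comes from the role's overcloud_ssh_user variable and only defaults to 'heat-admin' when that variable is empty. A minimal sketch of the pattern as an Ansible task (the task name and node_ip variable are placeholders, not part of this change):

    # Resolve the overcloud SSH user once, then reuse it for the remote command.
    - name: reach an overcloud node as the resolved user   # placeholder task name
      vars:
        oc_user: "{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
      shell: |
        ssh -q -o StrictHostKeyChecking=no {{ oc_user }}@{{ node_ip }} true

For pre-provisioned (deployed-server) nodes, overcloud_ssh_user can then be set to whatever account exists on those nodes, e.g. -e overcloud_ssh_user=stack on the ansible-playbook command line (the value 'stack' is only an example).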


@@ -12,8 +12,10 @@
register: ctrl_ip
- name: test undercloud keystone reachability
+vars:
+oc_user: "{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
shell: |
-ssh -q -o StrictHostKeyChecking=no heat-admin@{{ ctrl_ip.stdout }} curl --silent {{ keystone_endpoint.stdout }}
+ssh -q -o StrictHostKeyChecking=no {{ oc_user }}@{{ ctrl_ip.stdout }} curl --silent {{ keystone_endpoint.stdout }}
register: uc_keystone_conn
ignore_errors: true
@@ -87,9 +89,11 @@
register: node_ip
- name: copy certificate to the overcloud nodes and update the trusted store
+vars:
+oc_user: "{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
shell: |
-scp -q -o StrictHostKeyChecking=no {{ working_dir }}/undercloud.pem heat-admin@{{ item }}:
-ssh -q -o StrictHostKeyChecking=no heat-admin@{{ item }} 'sudo cp undercloud.pem /etc/pki/ca-trust/source/anchors/; sudo update-ca-trust extract'
+scp -q -o StrictHostKeyChecking=no {{ working_dir }}/undercloud.pem {{ oc_user }}@{{ item }}:
+ssh -q -o StrictHostKeyChecking=no {{ oc_user }}@{{ item }} 'sudo cp undercloud.pem /etc/pki/ca-trust/source/anchors/; sudo update-ca-trust extract'
with_items:
- "{{ node_ip.stdout_lines }}"
when: uc_keystone_conn|failed


@@ -1,4 +1,5 @@
source {{ undercloud_rc }}
+OC_USER="{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
NODE_IP=$(openstack server show {{ node_name | splitext | first }} -f json | jq -r .addresses | grep -oP '[0-9.]+')
## wait for galera resource to come back up
@@ -6,7 +7,7 @@ timeout_seconds={{ node_reboot_timeout }}
elapsed_seconds=0
while true; do
echo "Waiting for galera pcs resource to start"
-GALERA_RES=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:galera | grep -vi FAILED | grep -i master | wc -l)
+GALERA_RES=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:galera | grep -vi FAILED | grep -i master | wc -l)
if [[ $GALERA_RES = 1 ]] || [[ $GALERA_RES > 2 ]]; then
echo "${GALERA_RES} instances of galera are started"
break
@@ -15,17 +16,17 @@ while true; do
(( elapsed_seconds += 3 ))
if [ $elapsed_seconds -ge $timeout_seconds ]; then
echo "WARNING: galera pcs resource didn't get started after reboot. Trying to workaround BZ#1499677"
-GVWSTATE_SIZE=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo touch /var/lib/mysql/gvwstate.dat; sudo wc -c /var/lib/mysql/gvwstate.dat' | awk {'print $1'})
+GVWSTATE_SIZE=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo touch /var/lib/mysql/gvwstate.dat; sudo wc -c /var/lib/mysql/gvwstate.dat' | awk {'print $1'})
if [ $GVWSTATE_SIZE -eq 0 ]; then
echo "Removing gvwstate.dat"
-ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo rm -f /var/lib/mysql/gvwstate.dat'
+ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo rm -f /var/lib/mysql/gvwstate.dat'
echo "Cleanup galera resource"
-ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs resource cleanup galera'
+ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs resource cleanup galera'
timeout_seconds={{ node_reboot_timeout }}
elapsed_seconds=0
while true; do
echo "Waiting for galera pcs resource to start"
-GALERA_RES=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:galera | grep -i master | wc -l)
+GALERA_RES=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:galera | grep -i master | wc -l)
if [[ $GALERA_RES = 1 ]] || [[ $GALERA_RES > 2 ]]; then
break
fi


@@ -1,15 +1,16 @@
source {{ undercloud_rc }}
+OC_USER="{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
NODE_IP=$(openstack server show {{ node_name | splitext | first }} -f json | jq -r .addresses | grep -oP '[0-9.]+')
## in case of external loadbalancer haproxy resource is not running on controller nodes
-EXT_LB=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo hiera -c /etc/puppet/hiera.yaml enable_load_balancer')
+EXT_LB=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo hiera -c /etc/puppet/hiera.yaml enable_load_balancer')
if [[ $EXT_LB != 'false' ]]; then
## wait for haproxy resource to come back up
timeout_seconds={{ node_reboot_timeout }}
elapsed_seconds=0
while true; do
echo "Waiting for haproxy pcs resource to start"
-HAPROXY_RES=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs status --full' | grep haproxy-bundle | grep -i started | wc -l)
+HAPROXY_RES=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs status --full' | grep haproxy-bundle | grep -i started | wc -l)
if [[ $HAPROXY_RES = 1 ]] || [[ $HAPROXY_RES > 2 ]]; then
echo "${HAPROXY_RES} instances of haproxy-bundle are started"
break


@@ -1,4 +1,5 @@
source {{ undercloud_rc }}
+OC_USER="{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
NODE_IP=$(openstack server show {{ node_name | splitext | first }} -f json | jq -r .addresses | grep -oP '[0-9.]+')
## wait for rabbitmq resource to come back up
@@ -6,7 +7,7 @@ timeout_seconds={{ node_reboot_timeout }}
elapsed_seconds=0
while true; do
echo "Waiting for rabbitmq pcs resource to start"
-RABBIT_RES=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:rabbitmq-cluster | grep -vi FAILED | grep -i started | wc -l)
+RABBIT_RES=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:rabbitmq-cluster | grep -vi FAILED | grep -i started | wc -l)
if [[ $RABBIT_RES = 1 ]] || [[ $RABBIT_RES > 2 ]]; then
echo "${RABBIT_RES} instances of rabbitmq pcs resource are started"
break


@@ -1,9 +1,10 @@
source {{ undercloud_rc }}
+OC_USER="{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
NODE_IP=$(openstack server show {{ node_name | splitext | first }} -f json | jq -r .addresses | grep -oP '[0-9.]+')
{% if controller_reboot %}
-OVS_RUNNING=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo ovs-vsctl show' | grep ovs_version | awk -F \" {'print $2'} | awk -F "." '{print $1"."$2}')
-OVS_INSTALLED=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo rpm --queryformat %{VERSION} -q openvswitch' | awk -F "." '{print $1"."$2}')
+OVS_RUNNING=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo ovs-vsctl show' | grep ovs_version | awk -F \" {'print $2'} | awk -F "." '{print $1"."$2}')
+OVS_INSTALLED=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo rpm --queryformat %{VERSION} -q openvswitch' | awk -F "." '{print $1"."$2}')
if [[ $OVS_RUNNING != $OVS_INSTALLED ]]; then
echo "Upgraded OVS detected"
fi
@@ -13,7 +14,7 @@ NOVA_ID=$(openstack server list | grep {{ node_name | splitext | first }} | awk
IRONIC_ID=$(ironic node-list | grep $NOVA_ID | awk {'print $2'})
ironic node-set-power-state $IRONIC_ID reboot
{% else %}
-ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo shutdown -r now'
+ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo shutdown -r now'
{% endif %}
timeout_seconds={{ node_reboot_timeout }}
@@ -38,7 +39,7 @@ timeout_seconds={{ node_reboot_timeout }}
elapsed_seconds=0
while true; do
echo "Waiting for {{ node_name }} to boot ..."
-PCS_STATUS=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs status' | grep ^Online)
+PCS_STATUS=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs status' | grep ^Online)
if [[ $PCS_STATUS == *{{ node_name }}* ]]; then
break
fi


@@ -1,4 +1,5 @@
source {{ undercloud_rc }}
+OC_USER="{{ (overcloud_ssh_user == '') | ternary('heat-admin', overcloud_ssh_user) }}"
NODE_IP=$(openstack server show {{ node_name | splitext | first }} -f json | jq -r .addresses | grep -oP '[0-9.]+')
## wait for redis resource to come back up
@@ -6,7 +7,7 @@ timeout_seconds={{ node_reboot_timeout }}
elapsed_seconds=0
while true; do
echo "Waiting for redis pcs resource to start"
-REDIS_RES=$(ssh -q -o StrictHostKeyChecking=no heat-admin@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:redis | grep -vi FAILED | grep -i master | wc -l)
+REDIS_RES=$(ssh -q -o StrictHostKeyChecking=no $OC_USER@$NODE_IP 'sudo pcs status --full' | grep ocf::heartbeat:redis | grep -vi FAILED | grep -i master | wc -l)
if [[ $REDIS_RES = 1 ]]; then
echo "Redis master is ready"
break