Use Heat template to verify tests

This commit introduces the usage of Heat to verify each HA test made. The test template involves:

- Orchestration (heat)
- Volumes (cinder)
- Network, public and private (neutron)
- Images (glance)
- Server (nova)

Summary of what this commit does:

- Get rid of the workarounds
- Get rid of the undercloud options and the undercloud-local copy of ha-test-suite (now useless)
- Get rid of the old environment file needed to spawn instances
- Get rid of the instance test from ha-test-suite
- Add the Heat template verification method

Change-Id: I2dd9d67f494717654e39c60ac5fb067afb9e1835
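In practice, the Heat-based verification added by the task file below boils down to the following flow. This is only a minimal shell sketch of the same openstack CLI calls used in heat-validation.yml: the stack name (validate_ha_stack), the local image file name (cirros.img) and the use of nc/timeout instead of Ansible's wait_for are placeholders/assumptions, and the rendered environment and template files are assumed to already be in the working directory.

    #!/bin/bash
    # Sketch only: mirrors the flow of roles/validate-ha/tasks/heat-validation.yml.
    set -eu
    source ~/overcloudrc

    # Upload the test image referenced by the Heat template.
    openstack image create --disk-format qcow2 --file cirros.img validate_ha_image

    # Spawn the whole stack: networks, router, security group, volume, keypair, server.
    openstack stack create \
      --environment validate-ha-heat-environment.yaml \
      --template validate-ha-heat-template.yaml \
      --wait validate_ha_stack

    # Read the floating IP from the stack outputs and wait for SSH to answer.
    ip=$(openstack stack show -c outputs -f json validate_ha_stack |
         jq --raw-output '.outputs[] | select(.output_key == "server_public_ip") | .output_value')
    timeout 60 bash -c "until nc -z $ip 22; do sleep 2; done"

    # Tear everything down again.
    openstack stack delete --yes --wait validate_ha_stack
    openstack image delete validate_ha_image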
@@ -1,8 +1,8 @@
 validate-ha
 ===========
 
-This role acts on an already deployed tripleo environment, testing all HA
-related functionalities of the installation.
+This role acts on an already deployed tripleo environment, testing HA related
+functionalities of the installation.
 
 Requirements
 ------------
@@ -13,6 +13,7 @@ This role tests also instances spawning and to make this working the
 definition of the floating network must be passed.
 It can be contained in a config file, like this:
 
+private_network_cidr: "192.168.1.0/24"
 public_physical_network: "floating"
 floating_ip_cidr: "10.0.0.0/24"
 public_net_pool_start: "10.0.0.191"
@@ -45,7 +46,6 @@ Start every systemd resource
 Start every systemd resource
 - **test_ha_ng_c**: Stop Galera and Rabbitmq, wait 20 minutes to see if
   something fails
-- **test_ha_instance**: Instance deployment (**all**)
 
 It is also possible to omit (or add) tests not made for the specific release,
 using the above vars, by passing to the command line variables like this:
@@ -55,8 +55,8 @@ using the above vars, by passing to the command line variables like this:
 -e test_ha_ng_a=true \
 ...
 
-In this case we will not check for failed actions (which is test that otherwise
-will be done in mitaka) and we will force the execution of the "ng_a" test
+In this case we will not check for failed actions, a test that otherwise would
+have been done in mitaka, and we will force the execution of the "ng_a" test
 described earlier, which is originally executed just in newton versions or
 above.
@@ -71,6 +71,7 @@ described [here](https://github.com/redhat-openstack/tripleo-quickstart-utils/tr
 ansible-playbook /home/stack/tripleo-quickstart-utils/playbooks/overcloud-validate-ha.yml \
 -e release=ocata \
 -e local_working_dir=/home/stack \
+-e private_net_cidr="192.168.1.0/24" \
 -e public_physical_network="floating" \
 -e floating_ip_cidr="10.0.0.0/24" \
 -e public_net_pool_start="10.0.0.191" \
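The README invocation above passes every network variable on the command line. The same variables (names as in the role defaults further down) can also be collected in a file and passed with Ansible's -e @file syntax. The example below is only a hypothetical wrapper: the file path and the values for public_net_pool_end and public_net_gateway are placeholders.

    # Hypothetical wrapper: same playbook run, variables collected in one file.
    cat > /home/stack/validate-ha-vars.yml <<'EOF'
    release: ocata
    private_net_cidr: "192.168.1.0/24"
    public_physical_network: "floating"
    floating_ip_cidr: "10.0.0.0/24"
    public_net_pool_start: "10.0.0.191"
    public_net_pool_end: "10.0.0.199"
    public_net_gateway: "10.0.0.254"
    EOF

    ansible-playbook /home/stack/tripleo-quickstart-utils/playbooks/overcloud-validate-ha.yml \
        -e local_working_dir=/home/stack \
        -e @/home/stack/validate-ha-vars.yml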
@@ -4,10 +4,19 @@ working_dir: "/home/stack"
 validate_ha_logs_dir: "{{ working_dir }}/validate_ha_logs"
 overcloud_working_dir: "/home/heat-admin"
 
-environment_file: environment.j2
-apply_workarounds: false
-workarounds_script: workarounds.sh.j2
+validate_ha_heat_environment: "validate-ha-heat-environment.yaml.j2"
+validate_ha_heat_template: "validate-ha-heat-template.yaml.j2"
+validate_ha_heat_instance_image_format: "qcow2"
+validate_ha_heat_instance_image_location: "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img"
+validate_ha_heat_instance_volume_gb: 1
 
+private_net_name: "private-network"
+private_subnet_name: "private-subnet"
+public_net_name: "public-network"
+public_subnet_name: "public-subnet"
+private_net_cidr: "10.1.1.0/24"
+public_physical_network: "datacentre"
+public_network_type: "flat"
 floating_ip_cidr: "{{ undercloud_network_cidr|default('192.0.2.0/24') }}"
 floating_ip_start: "{{ floating_ip_cidr|nthhost(100) }}"
 floating_ip_end: "{{ floating_ip_cidr|nthhost(120) }}"
roles/validate-ha/tasks/heat-validation.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
+---
+
+- name: Load image in Glance to be used by Heat
+  shell: |
+    source {{ working_dir }}/overcloudrc
+    openstack image create \
+      --disk-format {{ validate_ha_heat_instance_image_format }} \
+      --file {{ working_dir }}/{{ heat_image_name }} \
+      --format value \
+      --column "id" \
+      validate_ha_image
+
+- name: Execute environment validation via Heat
+  shell: |
+    source {{ working_dir }}/overcloudrc
+    openstack stack create \
+      --environment validate-ha-heat-environment.yaml \
+      --template validate-ha-heat-template.yaml \
+      --wait \
+      {{ stack_name }}
+
+- name: Get instance IP
+  shell: |
+    source {{ working_dir }}/overcloudrc
+    openstack stack show -c outputs -f json {{ stack_name }} | \
+      jq --raw-output '.outputs[] | select( .output_key == "server_public_ip") | .output_value'
+  register: instance_ip
+
+- name: Wait up to one minute for the instance to be reachable
+  wait_for:
+    host: "{{ instance_ip.stdout }}"
+    port: 22
+    timeout: 60
+
+- name: Clean the created stack
+  shell: |
+    source {{ working_dir }}/overcloudrc
+    openstack stack delete \
+      --yes \
+      --wait \
+      {{ stack_name }}
+
+- name: Clean image in Glance
+  shell: |
+    source {{ working_dir }}/overcloudrc
+    openstack image delete validate_ha_image
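For context on the "Get instance IP" task above: with -c outputs -f json, openstack stack show prints the stack outputs as a list of key/value objects, roughly as in the illustration below (the addresses are invented and only two of the four template outputs are shown), which is the shape the jq filter expects when it selects the server_public_ip output defined in the Heat template.

    $ openstack stack show -c outputs -f json test_ha_failed_actions
    {
      "outputs": [
        {
          "output_key": "server_public_ip",
          "output_value": "10.0.0.193",
          "description": "Floating IP address of the web server"
        },
        {
          "output_key": "server_private_ip",
          "output_value": "10.1.1.5",
          "description": "IP address of first web server in private network"
        }
      ]
    }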
@@ -1,34 +1,4 @@
 ---
-- name: Creating the environment file on undercloud
-  template:
-    src: "{{ environment_file }}"
-    dest: "{{ working_dir }}/environment"
-    mode: 0600
-
-- name: Load the workarounds script on the undercloud
-  template:
-    src: "{{ workarounds_script }}"
-    dest: "{{ working_dir }}/workarounds.sh"
-    mode: 0755
-  when: apply_workarounds
-
-- name: Execute workarounds script on the undercloud
-  shell: >
-    "{{ working_dir }}/workarounds.sh"
-  when: apply_workarounds
-
-- name: Copy ha-test-suite on undercloud
-  synchronize:
-    src: "{{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite/"
-    dest: "{{ working_dir }}/ha-test-suite"
-    use_ssh_args: true
-
-- name: Copy ha-test-suite on controllers
-  shell: >
-    /usr/bin/rsync --delay-updates -F --compress --archive -e 'ssh -F {{ local_working_dir }}/ssh.config.ansible' {{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite {{ hostvars[item]['ansible_hostname'] }}:
-  delegate_to: "localhost"
-  with_items:
-    - "{{ groups['controller'] }}"
 
 - name: Include test sequence depending on release
   include_vars:
@@ -38,6 +8,67 @@
 - name: Create directory on the undercloud to store test results
   file: path={{ validate_ha_logs_dir }} state=directory
 
+- name: Copy ha-test-suite on controllers
+  shell: >
+    /usr/bin/rsync --delay-updates -F --compress --archive -e 'ssh -F {{ local_working_dir }}/ssh.config.ansible' {{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite {{ hostvars[item]['ansible_hostname'] }}:
+  delegate_to: "localhost"
+  with_items:
+    - "{{ groups['controller'] }}"
+
+- name: Create the environment template on undercloud
+  template:
+    src: "{{ validate_ha_heat_environment }}"
+    dest: "{{ working_dir }}/validate-ha-heat-environment.yaml"
+    mode: 0600
+
+- name: Create the test template on undercloud
+  template:
+    src: "{{ validate_ha_heat_template }}"
+    dest: "{{ working_dir }}/validate-ha-heat-template.yaml"
+    mode: 0600
+
+- name: Download and uncompress (if necessary) image file for Heat
+  shell: |
+    image_url="{{ validate_ha_heat_instance_image_location }}"
+    image_file=$(basename $image_url)
+
+    curl -s -o $image_file $image_url
+
+    case "$image_file" in
+      *.tar)
+        image_name=$(tar xvf $image_file)
+        ;;
+      *.tar.gz|*.tgz)
+        image_name=$(tar xzvf $image_file)
+        ;;
+      *.tar.bz2|*.tbz2)
+        image_name=$(tar xjvf $image_file)
+        ;;
+      *.tar.xz|*.txz)
+        image_name=$(tar xJf $image_file)
+        ;;
+      *.bz2)
+        bunzip2 --force --quiet $image_file
+        image_name=${image_file%.*};
+        ;;
+      *.gz)
+        gunzip --force --quiet $image_file
+        image_name=${image_file%.*};
+        ;;
+      *.xz)
+        xz --force --quiet --decompress $image_file
+        image_name=${image_file%.*};
+        ;;
+      *) image_name=$image_file
+        ;;
+    esac
+
+    echo $image_name
+  register: image_name
+
+- set_fact:
+    heat_image_name: "{{ image_name.stdout }}"
+
 # Test: failed actions
 - block:
     - name: HA test - Failed actions (overcloud)
@@ -45,6 +76,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_check-failed-actions
      register: test_ha_failed_actions_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_failed_actions"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_failed_actions_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_failed_actions_stdout.log"
@@ -61,6 +95,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_master-slave -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_master-slave
      register: test_ha_master_slave_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_master_slave"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_master_slave_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_master_slave_stdout.log"
@@ -77,6 +114,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_keystone-constraint-removal
      register: test_ha_keystone_stop_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_keystone_stop"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_keystone_stop_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_keystone_stop_stdout.log"
@@ -93,6 +133,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_keystone-constraint-removal
      register: test_ha_keystone_constraint_removal_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_keystone_constraint_removal"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_keystone_constraint_removal_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_keystone_constraint_removal_stdout.log"
@@ -109,6 +152,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-a -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
      register: test_ha_ng_a_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_ng_a"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_ng_a_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_a_stdout.log"
@@ -125,6 +171,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-b -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
      register: test_ha_ng_b_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_ng_b"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_ng_b_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_b_stdout.log"
@@ -141,6 +190,9 @@
       shell: >
        {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-c -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
      register: test_ha_ng_c_cmd
+    - include: heat-validation.yml
+      vars:
+        stack_name: "test_ha_ng_c"
   always:
     - name: copy stdout test result to undercloud and check command
       copy: content="{{ test_ha_ng_c_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_c_stdout.log"
@@ -150,17 +202,7 @@
     - fail: msg="{{ test_ha_ng_c_cmd.stderr }}"
       when: test_ha_ng_c|bool
 
-# Test: Instance deployment
-- block:
-    - name: HA Test instance deploy on the overcloud (undercloud)
-      shell: >
-        {{ working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ working_dir }}/ha-test-suite/test/test_instance-creation -r {{ working_dir }}/ha-test-suite/recovery/recovery_instance-creation -u
-      register: test_ha_instance_cmd
-  always:
-    - name: copy stdout test result to undercloud and check command
-      copy: content="{{ test_ha_instance_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_instance_stdout.log"
-  rescue:
-    - name: copy stderr test result to undercloud and check command
-      copy: content="{{ test_ha_instance_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_instance_stderr.log"
-    - fail: msg="{{ test_ha_instance_cmd.stderr }}"
-      when: test_ha_instance|bool
+- name: Remove image file
+  file:
+    path: "{{ working_dir }}/{{ heat_image_name }}"
+    state: absent
@@ -1,11 +0,0 @@
-# OpenStack version
-export OPENSTACK_VERSION={{ release }}
-# SSH related commands
-export SSH="ssh -q -o StrictHostKeyChecking=no"
-export SCP="scp -q -o StrictHostKeyChecking=no"
-# Floating network details
-export FLOATING_PHYSICAL_NET="{{ public_physical_network }}"
-export FLOATING_SUBNET="{{ floating_ip_cidr }}"
-export FLOATING_RANGE_START="{{ public_net_pool_start }}"
-export FLOATING_RANGE_END="{{ public_net_pool_end }}"
-export FLOATING_GW="{{ public_net_gateway }}"
@@ -0,0 +1,13 @@
+# Heat template parameters
+parameters:
+  private_net_name: "{{ private_net_name }}"
+  private_subnet_name: "{{ private_subnet_name }}"
+  private_net_cidr: "{{ private_net_cidr }}"
+  public_net_name: "{{ public_net_name }}"
+  public_subnet_name: "{{ public_subnet_name }}"
+  public_physical_network: "{{ public_physical_network }}"
+  public_network_type: "{{ public_network_type }}"
+  public_net_cidr: "{{ floating_ip_cidr }}"
+  public_net_gateway: "{{ public_net_gateway }}"
+  public_net_pool_start: "{{ public_net_pool_start }}"
+  public_net_pool_end: "{{ public_net_pool_end }}"
roles/validate-ha/templates/validate-ha-heat-template.yaml.j2 (new file, 192 lines)
@@ -0,0 +1,192 @@
+heat_template_version: 2016-10-14
+description: spawning a server
+
+parameters:
+  private_net_name:
+    type: string
+    default: "private"
+    description: Name of private network into which servers get deployed
+  private_subnet_name:
+    type: string
+    default: private_subnet
+    description: Name of private subnet into which servers get deployed
+  private_net_cidr:
+    type: string
+    description: Private network address (CIDR notation)
+  public_physical_network:
+    type: string
+    default: "datacentre"
+    description: Physical network name
+  public_network_type:
+    type: string
+    default: "flat"
+    description: Type of the physical network (flat or vlan)
+    constraints:
+      - allowed_values:
+          - vlan
+          - flat
+  public_net_name:
+    type: string
+    default: public
+    description: Name of public network into which servers get deployed
+  public_subnet_name:
+    type: string
+    default: public_subnet
+    description: Name of public subnet into which servers get deployed
+  public_net_cidr:
+    type: string
+    description: Public network address (CIDR notation)
+  public_net_gateway:
+    type: string
+    description: Public network gateway address
+  public_net_pool_start:
+    type: string
+    description: Start of public network IP address allocation pool
+  public_net_pool_end:
+    type: string
+    description: End of public network IP address allocation pool
+
+resources:
+
+  ###########
+  # Network #
+  ###########
+
+  private_net:
+    type: OS::Neutron::Net
+    properties:
+      name: { get_param: private_net_name }
+
+  private_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { get_param: private_subnet_name }
+      network_id: { get_resource: private_net }
+      cidr: { get_param: private_net_cidr }
+
+  public_net:
+    type: OS::Neutron::ProviderNet
+    properties:
+      name: { get_param: public_net_name }
+      router_external: true
+      physical_network: { get_param: public_physical_network }
+      network_type: { get_param: public_network_type }
+
+  public_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { get_param: public_subnet_name }
+      network_id: { get_resource: public_net }
+      cidr: { get_param: public_net_cidr }
+      gateway_ip: { get_param: public_net_gateway }
+      allocation_pools:
+        - start: { get_param: public_net_pool_start }
+          end: { get_param: public_net_pool_end }
+
+  router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info:
+        network: { get_resource: public_net }
+
+  router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      subnet_id: { get_resource: private_subnet }
+
+  public_net_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: private_net }
+      fixed_ips:
+        - subnet: { get_resource: private_subnet }
+      security_groups: [{ get_resource: public_security_group }]
+
+  public_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_resource: public_net }
+      port_id: { get_resource: public_net_port }
+
+  public_security_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      description: Add security group rules for the multi-tier architecture
+      name: pingandssh
+      rules:
+        - remote_ip_prefix: 0.0.0.0/0
+          protocol: tcp
+          port_range_min: 22
+          port_range_max: 22
+        - remote_ip_prefix: 0.0.0.0/0
+          protocol: tcp
+          port_range_min: 80
+          port_range_max: 80
+        - remote_ip_prefix: 0.0.0.0/0
+          protocol: icmp
+
+  ###########
+  # Volume #
+  ###########
+
+  instance_volume:
+    type: OS::Cinder::Volume
+    properties:
+      name: "instance_volume"
+      size: {{ validate_ha_heat_instance_volume_gb }}
+      image: "validate_ha_image"
+
+  ###########
+  # Keypair #
+  ###########
+
+  instance_keypair:
+    type: OS::Nova::KeyPair
+    properties:
+      name: "instance_keypair"
+      save_private_key: "true"
+
+  ###########
+  # Flavor #
+  ###########
+
+  instance_flavor:
+    type: OS::Nova::Flavor
+    properties:
+      name: "instance_flavor"
+      ephemeral: 0
+      ram: 2048
+      disk: 10
+      vcpus: 2
+
+  ###########
+  # Server #
+  ###########
+
+  instance:
+    type: OS::Nova::Server
+    properties:
+      name: "validate_ha_instance"
+      flavor: { get_resource: instance_flavor }
+      key_name: { get_resource: instance_keypair }
+      networks:
+        - port: { get_resource: public_net_port }
+      block_device_mapping: [{ device_name: "vda", volume_id : { get_resource : instance_volume }, delete_on_termination : "true" }]
+
+outputs:
+  server_private_ip:
+    description: IP address of first web server in private network
+    value: { get_attr: [ instance, first_address ] }
+
+  server_public_ip:
+    description: Floating IP address of the web server
+    value: { get_attr: [ public_floating_ip, floating_ip_address ] }
+
+  public_key:
+    description: The public key of the keypair.
+    value: { get_attr: [instance_keypair, public_key] }
+
+  private_key:
+    description: The private key of the keypair.
+    value: { get_attr: [instance_keypair, private_key] }
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-source {{ working_dir }}/environment
-
-source {{ working_dir }}/stackrc
-CONTROLLERS=$(nova list | grep controller | awk '{print $12}' | cut -f2 -d=)
-CONTROLLER0=$(nova list | grep controller-0 | awk '{print $12}' | cut -f2 -d=)
-
-{% if release in [ 'mitaka', 'rhos-9', 'newton', 'rhos-10' ] %}
-# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1348222
-for CONTROLLER in $CONTROLLERS; do
-  $SSH heat-admin@$CONTROLLER sudo pip install redis;
-done
-{% endif %}
-
-{% if release in [ 'mitaka', 'rhos-9' ] %}
-# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1357229
-for CONTROLLER in $CONTROLLERS; do
-  $SSH heat-admin@$CONTROLLER "sudo sed -i -e 's/^After=.*/After=syslog.target network.target/g' /usr/lib/systemd/system/openstack-heat-engine.service";
-done
-{% endif %}
-
-{% if release in [ 'mitaka', 'rhos-9', 'newton', 'rhos-10' ] %}
-$SSH heat-admin@$CONTROLLER0 sudo pcs resource cleanup
-{% endif %}
@@ -5,4 +5,3 @@ test_ha_keystone_constraint_removal: false
 test_ha_ng_a: false
 test_ha_ng_b: false
 test_ha_ng_c: false
-test_ha_instance: true
@@ -5,4 +5,3 @@ test_ha_keystone_constraint_removal: true
 test_ha_ng_a: false
 test_ha_ng_b: false
 test_ha_ng_c: false
-test_ha_instance: true
@@ -5,4 +5,3 @@ test_ha_keystone_constraint_removal: false
 test_ha_ng_a: true
 test_ha_ng_b: true
 test_ha_ng_c: true
-test_ha_instance: true
@@ -5,4 +5,3 @@ test_ha_keystone_constraint_removal: false
 test_ha_ng_a: true
 test_ha_ng_b: true
 test_ha_ng_c: true
-test_ha_instance: true
@@ -5,4 +5,3 @@ test_ha_keystone_constraint_removal: false
 test_ha_ng_a: true
 test_ha_ng_b: true
 test_ha_ng_c: true
-test_ha_instance: true
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Raoul Scarazzini (rasca@redhat.com)
-# This script provides a testing suite for TripleO/Director OpenStack HA (so with Pacemaker) environments
+# This script provides a testing suite for TripleO HA environments
 
 # Define main workdir
 WORKDIR=$(dirname $0)
@@ -24,10 +24,6 @@ if [ $# -gt 0 ]
       usage
       exit
      ;;
-    -u|--undercloud)
-      undercloud=true
-      shift
-      ;;
    -t|--test)
      test_sequence="$2"
      shift
@@ -55,15 +51,12 @@ if [ $# -gt 0 ]
  exit 1
 fi
 
-# Populating overcloud elements if not on undercloud
-if [ "$undercloud" != true ]
- then
+# Populating overcloud elements
 echo -n "$(date) - Populationg overcloud elements..."
 OVERCLOUD_CORE_RESOURCES="galera redis rabbitmq"
 OVERCLOUD_RESOURCES=$(sudo pcs resource show | egrep '^ (C|[a-Z])' | sed 's/.* \[\(.*\)\]/\1/g' | sed 's/ \(.*\)(.*):.*/\1/g' | sort)
 OVERCLOUD_SYSTEMD_RESOURCES=$(sudo pcs config show | egrep "Resource:.*systemd"|grep -v "haproxy"|awk '{print $2}')
 echo "OK"
-fi
 
 if [ -f "$test_sequence" ]
  then
@@ -1,27 +0,0 @@
-# Recovery: Instance creation
-
-STACKDIR=/home/stack
-
-INSTANCE_NAME=cirros-1
-
-source $STACKDIR/overcloudrc
-
-floatingip=$(nova list | grep $INSTANCE_NAME | awk '{print $13}')
-floatingip_id=$(neutron floatingip-list | grep $floatingip | awk '{print $2}')
-port_id=$(neutron port-list | grep $floatingip | awk '{print $2}')
-
-neutron floatingip-disassociate $floatingip_id $port_id
-neutron floatingip-delete $floatingip_id
-nova delete $INSTANCE_NAME
-projectid=$(openstack project list | awk '/admin/ {print $2}')
-glance --os-project-id=$projectid image-delete $(glance --os-project-id=$projectid image-list | grep CirrOS | awk '{print $2}')
-rm /tmp/cirros-0.3.4-x86_64-disk.img
-nova flavor-delete overcloud-instance-test-small-flavor
-neutron router-gateway-clear floating-router floating-network
-neutron router-interface-delete floating-router private-subnet
-neutron router-delete floating-router
-neutron security-group-delete pingandssh
-neutron subnet-delete private-subnet
-neutron subnet-delete floating-subnet
-neutron net-delete floating-network
-neutron net-delete private-network
@@ -1,128 +0,0 @@
-# Test: Instance deployment
-
-set -e
-
-STACKDIR=/home/stack
-
-source $STACKDIR/overcloudrc
-
-# Load the environment with all the info for floating network
-source $STACKDIR/environment
-
-TIMEOUT=30
-INSTANCE_NAME=cirros-1
-PRIVATE_NETWORK=10.1.1.0/24
-
-# Gateway creation
-# When the environment is recovering from a previous test it can happen
-# that neutron is waiting to rejoin its cluster, preventing from creating
-# new stuff like router. We wait at least 300 seconds before giving up.
-set +e
-RUN=1
-while [ $RUN -lt $TIMEOUT ]
-do
-  neutron router-create floating-router
-  if [ $? -eq 0 ]
-  then
-    break
-  else
-    echo "Waiting..."
-    let "RUN++"
-    sleep 10
-  fi
-done
-# If timeout was reached then we need to exit with error
-if [ $RUN -ge $TIMEOUT ]
-then
-  echo "It was not possible to create the router, giving up."
-  exit 1
-fi
-set -e
-
-# Network and subnet creation
-neutron net-create floating-network --router:external=True --provider:physical_network $FLOATING_PHYSICAL_NET --provider:network_type flat
-neutron subnet-create --name floating-subnet --disable-dhcp --allocation-pool start=$FLOATING_RANGE_START,end=$FLOATING_RANGE_END --gateway $FLOATING_GW floating-network $FLOATING_SUBNET
-neutron net-create private-network
-neutron subnet-create private-network $PRIVATE_NETWORK --name private-subnet
-# Router configuration
-neutron router-interface-add floating-router private-subnet
-neutron router-gateway-set floating-router floating-network
-neutron security-group-create pingandssh
-securitygroup_id=$(neutron security-group-list | grep pingandssh | head -1 | awk '{print $2}')
-neutron security-group-rule-create --direction ingress --protocol tcp --port-range-min 22 --port-range-max 22 $securitygroup_id
-neutron security-group-rule-create --protocol icmp --direction ingress $securitygroup_id
-floatingip=$(neutron floatingip-create floating-network | grep floating_ip_address | awk '{print $4}')
-echo floatingip=$floatingip
-
-#[stack@mrg-06 ~]$ neutron net-list
-#...
-#| 6fde7d2a-e2d9-4b0f-a982-b7cbc3244807 | private-network | 31a5ccd5-07bd-4103-a4a3-ab2c6d6148d7 10.1.1.0/24 |
-#...
-nova flavor-create --ephemeral 0 --is-public True test.small overcloud-instance-test-small-flavor 2048 20 1
-private_net_id=$(neutron net-list | grep private-network | awk '{print $2}')
-wget -O /tmp/cirros-0.3.4-x86_64-disk.img http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
-projectid=$(openstack project list | awk '/admin/ {print $2}')
-glance --os-project-id=$projectid image-create --name CirrOS --container-format bare --disk-format raw --file /tmp/cirros-0.3.4-x86_64-disk.img
-nova boot --image CirrOS --flavor test.small --security-groups pingandssh --nic net-id=$private_net_id $INSTANCE_NAME
-
-#[stack@mrg-06 ~]$ nova list
-#...
-#| eb29c1a1-c30e-4f8f-91ea-cec1fd38c088 | $INSTANCE_NAME | BUILD | spawning | NOSTATE | private-network=10.1.1.5 |
-#...
-echo "Waiting for instance $INSTANCE_NAME to come up"
-COUNTER=1
-while [ $COUNTER -lt $TIMEOUT ]
-do
-  instance_status=$(nova list | awk "/$INSTANCE_NAME/ {print \$10}")
-
-  if [ "$instance_status" == "Running" ]
-  then
-    echo "SUCCESS"
-    break
-  else
-    echo -n "."
-  fi
-  let COUNTER=COUNTER+1
-done
-
-[ $COUNTER -ge $TIMEOUT ] && (echo "FAILURE! Instance status: $instance_status"; exit 1)
-instance_ip=$(nova list | grep $INSTANCE_NAME | awk '{print $12}' | sed "s/private-network=//g")
-echo instance_ip=$instance_ip
-
-#[stack@mrg-06 ~]$ neutron port-list
-#...
-#| 61ce215d-3dc7-4873-af73-342620cdc3b6 | | fa:16:3e:8d:8b:8d | {"subnet_id": "31a5ccd5-07bd-4103-a4a3-ab2c6d6148d7", "ip_address": "10.1.1.5"} |
-#...
-port_id=$(neutron port-list | grep $instance_ip | awk '{print $2}')
-echo port_id=$port_id
-
-#[stack@mrg-06 ~]$ neutron floatingip-list
-#...
-#| 624f5256-ee89-438f-8335-904017e74a18 | | 10.16.144.77 | |
-#...
-floatingip_id=$(neutron floatingip-list | grep $floatingip | awk '{print $2}')
-echo floatingip_id=$floatingip_id
-neutron floatingip-associate $floatingip_id $port_id
-
-echo "------------------------------------------------------------"
-echo "$(date) Instance will be available at the IP $floatingip"
-echo "------------------------------------------------------------"
-
-set +e
-
-COUNTER=1
-while [ $COUNTER -lt $TIMEOUT ]
-do
-  ping -c1 $floatingip 2>&1 > /dev/null
-
-  if [ $? -eq 0 ]
-  then
-    echo "SUCCESS"
-    break
-  else
-    echo -n "."
-  fi
-  let COUNTER=COUNTER+1
-done
-
-[ $COUNTER -ge $TIMEOUT ] && (echo "FAILURE!"; exit 1)