Merge "Remove validate-ha from available roles"

This commit is contained in:
Zuul 2017-11-22 15:45:01 +00:00 committed by Gerrit Code Review
commit 4c97fdb0fb
13 changed files with 74 additions and 553 deletions

View File

@ -1,179 +0,0 @@
---
# Playbook (removed by this commit): baremetal-undercloud-validate-ha.yml.
# Drives a full TripleO deployment on baremetal: provision the undercloud
# host, generate the inventory, then configure repositories/packages and
# deploy the undercloud itself.
# NOTE(review): the diff viewer has flattened the YAML indentation; in the
# original file the keys below are nested under each play.
- name: Baremetal undercloud install
hosts: localhost
roles:
- baremetal-undercloud
tags:
- baremetal-undercloud
# Regenerate the quickstart inventory so it contains the new undercloud node.
- name: Add the undercloud node to the generated inventory
hosts: localhost
gather_facts: yes
roles:
- tripleo-inventory
tags:
- undercloud-inventory
- name: Setup repositories
hosts: undercloud
gather_facts: yes
roles:
- repo-setup
tags:
- undercloud-repo-setup
- name: Install packages
hosts: undercloud
gather_facts: no
roles:
- baremetal-undercloud/packages
tags:
- undercloud-pkgs-install
- name: Deploy the undercloud
hosts: undercloud
gather_facts: no
roles:
- undercloud-deploy
tags:
- undercloud-deploy
# The tripleo-validations role is only applied when validations (or their
# negative tests) are explicitly enabled via the run_tripleo_validations*
# variables.
- name: Configure tripleo-validations
hosts: undercloud
gather_facts: no
tags:
- tripleo-validations
vars:
run_tripleo_validations_setup: True
roles:
- { role: tripleo-validations,
when: run_tripleo_validations|bool or run_tripleo_validations_negative_tests|bool}
# Overcloud preparation: baremetal nodes, config files, containers, images,
# flavors and networks — each play runs on the undercloud and is tagged so it
# can be run/skipped individually.
- name: Prepare baremetal for the overcloud deployment
hosts: undercloud
roles:
- baremetal-prep-overcloud
tags:
- baremetal-prep-overcloud
- name: Prepare configuration files for the overcloud deployment
hosts: undercloud
gather_facts: no
roles:
- overcloud-prep-config
tags:
- overcloud-prep-config
- name: Prepare overcloud containers
hosts: undercloud
gather_facts: no
roles:
- overcloud-prep-containers
tags:
- overcloud-prep-containers
# become: true — image fetching needs root on the undercloud.
- name: Fetch the overcloud images
hosts: undercloud
gather_facts: no
become: true
roles:
- fetch-images
tags:
- overcloud-fetch-images
- name: Prepare the overcloud images for deployment
hosts: undercloud
gather_facts: no
roles:
- overcloud-prep-images
tags:
- overcloud-prep-images
# Optional validation gate before node introspection (same when-guard pattern
# as the earlier tripleo-validations play).
- name: Run tripleo-validations pre-introspection tests
hosts: undercloud
gather_facts: no
tags:
- tripleo-validations
vars:
validations_group: ['pre-introspection']
roles:
- { role: tripleo-validations,
when: run_tripleo_validations|bool or run_tripleo_validations_negative_tests|bool}
- name: Prepare overcloud flavors
hosts: undercloud
gather_facts: no
roles:
- overcloud-prep-flavors
tags:
- overcloud-prep-flavors
- name: Prepare the undercloud networks for the overcloud deployment
hosts: undercloud
gather_facts: no
roles:
- overcloud-prep-network
tags:
- overcloud-prep-network
# Optional validation gate before the overcloud deploy.
- name: Run tripleo-validations pre-deployment tests
hosts: undercloud
gather_facts: no
tags:
- tripleo-validations
vars:
validations_group: ['pre-deployment']
roles:
- { role: tripleo-validations,
when: run_tripleo_validations|bool or run_tripleo_validations_negative_tests|bool}
- name: Deploy the overcloud
hosts: undercloud
gather_facts: yes
roles:
- overcloud-deploy
tags:
- overcloud-deploy
# Post-deployment phase: optional validations, inventory refresh, deployment
# result check, and finally the HA validation this commit removes.
- name: Run tripleo-validations post-deployment tests
hosts: undercloud
gather_facts: no
tags:
- tripleo-validations
vars:
validations_group: ['post-deployment']
roles:
- { role: tripleo-validations,
when: run_tripleo_validations|bool or run_tripleo_validations_negative_tests|bool}
# inventory: all — re-inventory both undercloud and overcloud nodes.
- name: Add the overcloud nodes to the generated inventory
hosts: undercloud
gather_facts: yes
vars:
inventory: all
roles:
- tripleo-inventory
tags:
- overcloud-inventory
# Reads the JSON result written by overcloud-deploy and fails the run if the
# recorded result is "failed".
- name: Check the result of the deployment
hosts: localhost
tasks:
- name: ensure the deployment result has been read into memory
include_vars: "{{ local_working_dir }}/overcloud_deployment_result.json"
# overcloud_deploy_result = ["failed", "passed"]
- name: did the deployment pass or fail?
debug: var=overcloud_deploy_result
failed_when: overcloud_deploy_result == "failed"
tags:
- overcloud-deploy-check
# Final step: exercise the deployed overcloud with the validate-ha role.
- name: Validate the overcloud using HA tests
hosts: undercloud
gather_facts: no
roles:
- validate-ha
tags:
- overcloud-validate-ha

View File

@ -1,7 +0,0 @@
---
# Standalone playbook (removed by this commit): runs only the validate-ha
# role against an already-deployed overcloud, from the undercloud node.
# NOTE(review): indentation flattened by the diff viewer.
- name: Validate overcloud HA status
hosts: undercloud
gather_facts: no
roles:
- validate-ha

View File

@ -1,30 +1,42 @@
ansible-role-tripleo-baremetal-undercloud
=========================================
This role aims to build a baremetal undercloud machine from scratch. Using tripleo-quickstart, this means that you will be able to provide, prepare and install the undercloud on a physical machine.
This role aims to build a baremetal undercloud machine from scratch. Using
tripleo-quickstart, this means that you will be able to provide, prepare and
install the undercloud on a physical machine.
From the tripleo-quickstart perspective virthost and undercloud will be the same host.
From the tripleo-quickstart perspective virthost and undercloud will be the
same host.
Requirements
------------
For make all the things working you need to have an environment with all the things in place:
To make everything work you need to have an environment with all the
pieces in place:
Hardware requirements
**Hardware requirements**
* A physical machine for the undercloud that can be accessed as root from the jump host
* At least two other physical machines that will become controller and compute, for HA three controllers and one compute are needed
* A working network link between overcloud and undercloud, typically the second net device of the undercloud will talk to the first net device of all the overcloud machines
* A physical machine for the undercloud that can be accessed as root from the
jump host
* At least two other physical machines that will become controller and compute,
for HA three controllers and one compute are needed
* A working network link between overcloud and undercloud, typically the second
net device of the undercloud will talk to the first net device of all the
overcloud machines
Software requirements
**Software requirements**
* The tripleo-quickstart quickstart.sh script:
* A config file (i.e. ha.yml) containing all the customizations for the baremetal environment
* A config file (i.e. ha.yml) containing all the customizations for the
baremetal environment
* This set of files, dependent from the hardware:
* File undercloud-provisioning.sh - optional, name is not important
* File network-environment.yaml - mandatory
* Directory nic-configs - mandatory if declared inside the resource_registry section in network-environment.yaml and must contain all the needed files
* File instackenv.json - mandatory, must contain the ipmi credentials for the nodes
* Directory nic-configs - mandatory if declared inside the
resource_registry section in network-environment.yaml and must contain
all the needed files
* File instackenv.json - mandatory, must contain the ipmi credentials for
the nodes
Quickstart invocation
---------------------
@ -34,7 +46,7 @@ You can invoke *quickstart.sh* like this:
```console
./quickstart.sh \
--clean \
--playbook baremetal-undercloud-validate-ha.yml \
--playbook baremetal-undercloud.yml \
--working-dir /path/to/workdir \
--config /path/to/config.yml \
--release <RELEASE> \
@ -44,8 +56,9 @@ You can invoke *quickstart.sh* like this:
Basically this command:
* Uses the playbook **baremetal-undercloud-validate-ha.yml**
* Uses a custom workdir that is rebuilt from scratch (so if it already exists, it is dropped, see *--clean*)
* Uses the playbook **baremetal-undercloud.yml**
* Uses a custom workdir that is rebuilt from scratch (so if it already exists,
it is dropped, see *--clean*)
* Get all the extra requirements
* Select the config file
* Chooses release (liberty, mitaka, newton, or “master” for ocata)
@ -61,15 +74,17 @@ A typical config file will contain something like this:
# Virthost key for accessing newly provided machine
virthost_key: ~/.ssh/customkey
# Type of undercloud (we're deploying on baremetal otherwise this should be virtual)
# Type of undercloud (we're deploying on baremetal otherwise this should be
# virtual)
undercloud_type: baremetal
# Specify the secondary net interface for overcloud provisioning
undercloud_local_interface: eth1
# Specify the external network for undercloud that will be used to route overcloud traffic
# Specify the external network for undercloud that will be used to route
# overcloud traffic
undercloud_external_network_cidr: 172.20.0.0/24
# Declare the additional interface on undercloud to route overcloud traffic
undercloud_networks:
external:
@ -119,18 +134,32 @@ extra_args: "--ntp-server <NTP SERVER IP> --control-scale 3 --compute-scale 2 --
A brief explanation of the variables:
* The variable **undercloud_type** is checked in some of the dependent roles (see @Dependencies).
* The variable **virthost_key** is optional, if defined it must be a path to a private ssh key file needed to access to virthost. If you access to the virthost with the default ssh key of the user launching quickstart.sh then you don't need to set it.
* The **undercloud_local_interface** needs to be changed accordingly to the baremetal hardware.
* The **undercloud_external_network_cidr** will be the overcloud external network that undercloud will route.
* A specific **flavor_map** (in this case baremetal) needs to be applied to each node kind.
* With **step_provide_undercloud** you can choose if you want to provide the virthost.
* The variable **undercloud_type** is checked in some of the dependent roles
(see @Dependencies).
* The variable **virthost_key** is optional; if defined it must be the path to
a private ssh key file needed to access the virthost. If you access the
virthost with the default ssh key of the user launching quickstart.sh then
you don't need to set it.
* The **undercloud_local_interface** needs to be changed according to the
baremetal hardware.
* The **undercloud_external_network_cidr** will be the overcloud external
network that undercloud will route.
* A specific **flavor_map** (in this case baremetal) needs to be applied to
each node kind.
* With **step_provide_undercloud** you can choose if you want to provide the
virthost.
* With **step_introspect** you can choose if you want to introspect nodes.
* With **step_install_upstream_ipxe** you can choose if you want to install upstream ipxe (useful with some hardware issues).
* The **libvirt_type** and **libvirt_args** must be set to kvm, since we will work on baremetal with native virtual capabilities.
* **baremetal_provisioning_script** is the script to provide the machine, if **step_provide_undercloud is false** than this can be omitted.
* **baremetal_network_environment**, **baremetal_instackenv** and *optionally* **baremetal_nic_configs** will contain all the environment files.
* If instances needs to be accessible from the outside network then all the parameters (so **floating_ip_cidr** and **public_net_***) of this floating network must be explicited.
* With **step_install_upstream_ipxe** you can choose if you want to install
upstream ipxe (useful with some hardware issues).
* The **libvirt_type** and **libvirt_args** must be set to kvm, since we will
work on baremetal with native virtual capabilities.
* **baremetal_provisioning_script** is the script to provision the machine; if
**step_provide_undercloud** is false then this can be omitted.
* **baremetal_network_environment**, **baremetal_instackenv** and *optionally*
**baremetal_nic_configs** will contain all the environment files.
* If instances need to be accessible from the outside network then all the
parameters (so **floating_ip_cidr** and **public_net_***) of this floating
network must be specified explicitly.
* **extra_args** will contain all deploy specific (like HA settings)
The main task of the role is this one:
@ -164,36 +193,44 @@ The main task of the role is this one:
This is basically what each specific tasks does:
* **machine-provisioning.yml** provides the machine and make it become both virthost/undercloud
* **machine-setup.yml** prepares the undercloud with ssh connections, users, sudoers and inventory addition
* **machine-provisioning.yml** provisions the machine and makes it become both
virthost and undercloud
* **machine-setup.yml** prepares the undercloud with ssh connections, users,
sudoers and inventory addition
* **undercloud-repos-conf.yml** repositories and packages configurations
* **overcloud-images.yml** overcloud images retrieving
Some notes:
* Even if virthost and undercloud are the same machine, the name “undercloud” will be inventoried in any case
* Even if virthost and undercloud are the same machine, the name “undercloud”
will be inventoried in any case
* Each action is tagged so it is possible to exclude a specific section
* Some variables can be controlled via config settings (look above in @Role usage)
* Some variables can be controlled via config settings (look above in @Role
usage)
Dependencies
------------
If you don't need to change anything in how the environments gets deployed, then all the dependencies should be satisfied by the default **quickstart-extras-requirements.txt** file.
If you don't need to change anything in how the environment gets deployed,
then all the dependencies should be satisfied by the default
**quickstart-extras-requirements.txt** file.
In any case the roles you will need to deploy an entire environment from scratch (see @Example Playbook) are:
In any case the roles you will need to deploy an entire environment from
scratch (see @Example Playbook) are:
* **baremetal-undercloud** (this role)
* **tripleo-inventory** (part of *tripleo-quickstart*)
* **tripleo/undercloud** (part of *tripleo-quickstart*)
* **baremetal-prep-overcloud
* **baremetal-prep-overcloud**
* **overcloud-prep-{config,images,flavors,network}**
* **overcloud-deploy**
* **overcloud-validate** or **overcloud-validate-ha** (if you want to test HA capabilities)
* **overcloud-validate**
Example Playbook
----------------
Here's is an example on host to use this role in combination to all the others coming from various related to tripleo-quickstart:
Here is an example playbook that uses this role in combination with all the
other roles related to tripleo-quickstart:
```yaml
---
@ -299,15 +336,6 @@ Here's is an example on host to use this role in combination to all the others c
failed_when: overcloud_deploy_result == "failed"
tags:
- overcloud-deploy-check
# HA Validation
- name: Validate the overcloud using HA tests
hosts: undercloud
gather_facts: no
roles:
- validate-ha
tags:
- overcloud-validate-ha
```
The steps of the sample playbook are these:

View File

@ -1,106 +0,0 @@
overcloud-validate-ha
=====================
This role acts on an already deployed tripleo environment, testing all HA related functionalities of the installation.
Requirements
------------
This role must be used with a deployed TripleO environment, so you'll need a working directory of tripleo-quickstart with these files:
- **hosts**: which will contain all the hosts used in the deployment;
- **ssh.config.ansible**: which will have all the ssh data to connect to the undercloud and all the overcloud nodes;
- A **config file** with a definition for the floating network (which will be used to test HA instances), like this one:
public_physical_network: "floating"
floating_ip_cidr: "10.0.0.0/24"
public_net_pool_start: "10.0.0.191"
public_net_pool_end: "10.0.0.198"
public_net_gateway: "10.0.0.254"
Quickstart invocation
---------------------
Quickstart can be invoked like this:
./quickstart.sh \
--retain-inventory \
--playbook overcloud-validate-ha.yml \
--working-dir /path/to/workdir \
--config /path/to/config.yml \
--release <RELEASE> \
--tags all \
<HOSTNAME or IP>
Basically this command:
- **Keeps** existing data on the repo (it's the most important one)
- Uses the *overcloud-validate-ha.yml* playbook
- Uses the same custom workdir where quickstart was first deployed
- Select the specific config file (which must contain the floating network data)
- Specifies the release (mitaka, newton, or “master” for ocata)
- Performs all the tasks in the playbook overcloud-validate-ha.yml
**Important note**
If the role is called by itself, i.e. not in the same playbook that already deploys the environment (see [baremetal-undercloud-validate-ha.yml](https://github.com/openstack/tripleo-quickstart-extras/blob/master/playbooks/baremetal-undercloud-validate-ha.yml)), you need to export *ANSIBLE_SSH_ARGS* with the path of the *ssh.config.ansible* file, like this:
export ANSIBLE_SSH_ARGS="-F /path/to/quickstart/workdir/ssh.config.ansible"
HA tests
--------
Each test is associated with a global variable that, if true, makes the test run. Tests are grouped and performed by default depending on the OpenStack release.
This is the list of the supported variables, with test description and name of the release on which test is performed:
- **test_ha_failed_actions**: Look for failed actions (**all**)
- **test_ha_master_slave**: Stop master slave resources (galera and redis), all the resources should come down (**all**)
- **test_ha_keystone_constraint_removal**: Stop keystone resource (by stopping httpd), check no other resource is stopped (**mitaka**)
- Test: next generation cluster checks (**newton**):
- **test_ha_ng_a**: Stop every systemd resource, stop Galera and Rabbitmq, Start every systemd resource
- **test_ha_ng_b**: Stop Galera and Rabbitmq, stop every systemd resource, Start every systemd resource
- **test_ha_ng_c**: Stop Galera and Rabbitmq, wait 20 minutes to see if something fails
- **test_ha_instance**: Instance deployment (**all**)
It is also possible to omit (or add) tests not made for the specific release, using the above vars, like in this example:
./quickstart.sh \
--retain-inventory \
--ansible-debug \
--no-clone \
--playbook overcloud-validate-ha.yml \
--working-dir /path/to/workdir/ \
--config /path/to/config.yml \
--extra-vars test_ha_failed_actions=false \
--extra-vars test_ha_ng_a=true \
--release mitaka \
--tags all \
<VIRTHOST>
In this case we will not check for failed actions (a test that would otherwise be run on mitaka) and we will force the execution of the "ng_a" test described earlier, which is normally executed only on newton versions or above.
All tests are performed using an external application named [tripleo-director-ha-test-suite](https://github.com/rscarazz/tripleo-director-ha-test-suite).
Example Playbook
----------------
The main playbook couldn't be simpler:
---
- name: Validate overcloud HA status
hosts: localhost
gather_facts: no
roles:
- tripleo-overcloud-validate-ha
But it could also be used at the end of a deployment, like in this file [baremetal-undercloud-validate-ha.yml](https://github.com/openstack/tripleo-quickstart-extras/blob/master/playbooks/baremetal-undercloud-validate-ha.yml).
License
-------
Apache
Author Information
------------------
Raoul Scarazzini <rasca@redhat.com>

View File

@ -1,14 +0,0 @@
---
# Default variables for the validate-ha role (file removed by this commit).
# Working directory used on the overcloud controller nodes.
overcloud_working_dir: "/home/heat-admin"
# Where test stdout/stderr logs are collected on the undercloud; note this
# uses working_dir (defined by the wider quickstart environment), not
# overcloud_working_dir.
validate_ha_logs_dir: "{{ working_dir }}/validate_ha_logs"
# Jinja2 templates rendered onto the undercloud by tasks/main.yml.
environment_file: environment.j2
# Workarounds are opt-in; the script is only templated/run when true.
apply_workarounds: false
workarounds_script: workarounds.sh.j2
# Floating-network defaults derived from the undercloud network CIDR via the
# nthhost filter (addresses .100-.120, gateway at .1).
floating_ip_cidr: "{{ undercloud_network_cidr }}"
floating_ip_start: "{{ floating_ip_cidr|nthhost(100) }}"
floating_ip_end: "{{ floating_ip_cidr|nthhost(120) }}"
external_network_gateway: "{{ floating_ip_cidr|nthhost(1) }}"

View File

@ -1,2 +0,0 @@
# Role metadata (removed by this commit): validate-ha depends on the shared
# extras-common role.
dependencies:
- extras-common

View File

@ -1,139 +0,0 @@
---
# Setup portion of validate-ha tasks/main.yml (removed by this commit):
# render the environment file, optionally apply workarounds, fetch the
# external HA test suite, and load the per-release test list.
# NOTE(review): indentation flattened by the diff viewer.
- name: Creating the environment file on undercloud
template:
src: "{{ environment_file }}"
dest: "{{ working_dir }}/environment"
mode: 0600
# The two workaround tasks below only run when apply_workarounds is true.
- name: Load the workarounds script on the undercloud
template:
src: "{{ workarounds_script }}"
dest: "{{ working_dir }}/workarounds.sh"
mode: 0755
when: apply_workarounds
- name: Execute workarounds script on the undercloud
shell: >
"{{ working_dir }}/workarounds.sh"
when: apply_workarounds
# Fresh clone on the undercloud and on every controller (any previous clone
# is removed first).
- name: Get overcloud-ha-test-suite on undercloud and controllers
shell: >
rm -rf tripleo-director-ha-test-suite;
git clone https://github.com/rscarazz/tripleo-director-ha-test-suite/ tripleo-director-ha-test-suite;
delegate_to: "{{ item }}"
with_items:
- "undercloud"
- "{{ groups['controller'] }}"
# Loads vars/test_list_<release>.yml, which sets the test_ha_* switches.
- name: Include test sequence depending on release
include_vars:
dir: "vars"
files_matching: "test_list_{{ release }}.yml"
- name: Create directory on the undercloud to store test results
file: path={{ validate_ha_logs_dir }} state=directory
# Each HA test below follows the same pattern: run TD-ha-test-suite.sh on
# overcloud-controller-0 (or on the undercloud for the instance test),
# register the command result, copy stdout to the undercloud log directory in
# "always", and copy stderr there in "rescue" if the test failed. The
# trailing "when" on each block gates the whole test on its test_ha_*
# variable from vars/test_list_<release>.yml.
# Test: failed actions
- block:
- name: HA test - Failed actions (overcloud)
delegate_to: overcloud-controller-0
shell: >
{{ overcloud_working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/test/test_check-failed-actions
register: test_ha_failed_actions_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_failed_actions_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_failed_actions_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_failed_actions_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_failed_actions_stderr.log"
when: test_ha_failed_actions|bool
# Test: Master/Slave
- block:
- name: HA test - Master/Slave core resource stop and start (overcloud)
delegate_to: overcloud-controller-0
shell: >
{{ overcloud_working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/test/test_master-slave -r {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/recovery/recovery_master-slave
register: test_ha_master_slave_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_master_slave_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_master_slave_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_master_slave_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_master_slave_stderr.log"
when: test_ha_master_slave|bool
# Test: Keystone removal
- block:
- name: HA test Keystone removal (overcloud)
delegate_to: overcloud-controller-0
shell: >
{{ overcloud_working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/recovery/recovery_keystone-constraint-removal
register: test_ha_keystone_constraint_removal_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_keystone_constraint_removal_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_keystone_constraint_removal_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_keystone_constraint_removal_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_keystone_constraint_removal_stderr.log"
when: test_ha_keystone_constraint_removal|bool
# Test: NG A
- block:
- name: HA test NG A (overcloud)
delegate_to: overcloud-controller-0
shell: >
{{ overcloud_working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/test/test_pacemaker-light-a -r {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/recovery/recovery_pacemaker-light
register: test_ha_ng_a_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_ng_a_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_a_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_ng_a_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_a_stderr.log"
when: test_ha_ng_a|bool
# Test: NG B
- block:
- name: HA test NG B (overcloud)
delegate_to: overcloud-controller-0
shell: >
{{ overcloud_working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/test/test_pacemaker-light-b -r {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/recovery/recovery_pacemaker-light
register: test_ha_ng_b_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_ng_b_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_b_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_ng_b_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_b_stderr.log"
when: test_ha_ng_b|bool
# Test: NG C
- block:
- name: HA test NG C (overcloud)
delegate_to: overcloud-controller-0
shell: >
{{ overcloud_working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/test/test_pacemaker-light-c -r {{ overcloud_working_dir }}/tripleo-director-ha-test-suite/recovery/recovery_pacemaker-light
register: test_ha_ng_c_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_ng_c_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_c_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_ng_c_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_ng_c_stderr.log"
when: test_ha_ng_c|bool
# Test: Instance deployment
# Runs on the undercloud itself (no delegate_to) and passes -u to the suite.
- block:
- name: HA Test instance deploy on the overcloud (undercloud)
shell: >
{{ working_dir }}/tripleo-director-ha-test-suite/TD-ha-test-suite.sh -t {{ working_dir }}/tripleo-director-ha-test-suite/test/test_instance-creation -r {{ working_dir }}/tripleo-director-ha-test-suite/recovery/recovery_instance-creation -u
register: test_ha_instance_cmd
always:
- name: copy stdout test result to undercloud and check command
copy: content="{{ test_ha_instance_cmd.stdout }}" dest="{{ validate_ha_logs_dir }}/test_ha_instance_stdout.log"
rescue:
- name: copy stderr test result to undercloud and check command
copy: content="{{ test_ha_instance_cmd.stderr }}" dest="{{ validate_ha_logs_dir }}/test_ha_instance_stderr.log"
when: test_ha_instance|bool

View File

@ -1,11 +0,0 @@
# Jinja2 template (removed by this commit) rendered to
# {{ working_dir }}/environment and sourced by the HA test suite; exports the
# release, quiet-SSH helpers, and the floating-network settings from the
# quickstart config.
# OpenStack version
export OPENSTACK_VERSION={{ release }}
# SSH related commands
export SSH="ssh -q -o StrictHostKeyChecking=no"
export SCP="scp -q -o StrictHostKeyChecking=no"
# Floating network details
export FLOATING_PHYSICAL_NET="{{ public_physical_network }}"
export FLOATING_SUBNET="{{ floating_ip_cidr }}"
export FLOATING_RANGE_START="{{ public_net_pool_start }}"
export FLOATING_RANGE_END="{{ public_net_pool_end }}"
export FLOATING_GW="{{ public_net_gateway }}"

View File

@ -1,27 +0,0 @@
#!/bin/bash
# Release-specific workarounds script template (removed by this commit),
# rendered per release by Jinja2 and executed on the undercloud when
# apply_workarounds is true.
set -eux
source {{ working_dir }}/environment
source {{ working_dir }}/stackrc
# Controller IPs extracted from `nova list` output; column/field positions
# assume the classic nova list table format — TODO confirm on newer clients.
CONTROLLERS=$(nova list | grep controller | awk '{print $12}' | cut -f2 -d=)
CONTROLLER0=$(nova list | grep controller-0 | awk '{print $12}' | cut -f2 -d=)
{% if release == 'newton' or release == 'mitaka' %}
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1348222
for CONTROLLER in $CONTROLLERS; do
$SSH heat-admin@$CONTROLLER sudo pip install redis;
done
{% endif %}
{% if release == 'mitaka' %}
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1357229
for CONTROLLER in $CONTROLLERS; do
$SSH heat-admin@$CONTROLLER "sudo sed -i -e 's/^After=.*/After=syslog.target network.target/g' /usr/lib/systemd/system/openstack-heat-engine.service";
done
{% endif %}
{% if release == 'newton' or release == 'mitaka' %}
# Clear any failed-resource state left over by the workarounds above.
$SSH heat-admin@$CONTROLLER0 sudo pcs resource cleanup
{% endif %}

View File

@ -1 +0,0 @@
test_list_ocata.yml

View File

@ -1,7 +0,0 @@
# Per-release test switches (file removed by this commit). The keystone
# constraint test is on and the NG tests are off — per the README this is the
# mitaka test list, presumably; filename not visible in this view.
test_ha_failed_actions: true
test_ha_master_slave: true
test_ha_keystone_constraint_removal: true
test_ha_ng_a: false
test_ha_ng_b: false
test_ha_ng_c: false
test_ha_instance: true

View File

@ -1,7 +0,0 @@
# Per-release test switches (file removed by this commit). NG tests enabled,
# keystone constraint test disabled — matches the README's newton-or-later
# grouping, presumably; filename not visible in this view.
test_ha_failed_actions: true
test_ha_master_slave: true
test_ha_keystone_constraint_removal: false
test_ha_ng_a: true
test_ha_ng_b: true
test_ha_ng_c: true
test_ha_instance: true

View File

@ -1,7 +0,0 @@
# Per-release test switches (file removed by this commit); identical to the
# previous list — the newton-or-later grouping for another release,
# presumably; filename not visible in this view.
test_ha_failed_actions: true
test_ha_master_slave: true
test_ha_keystone_constraint_removal: false
test_ha_ng_a: true
test_ha_ng_b: true
test_ha_ng_c: true
test_ha_instance: true