Integrate undercloud_user var into oooq-extras

We need to differentiate local_working_dir from working_dir
and to decouple the stack user from the `ansible_user` var.
Both of these conflations are causing issues as we begin to
automate deployments in more environments.
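
For example, the intended split looks roughly like this (a
sketch only; the actual extras-common defaults are not part
of this diff):

  # extras-common/defaults/main.yml (illustrative)
  local_working_dir: "{{ lookup('env', 'HOME') }}/.quickstart"  # on the machine running ansible
  working_dir: /home/stack                                      # on the undercloud itself
  undercloud_user: stack  # owns the deployment, independent of ansible_user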

- Clean up duplicate variables that are consumed via extras-common
  - Note: extras-common depends on the common role in OOOQ
- Clean up a redundant var and superfluous quotes in the overcloud-scale
  role
- Clean up redundant comments in <role>/defaults/main.yml

Closes-bug: 1654574
Change-Id: I9c7a3166ed1fc5042c11e420223134ea912b45c5
Harry Rybacki 2017-01-06 14:07:26 -05:00
parent 19933e5f03
commit ce7982192f
55 changed files with 80 additions and 107 deletions

View File

@ -1,5 +1,4 @@
---
working_dir: /home/stack
step_install_upstream_ipxe: false

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,7 +1,6 @@
---
# defaults file for ansible-role-tripleo-baremetal-prep-virthost
virthost_provisioning_interface: eth1
virthost_provisioning_ip: 192.168.122.1
virthost_provisioning_netmask: 255.255.255.192
virthost_provisioning_hwaddr: 52:54:00:00:76:00
working_dir: /home/stack

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,6 +1,5 @@
working_dir: /home/stack
---
non_root_user: stack
undercloud_key: "{{ local_working_dir }}/id_rsa_undercloud"
baremetal_provisioning_log: "{{ local_working_dir }}/baremetal_provision.log"

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,7 +1,5 @@
---
# defaults file for ansible-role-tripleo-gate
local_working_dir: "{{ lookup('env', 'HOME') }}/.quickstart"
artg_dlrn_repo_url: "https://github.com/openstack-packages/DLRN.git"
artg_rdoinfo_repo_url: "https://github.com/redhat-openstack/rdoinfo"
artg_compressed_gating_repo: "/home/stack/gating_repo.tar.gz"

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,6 +1,5 @@
---
## collection related vars
artcl_collect: true
artcl_collect_list:
- /var/log/

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -6,7 +6,7 @@
- name: Unarchive shell scripts
shell: >
gunzip "{{ artcl_collect_dir }}/undercloud/home/{{ ansible_user }}/{{ item }}.sh.gz";
gunzip "{{ artcl_collect_dir }}/undercloud/home/{{ undercloud_user }}/{{ item }}.sh.gz";
with_items: "{{ artcl_create_docs_payload.included_deployment_scripts }}"
ignore_errors: yes
when: artcl_gzip_only|bool
@ -14,7 +14,7 @@
- name: Generate rST docs from scripts and move to Sphinx src dir
shell: >
awk -f "{{ local_working_dir }}/usr/local/share/ansible/roles/collect-logs/scripts/doc_extrapolation.awk" \
"{{ artcl_collect_dir }}/undercloud/home/{{ ansible_user }}/{{ item }}.sh" > \
"{{ artcl_collect_dir }}/undercloud/home/{{ undercloud_user }}/{{ item }}.sh" > \
"{{ artcl_docs_source_dir }}/{{ item }}.rst"
with_items: "{{ artcl_create_docs_payload.included_deployment_scripts }}"
ignore_errors: yes

View File

@ -1,11 +1,6 @@
---
overcloud_ipv6: false
containerized_overcloud: false
overcloud_templates_path: /usr/share/openstack-tripleo-heat-templates
# TODO(hrybacki): remove after https://review.openstack.org/#/c/418998/ merges
# The overcloud-deploy role had a dependency on the tripleo role (no longer
# exists) which depdended on the common role which /has/ `enable_pacemaker`.
# this workaround can be removed after https://review.openstack.org/#/c/413800/
# is merged creating a dependency chain common<-extras-common<-overcloud-deploy
enable_pacemaker: false
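
For context, the dependency chain described in the TODO comment
above would look roughly like this once the referenced reviews
merge (a sketch; the extras-common metadata itself is not part
of this diff):

  # overcloud-deploy/meta/main.yml
  dependencies:
    - extras-common
  # extras-common/meta/main.yml (assumed)
  dependencies:
    - common  # the tripleo-quickstart role that already defines enable_pacemaker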

View File

@ -1,3 +1,5 @@
---
gated_projects:
- openstack/tripleo-quickstart
- openstack/tripleo-quickstart-extras

View File

@ -1,4 +1,5 @@
---
# defaults file for modify-image
# These variable do not have a default because there is no sane default. The
# role will fail immediately if either is not specified.
@ -23,4 +24,3 @@ vc_args: ""
# modify_image_vc_cpu:
modify_image_vc_verbose: false
modify_image_vc_trace: false

View File

@ -1,6 +1,4 @@
---
# defaults for all ovb-stack related tasks
local_working_dir: "{{ lookup('env', 'HOME') }}/.quickstart"
release: mitaka
@ -18,7 +16,6 @@ os_tenant_name: admin
os_auth_url: 'http://10.0.1.10:5000/v2.0'
cloud_name: qeos7
ansible_user: stack
stack_name: 'oooq-{{ prefix }}stack'
rc_file: /home/{{ ansible_user }}/overcloudrc
node_name: 'undercloud'

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,3 +1,5 @@
---
# Script and log locations used during the delete process.
delete_script: overcloud_delete.sh.j2
delete_log: "{{ working_dir }}/overcloud_delete.log"

View File

@ -5,7 +5,7 @@
## * Gather informations on the deployment
## ::
source {{ working_dir}}/stackrc
source {{ working_dir }}/stackrc
OVERCLOUD_NAME="overcloud"
OVERCLOUD_ID=$(openstack stack list | grep "$OVERCLOUD_NAME" | awk '{print $2}')
echo $OVERCLOUD_ID > {{ working_dir }}/overcloud_id

View File

@ -1,5 +1,4 @@
# set working dir so that the deployment can run independently
working_dir: /home/stack
---
# Script and log locations used during the deploy process.
deploy_script: overcloud-deploy.sh.j2

View File

@ -1,5 +1,4 @@
---
working_dir: /home/stack
network_isolation: true
@ -36,4 +35,3 @@ overcloud_custom_tht_log: overcloud_custom_tht_script.log
bond_with_vlans_copy_nic_configs_script: bond-with-vlans-copy-nic-configs.sh.j2
bond_with_vlans_nic_configs_log: bond_with_vlans_nic_configs_script_log

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,6 +1,5 @@
---
# defaults file for ansible-role-tripleo-overcloud-prep-containers
working_dir: /home/stack
tripleo_common_dir: /usr/share/openstack-tripleo-common
containerized_overcloud: false
overcloud_prep_containers_script: overcloud-prep-containers.sh.j2

View File

@ -1,2 +1,4 @@
---
overcloud_prep_flavors_script: overcloud-prep-flavors.sh.j2
overcloud_prep_flavors_log: "{{ working_dir }}/overcloud_prep_flavors.log"

View File

@ -1,3 +1,5 @@
---
overcloud_prep_images_script: overcloud-prep-images.sh.j2
overcloud_prep_images_log: "{{ working_dir }}/overcloud_prep_images.log"

View File

@ -1,3 +1,5 @@
---
overcloud_prep_network_script: overcloud-prep-network.sh.j2
overcloud_prep_network_log: "{{ working_dir }}/overcloud_prep_network.log"

View File

@ -19,7 +19,6 @@ A description of the settable variables for this role should go here, including
- artosn_scale_nodes: <true> -- boolean value that will scale nodes if true
- artosn_delete_original_node: <false> -- boolean value that will delete the original node of type that was scaled
- artosn_working_dir: <'/home/stack'> -- working directory for the role. Assumes stackrc file is present at this location
Dependencies

View File

@ -1,6 +1,4 @@
---
# defaults file for ansible-role-tripleo-overcloud-scale-nodes
artosn_scale_nodes: true
artosn_delete_original_node: false
artosn_working_dir: /home/stack

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -3,7 +3,7 @@
# Delete the scaled node
- name: Check the overcloud heat stack-list state
shell: >
source "{{ artosn_working_dir }}"/stackrc;
source {{ working_dir }}/stackrc;
heat stack-list
register: heat_stack_list_result
@ -11,16 +11,16 @@
fail: msg='Overcloud heat stack is not in a complete state'
when: heat_stack_list_result.stdout.find('COMPLETE') == -1
- name: Register uuid of original "{{ node_to_scale}}" node
- name: Register uuid of original {{ node_to_scale }} node
shell: >
source "{{ artosn_working_dir }}"/stackrc;
nova list | grep -m 1 "{{ node_to_scale }}" | sed -e 's/|//g' | awk '{print $1}'
source {{ working_dir }}/stackrc;
nova list | grep -m 1 {{ node_to_scale }} | sed -e 's/|//g' | awk '{print $1}'
register: node_id_to_delete
- name: Register the Name of the original "{{ node_to_scale }}" node
- name: Register the Name of the original {{ node_to_scale }} node
shell: >
source "{{ artosn_working_dir }}"/stackrc;
nova list | grep -m 1 "{{ node_to_scale }}" | sed -e 's/|//g' | awk '{print $2}'
source {{ working_dir }}/stackrc;
nova list | grep -m 1 {{ node_to_scale }} | sed -e 's/|//g' | awk '{print $2}'
register: node_name_to_delete
- name: Display node name to be deleted
@ -29,18 +29,18 @@
- name: Copy delete node script to undercloud
template:
src: delete-node.j2
dest: "{{ artosn_working_dir }}/delete-node.sh"
dest: "{{ working_dir }}/delete-node.sh"
mode: 0755
- name: Delete node by id
shell: >
cat "{{ artosn_working_dir }}"/delete-node.sh;
"{{ artosn_working_dir }}"/delete-node.sh &> delete_node_scale_console.log;
cat {{ working_dir }}/delete-node.sh;
{{ working_dir }}/delete-node.sh &> delete_node_scale_console.log;
# Verify the delete was successful
- name: Poll heat stack-list to determine when node delete is complete
shell: >
source "{{ artosn_working_dir }}"/stackrc;
source {{ working_dir }}/stackrc;
heat stack-list
register: heat_stack_list_result
until: heat_stack_list_result.stdout.find("COMPLETE") != -1
@ -49,8 +49,8 @@
- name: Determine the post scale node count
shell: >
source "{{ artosn_working_dir }}/stackrc";
nova list | grep "{{ node_to_scale }}" | cut -f2- -d':' | wc -l
source {{ working_dir }}/stackrc;
nova list | grep {{ node_to_scale }} | cut -f2- -d':' | wc -l
register: post_scale_node_count
- name: Remove deleted hosts from the host file
@ -59,5 +59,5 @@
delegate_to: localhost
- name: Check that post delete node count is correct
fail: msg="Overcloud nova list does not show expected number of {{ node_to_scale }} services"
when: post_scale_node_count.stdout != "{{ initial_scale_value }}"
fail: msg=Overcloud nova list does not show expected number of {{ node_to_scale }} services
when: post_scale_node_count.stdout != {{ initial_scale_value }}

View File

@ -20,25 +20,25 @@
# Prep for scaling overcloud
- name: Determine initial number of node(s) that will be scaled
shell: >
source "{{ artosn_working_dir }}/stackrc";
nova list | grep "{{ node_to_scale }}" | cut -f2- -d':' | wc -l
source {{ working_dir }}/stackrc;
nova list | grep {{ node_to_scale }} | cut -f2- -d':' | wc -l
register: initial_node_count
- name: Register uuid of original "{{ node_to_scale}}" node
- name: Register uuid of original {{ node_to_scale }} node
shell: >
source "{{ artosn_working_dir }}"/stackrc;
nova list | grep -m 1 "{{ node_to_scale }}" | sed -e 's/|//g' | awk '{print $1}'
source {{ working_dir }}/stackrc;
nova list | grep -m 1 {{ node_to_scale }} | sed -e 's/|//g' | awk '{print $1}'
register: node_id_to_delete
- name: Register the Name of the original "{{ node_to_scale }}" node
- name: Register the Name of the original {{ node_to_scale }} node
shell: >
source "{{ artosn_working_dir }}"/stackrc;
nova list | grep -m 1 "{{ node_to_scale }}" | sed -e 's/|//g' | awk '{print $2}'
source {{ working_dir }}/stackrc;
nova list | grep -m 1 {{ node_to_scale }} | sed -e 's/|//g' | awk '{print $2}'
register: node_name_to_delete
- name: Register pre-scale nova list
shell: >
source "{{ artosn_working_dir }}/stackrc";
source {{ working_dir }}/stackrc;
nova list
register: pre_scale_nova_list
@ -49,12 +49,12 @@
- name: Copy scale deployment template to undercloud
template:
src: scale-deployment.j2
dest: "{{ artosn_working_dir }}/scale-deployment.sh"
dest: "{{ working_dir }}/scale-deployment.sh"
mode: 0755
- name: Copy neutron l3 ha heat template
when: enable_pacemaker|bool and number_of_controllers|int < 3
template:
src: "neutronl3ha.yaml.j2"
dest: "{{ artosn_working_dir }}/neutronl3ha.yaml"
src: neutronl3ha.yaml.j2
dest: "{{ working_dir }}/neutronl3ha.yaml"
mode: 0755

View File

@ -3,12 +3,12 @@
# Do the scale
- name: Call scale deployment script
shell: >
source "{{ artosn_working_dir }}/stackrc";
"{{ artosn_working_dir }}"/scale-deployment.sh &> overcloud_deployment_scale_console.log;
source {{ working_dir }}/stackrc;
{{ working_dir }}/scale-deployment.sh &> overcloud_deployment_scale_console.log;
- name: Poll heat stack-list to determine when node scale is complete
shell: >
source "{{ artosn_working_dir }}"/stackrc;
source {{ working_dir }}/stackrc;
heat stack-list
register: heat_stack_list_result
until: heat_stack_list_result.stdout.find("COMPLETE") != -1
@ -17,7 +17,7 @@
- name: Register post-scale nova list
shell: >
source "{{ artosn_working_dir }}/stackrc";
source {{ working_dir }}/stackrc;
nova list
register: post_scale_nova_list
@ -28,10 +28,10 @@
# Verify the scale
- name: Determine the post scale node count
shell: >
source "{{ artosn_working_dir }}/stackrc";
nova list | grep "{{ node_to_scale }}" | cut -f2- -d':' | wc -l
source {{ working_dir }}/stackrc;
nova list | grep {{ node_to_scale }} | cut -f2- -d':' | wc -l
register: post_scale_node_count
- name: Check that post scale node count is correct
fail: msg="Overcloud nova list does not show expected number of {{ node_to_scale }} services"
when: post_scale_node_count.stdout != "{{ final_scale_value }}"
fail: msg=Overcloud nova list does not show expected number of {{ node_to_scale }} services
when: post_scale_node_count.stdout != {{ final_scale_value }}

View File

@ -10,7 +10,7 @@ set -eux
## * Source in undercloud credentials.
## ::
source {{ artosn_working_dir }}/stackrc
source {{ working_dir }}/stackrc
### --stop_docs

View File

@ -1,6 +1,5 @@
---
# defaults file for ansible-role-tripleo-ssl
working_dir: /home/stack
ssl_overcloud: false
overcloud_public_vip: 10.0.0.5

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,4 +1,5 @@
---
network_isolation: true
# pre upgrade settings:
upgrade_overcloud_dns_server: 8.8.8.8
@ -20,7 +21,7 @@ repos_url:
- http://trunk.rdoproject.org/centos7-{{ target_upgrade_version }}/{{ upgrade_delorean_hash | default('current-passed-ci')}}/delorean.repo
- http://trunk.rdoproject.org/centos7-{{ target_upgrade_version }}/delorean-deps.repo
# upgrade settings:
upgrade_working_dir: /home/stack
upgrade_working_dir: {{ working_dir }}
tht_dir: tripleo-heat-templates
# scripts
undercloud_upgrade_script: upgrade-undercloud.sh.j2

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,4 +1,5 @@
working_dir: /home/stack
---
repo_setup_script: repo_setup.sh.j2
repo_setup_log: "{{ working_dir }}/repo_setup.log"
repo_run_live: true

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,3 +1,5 @@
---
undercloud_config_file: undercloud.conf.j2
undercloud_install_script: undercloud-install.sh.j2
undercloud_post_install_script: undercloud-install-post.sh.j2

View File

@ -27,7 +27,8 @@ Role Variables
--------------
- local_working_dir: <"{{ lookup('env', 'HOME') }}/.quickstart"> -- Directory for quickstart.sh script
- non_root_user: <stack> -- Default user to execute TripleO Quickstart
- non_root_user: <stack> -- The non-root user operating on the virthost
- undercloud_user: <stack> -- The non-root user operating on the undercloud
- undercloud_key: <"{{ local_working_dir }}/id_rsa_undercloud"> -- Key to access the undercloud node/machine
- non_root_user_setup: <true> -- Switch to setup a non-root user
- toci_vxlan_networking: <false> -- Switch to setup the VXLAN networking from devstack-gate
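
As an illustration of the decoupling, a deployment can now set
the two users independently, e.g. via extra Ansible variables
(the values below are hypothetical):

  non_root_user: centos   # non-root user created on the virthost
  undercloud_user: stack  # non-root user operating on the undercloud
  undercloud_key: "{{ local_working_dir }}/id_rsa_undercloud"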

View File

@ -1,6 +1,5 @@
local_working_dir: "{{ lookup('env', 'HOME') }}/.quickstart"
---
non_root_user: stack
undercloud_key: "{{ local_working_dir }}/id_rsa_undercloud"
non_root_user_setup: true
@ -22,4 +21,3 @@ external_interface: eth2
external_interface_ip: 10.0.0.1
external_interface_netmask: 255.255.255.0
custom_nameserver: 8.8.8.8

View File

@ -0,0 +1,2 @@
dependencies:
- extras-common

View File

@ -19,7 +19,7 @@
- name: Configure non-root user authorized_keys on undercloud
authorized_key:
user: "{{ non_root_user }}"
user: "{{ undercloud_user }}"
key: "{{ item }}"
with_file:
- "{{ undercloud_key }}.pub"

View File

@ -10,14 +10,14 @@ set -eux
## * Create non-root user on undercloud
## ::
id -u "{{ non_root_user }}" || \
sudo useradd -s /bin/bash -d /home/"{{ non_root_user }}" "{{ non_root_user }}"
id -u "{{ undercloud_user }}" || \
sudo useradd -s /bin/bash -d /home/"{{ undercloud_user }}" "{{ undercloud_user }}"
## * Grant sudo privileges to non-root user on undercloud
## ::
echo "{{ non_root_user }} ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/{{ non_root_user }}
sudo chown root:root /etc/sudoers.d/{{ non_root_user }}
sudo chmod 440 /etc/sudoers.d/{{ non_root_user }}
echo "{{ undercloud_user }} ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/{{ undercloud_user }}
sudo chown root:root /etc/sudoers.d/{{ undercloud_user }}
sudo chmod 440 /etc/sudoers.d/{{ undercloud_user }}
### --stop_docs

View File

@ -1,6 +1,5 @@
---
# defaults file for ansible-role-tripleo-overcloud-validate-ha
working_dir: /home/stack
overcloud_working_dir: /home/heat-admin
environment_file: environment.j2

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,3 +1,3 @@
---
validate_ipmi_step: true
working_dir: /home/stack

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -1,6 +1,5 @@
---
# defaults file for ansible-role-tripleo-overcloud-validate
working_dir: /home/stack
tenantrc: overcloudrc
validate_script: overcloud-validate.sh.j2
validate_template: tenantvm_floatingip.yaml.j2

View File

@ -1,5 +1,5 @@
---
# defaults file for ansible-role-tripleo-tempest
configure_tempest: true
floating_ip_cidr: "{{ undercloud_network_cidr|default('192.168.24.0/24') }}"
public_net_pool_start: "{{ floating_ip_cidr|nthhost(100) }}"
@ -7,7 +7,6 @@ public_net_pool_end: "{{ floating_ip_cidr|nthhost(120) }}"
public_net_gateway: "{{ floating_ip_cidr|nthhost(1) }}"
tempest_log_file: 'tempest_output.log'
test_regex: '.*smoke'
working_dir: '/home/stack'
public_net_name: public
public_network_type: flat

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common

View File

@ -24,4 +24,4 @@
- name: Copy tempest input file to undercloud
copy:
src: "tempest-undercloud-config.conf"
dest: "/home/stack/{{ tempest_deployer_input_file }}"
dest: "{{ working_dir }}/{{ tempest_deployer_input_file }}"

View File

@ -1,7 +1,7 @@
---
undercloud_sanity_check_script: undercloud-sanity-check.sh.j2
undercloud_sanity_check_log: "{{ working_dir }}/undercloud_sanity_check.log"
undercloud_check_idempotency: false
undercloud_check_sanity: false
undercloud_reinstall_log: "{{ working_dir }}/undercloud_reinstall.log"

View File

@ -1,3 +1 @@
---
# defaults file for ansible-role-tripleo-cleanup-nfo
non_root_user: stack

View File

@ -1,3 +1,2 @@
dependencies:
- extras-common