Remove hiera roles

The hiera roles have been moved into tripleo-ansible and are no longer
needed within this repository. This change removes the roles that have
been moved.

Because this change removes the last of the roles within this repo, the
setup.cfg file has been modified to remove the no-longer-relevant
install path(s), the molecule-related config has been pulled out of
the tox.ini file, and the molecule-related packages have been purged
from the bindep.txt file.

Story: 2006044
Task: 34726
Story: 2006044
Task: 34730
Depends-On: I7f9e993735a0347aac12f728393639d88c80ff0f
Change-Id: I1bf74a4fb0adcae632c06b38e06a572ebc434201
Signed-off-by: Kevin Carter <kecarter@redhat.com>
Kevin Carter 2019-07-29 16:05:30 -05:00 committed by Kevin Carter (cloudnull)
parent 29b7c8a1ac
commit 920b2124d9
23 changed files with 1 addition and 422 deletions


@@ -31,5 +31,3 @@ gzip
# Required to build language docs
gettext
# Required for molecule testing
docker


@@ -1,37 +0,0 @@
tripleo-hieradata
=================
An Ansible role to generate hieradata files.
Role variables
--------------
Required:
* `hieradata_template` -- path to the template of the hieradata content.
* `hieradata_variable_start_string` -- string marking the beginning of a template print statement.
* `hieradata_variable_end_string` -- string marking the end of a template print statement.
* `hieradata_per_host` -- whether or not hieradata is configured per host.
* `hieradata_files` -- list of hieradata files (order matters for the hierarchy).
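
For illustration, a minimal sketch of wiring these variables into a play
(the template path here is hypothetical; the escape strings match the
role defaults shown later in this change):

- hosts: overcloud
  tasks:
    - name: Render hieradata
      include_role:
        name: tripleo-hieradata
      vars:
        hieradata_template: my-hieradata.j2.yaml  # hypothetical template
        hieradata_variable_start_string: "{{ '{{' }}"
        hieradata_variable_end_string: "{{ '}}' }}"
        hieradata_per_host: false
        hieradata_files:
          - node
          - common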
Test playbook
-------------
Assuming you have tripleo-inventory.yml generated, you can run the
test playbook like:
ANSIBLE_ROLES_PATH=tripleo-common/roles \
ANSIBLE_HOST_KEY_CHECKING=False \
ansible-playbook \
-i tripleo-inventory.yml \
tripleo-common/roles/tripleo-hieradata/test-playbook.yml
License
-------
Free software: Apache License (2.0)
Author Information
------------------
OpenStack TripleO team


@@ -1,10 +0,0 @@
hieradata_template: ""
hieradata_files: []
# jinja2 escape trick for simple {{ and }} strings:
hieradata_variable_start_string: "{{ '{{' }}"
hieradata_variable_end_string: "{{ '}}' }}"
hieradata_per_host: false
hieradata_templates_list:
- bootstrap_node
- all_nodes
- vip_data
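
A quick note on the escape trick above: when Ansible evaluates these
defaults, each expression collapses to a literal brace pair, so the role
ends up handing plain `{{` and `}}` delimiters through to the hieradata
template. Roughly:

# what the role sees after Ansible evaluates the defaults (sketch):
hieradata_variable_start_string: "{{"
hieradata_variable_end_string: "}}"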


@@ -1,5 +0,0 @@
- name: Render hieradata from template
  template:
    src: templates/{{ item }}.j2
    dest: "{{ dest_path | default('/etc/puppet/hieradata/' ~ item ~ '.json') }}"
  loop: "{{ hieradata_templates_list }}"


@@ -1,21 +0,0 @@
- name: Create /etc/puppet/hieradata
  file:
    path: /etc/puppet/hieradata
    state: directory
    mode: 0700

- name: Write hiera config
  when: (hieradata_files | length) > 0
  copy:
    mode: 0600
    dest: /etc/puppet/hiera.yaml
    # TODO(emilien) Switch to Hiera5 format once we stop calling Hiera CLI
    # which is deprecated and should be replaced by "puppet lookup" command.
    content: |
      ---
      :backends:
        - json
      :json:
        :datadir: /etc/puppet/hieradata
      :hierarchy:
      {{ hieradata_files | to_nice_yaml if hieradata_files | type_debug == 'list' else hieradata_files }}
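
For reference, with a hypothetical hieradata_files value of [node, common],
the task above would write roughly this /etc/puppet/hiera.yaml:

---
:backends:
  - json
:json:
  :datadir: /etc/puppet/hieradata
:hierarchy:
- node
- common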


@@ -1,62 +0,0 @@
{
{% set all_enabled_services = (enabled_services + all_nodes_extra_map_data['enabled_services'] | default([])) | unique %}
"enabled_services": {{ all_enabled_services | to_nice_json }},
{% for service in all_enabled_services %}
{# <service>_enabled: true #}
{{ '"' ~ service ~ '_enabled": true,' }}
{# <service>_node_ips: <list of ips> #}
{{ '"' ~ service ~ '_node_ips": ' ~ ((groups[service] | default ([]) | map('extract', hostvars, service_net_map[service ~ '_network'] | default('ctlplane') ~ '_ip') | list + all_nodes_extra_map_data[service ~ '_node_ips'] | default([])) | to_json) ~ ',' }}
{% if nova_additional_cell %}
{# <service>_cell_node_names: <list of hostnames> #}
{{ '"' ~ service ~ '_cell_node_names": ' ~ (groups[service] | default ([]) | map('extract', hostvars, service_net_map[service ~ '_network'] | default('ctlplane') ~ '_hostname') | list | to_json) ~ ',' }}
{% else %}
{# <service>_node_names: <list of hostnames> #}
{{ '"' ~ service ~ '_node_names": ' ~ ((groups[service] | default ([]) | map('extract', hostvars, service_net_map[service ~ '_network'] | default('ctlplane') ~ '_hostname') | list + all_nodes_extra_map_data[service ~ '_node_names'] | default([])) | to_json) ~ ',' }}
{% endif %}
{# <service>_short_node_names: <list of hostnames> #}
{{ '"' ~ service ~ '_short_node_names": ' ~ ((groups[service] | default ([]) | map('extract', hostvars, 'inventory_hostname') | list + all_nodes_extra_map_data[service ~ '_short_node_names'] | default([])) | to_json) ~ ',' }}
{# <service>_short_bootstrap_node_name: hostname #}
{% set services = (groups[service] | default ([]) | map('extract', hostvars, 'inventory_hostname')) | list + [all_nodes_extra_map_data[service ~ '_short_bootstrap_node_name'] | default('') ] %}
{% if (services | length) > 0 %}
{{ '"' ~ service ~ '_short_bootstrap_node_name": ' ~ (services | first | to_json) ~ ',' }}
{% endif %}
{# <service>_bootstrap_node_ip: ip #}
{% set services = (groups[service] | default ([]) | map('extract', hostvars, service_net_map[service ~ '_network'] | default('ctlplane') ~ '_ip')) | list %}
{% if (services | length) > 0 %}
{{ '"' ~ service ~ '_bootstrap_node_ip": ' ~ ((services + [all_nodes_extra_map_data[service ~ '_bootstrap_node_ip'] | default('')]) | first | to_json) ~ ',' }}
{% endif %}
{% endfor %}
{# <service>_network: <network> #}
{% for service, network in service_net_map.items() %}
{{ '"' ~ service ~ '": "' ~ network ~ '",' }}
{% endfor %}
{% if 'redis' in enabled_services %}
"redis_vip": "{{ net_vip_map.redis }}",
{% endif %}
{{ '"deploy_identifier": "' ~ deploy_identifier ~ '"' }},
{{ '"stack_action": "' ~ stack_action ~ '"' }},
{{ '"stack_update_type": "' ~ stack_update_type ~ '"' }},
{{ '"container_cli": "' ~ container_cli ~ '"' }},
{{ '"controller_node_ips": "' ~ groups[primary_role_name] | default([]) | map('extract', hostvars, 'ctlplane_ip') | list | join(',') ~ '"' }},
{{ '"controller_node_names": "' ~ groups[primary_role_name] | default([]) | map('extract', hostvars, 'inventory_hostname') | list | join(',') ~ '"' }}
}
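
To make the template above concrete, here is a sketch of what it might
render for a single hypothetical service (haproxy) on one controller;
the names and addresses are invented, and real output depends on the
inventory, service_net_map, and all_nodes_extra_map_data:

{
  "enabled_services": ["haproxy"],
  "haproxy_enabled": true,
  "haproxy_node_ips": ["192.168.24.10"],
  "haproxy_node_names": ["controller-0.example.com"],
  "haproxy_short_node_names": ["controller-0"],
  "haproxy_short_bootstrap_node_name": "controller-0",
  "haproxy_bootstrap_node_ip": "192.168.24.10",
  "haproxy_network": "ctlplane",
  "deploy_identifier": "1564424730",
  "stack_action": "CREATE",
  "stack_update_type": "",
  "container_cli": "podman",
  "controller_node_ips": "192.168.24.10",
  "controller_node_names": "controller-0"
}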


@@ -1,3 +0,0 @@
{
{{ '"boostrap_node_id": "' ~ bootstrap_nodeid ~ '"' }}
}


@@ -1,3 +0,0 @@
{
"tripleo::clouddomain": "{{cloud_domain}}"
}


@@ -1 +0,0 @@
{{ extraconfig | to_nice_json }}


@@ -1,13 +0,0 @@
{
"fqdn_ctlplane": "{{ inventory_hostname ~ '.ctlplane.' ~ cloud_domain }}",
"fqdn_canonical": "{{ inventory_hostname ~ '.' ~ cloud_domain }}"{% if enabled_networks | length > 0 and role_networks and role_networks | length > 0 %},{% endif %}
{% if role_networks %}
{% for network in enabled_networks %}
{% if network in role_networks %}
"fqdn_{{ networks[network]['name_lower'] }}": "{{ inventory_hostname ~ '.' ~ network | lower ~ '.' ~ cloud_domain }}"
{%- endif %}{% if not loop.last and loop.nextitem | default("") in role_networks %},
{% endif %}
{%- endfor %}
{% endif %}
}
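
Rendered for a hypothetical host controller-0 with cloud_domain
example.com and one enabled InternalApi network (name_lower
internal_api), the template above would yield approximately:

{
  "fqdn_ctlplane": "controller-0.ctlplane.example.com",
  "fqdn_canonical": "controller-0.example.com",
  "fqdn_internal_api": "controller-0.internalapi.example.com"
}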


@@ -1,18 +0,0 @@
{
"ctlplane": "{{ ctlplane_ip }}",
"ctlplane_subnet": "{{ ctlplane_ip ~ '/' ~ ctlplane_subnet_cidr }}",
"ctlplane_uri": "{{ ctlplane_ip | ipwrap }}"{% if enabled_networks | length > 0 %},{% endif %}
{%- for network in enabled_networks %}
{%- if network_cidrs and network ~ '_cidr' in network_cidrs %}
"{{ networks[network]['name'] }}": "{{ hostvars[inventory_hostname][networks[network]['name'] ~ '_ip'] }}",
"{{ networks[network]['name'] }}_subnet": "{{ hostvars[inventory_hostname][networks[network]['name'] ~ '_ip'] ~ '/' ~ network_cidrs[network ~ '_cidr'] }}",
"{{ networks[network]['name'] }}_uri": "{{ hostvars[inventory_hostname][networks[network]['name'] ~ '_ip'] | ipwrap }}"{% if not loop.last %},{% endif %}
{%- else %}
{# just add empty entries for this network if it doesn't apply to this role. matches previous behavior from Heat #}
"{{ networks[network]['name'] }}": "",
"{{ networks[network]['name'] }}_subnet": "",
"{{ networks[network]['name'] }}_uri": ""{% if not loop.last %},{% endif %}
{%- endif %}
{%- endfor %}
}
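
As a sketch, for a hypothetical host with a ctlplane address and one
additional network named internal_api (addresses invented), the template
above renders roughly the following; note that ipwrap only adds brackets
for IPv6 addresses, so IPv4 URIs pass through unchanged:

{
  "ctlplane": "192.168.24.10",
  "ctlplane_subnet": "192.168.24.10/24",
  "ctlplane_uri": "192.168.24.10",
  "internal_api": "172.16.2.10",
  "internal_api_subnet": "172.16.2.10/24",
  "internal_api_uri": "172.16.2.10"
}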


@@ -1 +0,0 @@
{{ role_extraconfig | to_nice_json }}


@@ -1 +0,0 @@
{{ service_configs | to_nice_json }}


@@ -1,52 +0,0 @@
{
"controller_virtual_ip": "{{ control_virtual_ip }}",
"keystone_admin_api_vip": "{{ net_vip_map[service_net_map['keystone_admin_api_network']] }}",
"keystone_public_api_vip": "{{ net_vip_map[service_net_map['keystone_public_api_network']] }}",
{%- if 'External' in enabled_networks %}
"public_virtual_ip": "{{ net_vip_map[networks['External']['name_lower']] }}",
{%- else %}
"public_virtual_ip": "{{ net_vip_map.ctlplane }}",
{%- endif %}
{%- if 'InternalApi' in enabled_networks %}
{# the internal_api_virtual_ip is needed for contrail only #}
"internal_api_virtual_ip": "{{ net_vip_map[networks['InternalApi']['name_lower']] }}",
{%- endif %}
"network_virtual_ips": {{ network_virtual_ips | to_json }},
{%- for network in enabled_networks if network == 'StorageNFS' %}
"ganesha_vip": "{{ net_vip_map[storagenfs_net_name] }}"
{%- endfor %}
{# public_virtual_ip and controller_virtual_ip are needed in both HAproxy & keepalived. #}
{%- if 'External' in enabled_networks %}
"tripleo::haproxy::public_virtual_ip": "{{ net_vip_map[networks['External']['name_lower']] }}",
"tripleo::keepalived::public_virtual_ip": "{{ net_vip_map[networks['External']['name_lower']] }}",
{%- else %}
"tripleo::haproxy::public_virtual_ip": "{{ net_vip_map.ctlplane }}",
"tripleo::keepalived::public_virtual_ip": "{{ net_vip_map.ctlplane }}",
{%- endif %}
"tripleo::haproxy::controller_virtual_ip": "{{ net_vip_map.ctlplane }}",
"tripleo::keepalived::controller_virtual_ip": "{{ net_vip_map.ctlplane }}",
"tripleo::keepalived::redis_virtual_ip": "{{ net_vip_map.redis }}",
"tripleo::redis_notification::haproxy_monitor_ip": "{{ net_vip_map.ctlplane }}",
{%- for key, value in cloud_names.items() %}
"{{key}}": "{{value}}",
{%- endfor %}
"enable_internal_tls": {{ enable_internal_tls | lower }}
{%- for service in enabled_services %}
{%- if service_net_map.get(service ~ '_network', 'noop') in net_vip_map %}
{# we set explicit vips for these services, no need to calculate them dynamically #}
{%- if service not in ['redis', 'ganesha', 'keystone_admin_api_vip', 'keystone_public_api_vip'] %}
,"{{service}}_vip": "{{ net_vip_map[service_net_map[service ~ '_network']] }}"
{%- endif %}
{%- endif %}
{%- endfor %}
}
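
A trimmed sketch of the rendered vip_data for a hypothetical deployment
with an External network and internal TLS disabled (addresses invented,
and several keys such as network_virtual_ips and the redis entries
omitted for brevity):

{
  "controller_virtual_ip": "192.168.24.5",
  "keystone_admin_api_vip": "172.16.2.5",
  "keystone_public_api_vip": "10.0.0.5",
  "public_virtual_ip": "10.0.0.5",
  "tripleo::haproxy::public_virtual_ip": "10.0.0.5",
  "tripleo::keepalived::public_virtual_ip": "10.0.0.5",
  "tripleo::haproxy::controller_virtual_ip": "192.168.24.5",
  "tripleo::keepalived::controller_virtual_ip": "192.168.24.5",
  "enable_internal_tls": false
}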


@@ -1,20 +0,0 @@
- hosts: overcloud
  tasks:
    - name: test tripleo-hieradata
      include_role:
        name: tripleo-hieradata
      vars:
        hieradata_template: hieradata.j2.yaml
        # jinja2 escape trick for simple {{ and }} strings:
        variable_start_string: "{{ '{{' }}"
        variable_end_string: "{{ '}}' }}"
        hieradata_files:
          - node
          - common

- hosts: overcloud
  tasks:
    - name: Create hieradata from templates
      include_role:
        name: tripleo-hieradata
        tasks_from: hieradata_vars.yaml


@@ -1,39 +0,0 @@
tripleo-upgrade-hiera
=====================
An Ansible role to set hiera values as JSON key/value pairs during an upgrade.
Role variables
--------------
Required:
* `tripleo_upgrade_key` -- the hiera key to set (optional for remove_all).
* `tripleo_upgrade_value` -- the hiera value to set (not needed for remove and remove_all).
Optional:
* `tripleo_upgrade_hiera_file` -- the hiera file the variable goes to
(defaults to "/etc/puppet/hieradata/upgrade.json").
Test playbook
-------------
Assuming you have tripleo-inventory.yml generated, you can run the
test playbook like:
ANSIBLE_ROLES_PATH=tripleo-common/roles \
ANSIBLE_HOST_KEY_CHECKING=False \
ansible-playbook \
-i tripleo-inventory.yml \
tripleo-common/roles/tripleo-upgrade-hiera/test-playbook.yml
License
-------
Free software: Apache License (2.0)
Author Information
------------------
OpenStack TripleO team


@@ -1 +0,0 @@
tripleo_upgrade_hiera_file: /etc/puppet/hieradata/upgrade.json


@@ -1,35 +0,0 @@
- name: create the directory for hiera file
  file:
    path: "{{ tripleo_upgrade_hiera_file|dirname }}"
    owner: "root"
    group: "root"
    mode: 0755
    state: directory
  become: yes

- name: check if the upgrade file exists
  stat:
    path: "{{ tripleo_upgrade_hiera_file }}"
  register: _tripleo_upgrade_hiera_file
  become: yes

- name: check if the file contains valid json
  command: "jq . {{ tripleo_upgrade_hiera_file }}"
  register: _tripleo_upgrade_hiera_test
  become: yes
  when: _tripleo_upgrade_hiera_file.stat.exists

- name: create the hiera file when no file or empty file
  copy:
    dest: "{{ tripleo_upgrade_hiera_file }}"
    owner: "root"
    group: "root"
    mode: 0644
    content: "{}"
  become: yes
  when: not _tripleo_upgrade_hiera_file.stat.exists or _tripleo_upgrade_hiera_test.stdout == ""

- name: load the json hiera data
  command: "jq . {{ tripleo_upgrade_hiera_file }}"
  register: tripleo_upgrade_hiera_command
  become: yes


@@ -1,6 +0,0 @@
---
- name: delete the upgrade hiera file
  file:
    path: "{{ tripleo_upgrade_hiera_file }}"
    state: absent
  become: yes


@@ -1,22 +0,0 @@
---
- name: ensure tripleo-upgrade hiera file exists
  include_tasks: create-tripleo-upgrade-file.yml

- name: reset tripleo_upgrade_hiera_data_del fact
  set_fact:
    tripleo_upgrade_hiera_data_del: {}

- name: remove a tripleo-upgrade key
  set_fact:
    tripleo_upgrade_hiera_data_del: "{{ tripleo_upgrade_hiera_data_del|combine({upgrade_hiera_item.key: upgrade_hiera_item.value}) }}"
    cacheable: no
  when: upgrade_hiera_item.key != tripleo_upgrade_key
  loop_control:
    loop_var: upgrade_hiera_item
  loop: "{{ tripleo_upgrade_hiera_command.stdout | from_json | default({}) | dict2items }}"

- name: write the updated tripleo-upgrade hiera data
  copy:
    content: "{{ tripleo_upgrade_hiera_data_del | to_nice_json }}"
    dest: "{{ tripleo_upgrade_hiera_file }}"
  become: yes
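
The loop above rebuilds the hiera dict while skipping the key being
removed. A more compact equivalent (a sketch, not what the role shipped;
assumes Ansible 2.7+ for items2dict) would be a single filter chain:

- name: remove a tripleo-upgrade key (compact sketch)
  set_fact:
    tripleo_upgrade_hiera_data_del: "{{ tripleo_upgrade_hiera_command.stdout | from_json | dict2items | rejectattr('key', 'equalto', tripleo_upgrade_key) | list | items2dict }}"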


@@ -1,15 +0,0 @@
---
- name: ensure tripleo-upgrade hiera file exists
  include_tasks: create-tripleo-upgrade-file.yml

- name: set/update the tripleo-upgrade key/value
  set_fact:
    tripleo_upgrade_hiera_data_add: "{{ tripleo_upgrade_hiera_command.stdout | from_json | combine({ tripleo_upgrade_key: tripleo_upgrade_value }) }}"
    cacheable: no

- name: write the updated tripleo-upgrade hiera data
  copy:
    content: "{{ tripleo_upgrade_hiera_data_add | to_nice_json }}"
    dest: "{{ tripleo_upgrade_hiera_file }}"
  become: yes
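
Concretely, if upgrade.json previously held {"a": 1} and the role were
invoked with tripleo_upgrade_key=b and tripleo_upgrade_value=2 (values
invented for illustration), the file written above would become:

{
    "a": 1,
    "b": 2
}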


@@ -1,39 +0,0 @@
- hosts: controller-0
  gather_facts: false
  become: true
  tasks:
    - name: test tripleo-upgrade-hiera - add a first value
      include_role:
        name: tripleo-upgrade-hiera
        tasks_from: set.yml
      vars:
        tripleo_upgrade_key: pacemaker_short_node_names_override
        tripleo_upgrade_value: [ "controller-0" ]
    - name: test tripleo-upgrade-hiera - add another value
      include_role:
        name: tripleo-upgrade-hiera
        tasks_from: set.yml
      vars:
        tripleo_upgrade_key: mysql_short_node_names_override
        tripleo_upgrade_value: [ "controller-0", "controller-1" ]
    - name: test tripleo-upgrade-hiera - update a value
      include_role:
        name: tripleo-upgrade-hiera
        tasks_from: set.yml
      vars:
        tripleo_upgrade_key: mysql_short_node_names_override
        tripleo_upgrade_value: [ "controller-1" ]
    - name: test tripleo-upgrade-hiera - remove a value
      include_role:
        name: tripleo-upgrade-hiera
        tasks_from: remove.yml
      vars:
        tripleo_upgrade_key: pacemaker_short_node_names_override
    - name: test tripleo-upgrade-hiera - remove all values
      include_role:
        name: tripleo-upgrade-hiera
        tasks_from: remove-all.yml

tox.ini

@@ -1,6 +1,6 @@
[tox]
minversion = 3.8
envlist = linters,py27,py37,molecule
envlist = linters,py27,py37
skipsdist = True
[testenv]
@@ -20,7 +20,6 @@ setenv =
passenv =
    ANSIBLE_*
    DOCKER_*
    MOLECULE_*
    PYTEST*
    SSH_AUTH_SOCK
    TERM
@@ -75,20 +74,6 @@ skip_install = true
deps =
commands =
    tox -e linters -- bashate
[testenv:molecule]
deps =
    ansi2html
    docker>=3.7
    mock
    molecule>=2.22rc3
    pytest
    pytest-cov
    pytest-html
    pytest-molecule>=1.0rc1
    pytest-xdist
    selinux
commands =
    python -m pytest --color=yes --html={envlogdir}/reports.html --self-contained-html {tty:-s} {posargs:roles}
[testenv:linters]
skip_install = true