
As per the new RBAC direction in the Zed cycle, the system scope has been dropped from API policies and all policies are hardcoded to project scope, so any user accessing APIs with a system-scoped token will get a 403 error. System scope is dropped from all OpenStack services except Ironic; to support Ironic-only deployments, Keystone keeps both system and project scope. The complete discussion and direction can be found in the gerrit change and TC goal document below:

- https://review.opendev.org/c/openstack/governance/+/847418
- https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#the-issues-we-are-facing-with-scope-concept

As phase 2 of the RBAC goal, services will start enabling the new defaults and project scope by default. For example, Nova did so in:

- https://review.opendev.org/c/openstack/nova/+/866218

Kolla started accessing the services with system-scoped tokens in:

- https://review.opendev.org/c/openstack/kolla-ansible/+/692179

This commit partially reverts that change, keeping system scope only for Keystone and Ironic. All other services are changed to use project-scoped tokens. It also enables scope and new defaults for Nova, which had been disabled by https://review.opendev.org/c/openstack/kolla-ansible/+/870804

Change-Id: I0adbe0a6c39e11d7c9542569085fc5d580f26c9d
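For context, the project-scoped credentials that the task file below consumes through the openstack_auth mapping (via the --os-project-* and --os-user-* flags) look roughly like the following sketch. The values are placeholders, not Kolla defaults, and the system-scope note is only an illustration of the variant kept for Keystone and Ironic.

# Illustrative sketch only: a project-scoped openstack_auth mapping with the
# keys referenced by the task file below (all values are placeholders).
openstack_auth:
  auth_url: "https://192.0.2.10:5000"   # placeholder Keystone endpoint
  username: "admin"                     # placeholder
  password: "secret"                    # placeholder
  project_name: "admin"                 # project scope: a project is named
  domain_name: "default"
  user_domain_name: "default"
# A system-scoped variant (kept only for Keystone and Ironic) would omit the
# project fields and request a system scope instead, e.g. system_scope: "all".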
---
# We need to wait for all expected compute services to register before running
# cells v2 host discovery. This includes virtualised compute services and
# ironic compute services.
# Work with --limit by including only hosts in ansible_play_batch.

- block:
    - name: Waiting for nova-compute services to register themselves
      become: true
      command: >
        {{ kolla_container_engine }} exec kolla_toolbox openstack
        --os-interface {{ openstack_interface }}
        --os-auth-url {{ openstack_auth.auth_url }}
        --os-project-domain-name {{ openstack_auth.domain_name }}
        --os-project-name {{ openstack_auth.project_name }}
        --os-username {{ openstack_auth.username }}
        --os-password {{ openstack_auth.password }}
        --os-identity-api-version 3
        --os-user-domain-name {{ openstack_auth.user_domain_name }}
        --os-region-name {{ openstack_region_name }}
        {% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
        compute service list --format json --column Host --service nova-compute
      register: nova_compute_services
      changed_when: false
      failed_when: false
      retries: 20
      delay: 10
      until:
        - nova_compute_services is success
        # A list containing the 'Host' field of compute services that have
        # registered themselves. Don't exclude compute services that are disabled
        # since these could have been explicitly disabled by the operator. While we
        # could exclude services that are down, the nova-manage cell_v2
        # discover_hosts does not do this so let's not block on it here.
        - (nova_compute_services.stdout |
           from_json |
           map(attribute='Host') |
           list)
          is superset(expected_compute_service_hosts)
      # Execute on one compute per cell, and delegate to a cell conductor.
      when: inventory_hostname == all_computes_in_batch[0]
      delegate_to: "{{ groups[nova_cell_conductor_group][0] }}"

    # NOTE(mgoddard): Use a separate fail task to ensure we fail only those hosts
    # that failed to register.
    - name: Fail if nova-compute service failed to register
      vars:
        # 'Host' field of all registered compute services.
        nova_compute_service_hosts: >-
          {{ hostvars[all_computes_in_batch[0]].nova_compute_services.stdout |
             from_json |
             map(attribute='Host') |
             list }}
        # 'Host' field of failed compute services.
        failed_compute_service_hosts: >-
          {{ expected_compute_service_hosts | difference(nova_compute_service_hosts) | list }}
        # Whether any compute services failed on this host.
        any_failed_services: >-
          {{ ansible_facts.nodename in failed_compute_service_hosts or
             (ansible_facts.hostname ~ "-ironic") in failed_compute_service_hosts }}
      fail:
        msg: >-
          The Nova compute service failed to register itself on the following
          hosts: {{ failed_compute_service_hosts | join(',') }}
      when: >-
        any_failed_services or
        (nova_compute_registration_fatal | bool and
         failed_compute_service_hosts | length > 0)
  vars:
    # For virt, use ansible_facts.nodename rather than inventory_hostname, since this
    # is similar to what nova uses internally as its default for the
    # [DEFAULT] host config option.
    virt_compute_service_hosts: >-
      {{ virt_computes_in_batch |
         map('extract', hostvars, ['ansible_facts', 'nodename']) |
         list }}
    # For ironic, use {{ansible_facts.hostname}}-ironic since this is what we
    # configure for [DEFAULT] host in nova.conf.
    ironic_compute_service_hosts: >-
      {{ ironic_computes_in_batch |
         map('extract', hostvars, ['ansible_facts', 'hostname']) |
         map('regex_replace', '^(.*)$', '\1-ironic') |
         list }}
    expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}"

- name: Include discover_computes.yml
  include_tasks: discover_computes.yml
  # Execute on one compute host per cell.
  when: inventory_hostname == all_computes_in_batch[0]
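To make the retry condition above easier to follow: compute service list --format json --column Host prints a JSON list of objects with a single Host key, and the until clause reduces that to a plain list of host names and checks it is a superset of expected_compute_service_hosts. The task below is a hypothetical, standalone sketch of that evaluation; the host names and the task itself are not part of the role.

# Hypothetical illustration of the filter chain used in the 'until' condition
# above; not part of this role.
- name: Show how the compute registration check evaluates (illustration only)
  vars:
    # Example of what the CLI might print for one virtualised and one ironic
    # compute service (placeholder host names).
    example_stdout: '[{"Host": "compute01"}, {"Host": "compute01-ironic"}]'
    example_expected_hosts: ["compute01", "compute01-ironic"]
  debug:
    msg:
      - "registered: {{ example_stdout | from_json | map(attribute='Host') | list }}"
      - "all expected registered: {{ (example_stdout | from_json | map(attribute='Host') | list) is superset(example_expected_hosts) }}"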