Add nova placement to placement migration

In order to facilitate the nova placement -> extracted placement
migration, while keeping the services up for as long as possible,
we combine the two groups for the load balancer configuration on
the first pass, and pass the appropriate flag for the openstack
playbook to handle the migration. Once it's all done, we re-run
the haproxy playbook without the flag to ensure that the old nova
placement backends are removed.

We also remove haproxy_nova_placement_whitelist_networks as it is
no longer used anywhere.

Change-Id: I85e9182e7c4fe9477d30dd16b4132c1645205cce
Jesse Pretorius 2019-06-12 13:16:36 +01:00 committed by Jonathan Rosser
parent 93920cfe3f
commit cd32d15cc0
6 changed files with 82 additions and 16 deletions
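
In practice, the two-pass approach described in the commit message reduces to the following operator commands, taken from the documentation changes below. The first two runs carry the migration flag, so haproxy serves both the old and new placement backends; the final haproxy run, without the flag, drops the legacy nova placement backends:

    # openstack-ansible setup-infrastructure.yml -e 'galera_upgrade=true' -e 'rabbitmq_upgrade=true' -e 'placement_migrate_flag=true'
    # openstack-ansible setup-openstack.yml -e 'placement_migrate_flag=true'
    # openstack-ansible haproxy-install.yml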


@@ -187,13 +187,15 @@ Upgrade infrastructure
 ~~~~~~~~~~~~~~~~~~~~~~

 We can now go ahead with the upgrade of all the infrastructure components. To
-ensure that rabbitmq and mariadb are upgraded, we pass the appropriate flags.
+ensure that rabbitmq and mariadb are upgraded, and to handle the transition
+from the nova placement service to the extracted placement service, we pass
+the appropriate flags.

 .. code-block:: console

-    # openstack-ansible setup-infrastructure.yml -e 'galera_upgrade=true' -e 'rabbitmq_upgrade=true'
+    # openstack-ansible setup-infrastructure.yml -e 'galera_upgrade=true' -e 'rabbitmq_upgrade=true' -e 'placement_migrate_flag=true'

-With this complete, we can no restart the mariadb containers one at a time,
+With this complete, we can now restart the mariadb containers one at a time,
 ensuring that each is started, responding, and synchronized with the other
 nodes in the cluster before moving on to the next steps. This step allows
 the LXC container configuration that you applied earlier to take effect,
@@ -206,8 +208,21 @@ ensuring that the containers are restarted in a controlled fashion.
 Upgrade OpenStack
 ~~~~~~~~~~~~~~~~~

-We can now go ahead with the upgrade of all the OpenStack components.
+We can now go ahead with the upgrade of all the OpenStack components, passing
+the flag that enables the transition from the nova placement service to the
+extracted placement service.

 .. code-block:: console

-    # openstack-ansible setup-openstack.yml
+    # openstack-ansible setup-openstack.yml -e 'placement_migrate_flag=true'
+
+Remove legacy nova placement service backends
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now that the new extracted placement service is operational, we can remove the
+legacy implementation from the load balancer.
+
+.. code-block:: console
+
+    # openstack-ansible haproxy-install.yml


@@ -31,7 +31,6 @@ haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
 haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
 haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
 haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
-haproxy_nova_placement_whitelist_networks: "{{ haproxy_whitelist_networks }}"
 haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
 haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
 haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"


@@ -35,6 +35,17 @@
       include_tasks: common-tasks/package-cache-proxy.yml
       when: install_method == "source"

+    - name: Stop nova-placement-api services for the upgrade
+      delegate_to: "{{ item }}"
+      service:
+        name: nova-placement-api
+        state: stopped
+        enabled: no
+      when: placement_migrate_flag | default(False)
+      with_items: "{{ groups['nova_api_placement'] }}"
+      run_once: true
+      ignore_errors: true
+
   roles:
     - role: "os_placement"
     - role: "system_crontab_coordination"


@@ -31,10 +31,10 @@ export SCRIPTS_PATH="$(dirname "$(readlink -f "${0}")")"
 export MAIN_PATH="$(dirname "${SCRIPTS_PATH}")"

 # The expected source series name
-export SOURCE_SERIES="rocky"
+export SOURCE_SERIES="stein"

 # The expected target series name
-export TARGET_SERIES="stein"
+export TARGET_SERIES="train"

 ## Functions -----------------------------------------------------------------
@@ -168,18 +168,22 @@ function main {
     bootstrap_ansible

     pushd ${MAIN_PATH}/playbooks
-    RUN_TASKS+=("${SCRIPTS_PATH}/upgrade-utilities/deploy-config-changes.yml")
+    RUN_TASKS+=("${SCRIPTS_PATH}/upgrade-utilities/deploy-config-changes.yml -e 'placement_migrate_flag=true'")
     RUN_TASKS+=("${SCRIPTS_PATH}/upgrade-utilities/pip-conf-removal.yml")
-    # we don't want to trigger container restarts for these groups yet
-    RUN_TASKS+=("setup-hosts.yml --limit '!galera_all:!rabbitmq_all'")
-    # add new container config to containers but don't restart
-    RUN_TASKS+=("setup-hosts.yml -e 'lxc_container_allow_restarts=false' --limit 'galera_all:rabbitmq_all'")
+    # we don't want to trigger container restarts for galera and rabbit
+    # but as there will be no hosts available for metal deployments,
+    # as a fallback option we just run setup-hosts.yml without any arguments
+    RUN_TASKS+=("setup-hosts.yml --limit '!galera_all:!rabbitmq_all' && \
+                openstack-ansible setup-hosts.yml -e 'lxc_container_allow_restarts=false' --limit 'galera_all:rabbitmq_all' || \
+                openstack-ansible setup-hosts.yml")
     # upgrade infrastructure
-    RUN_TASKS+=("setup-infrastructure.yml -e 'galera_upgrade=true' -e 'rabbitmq_upgrade=true'")
+    RUN_TASKS+=("setup-infrastructure.yml -e 'galera_upgrade=true' -e 'rabbitmq_upgrade=true' -e 'placement_migrate_flag=true'")
     # explicitly perform controlled galera cluster restart with new lxc config
     RUN_TASKS+=("${SCRIPTS_PATH}/upgrade-utilities/galera-cluster-rolling-restart.yml")
     # upgrade openstack
-    RUN_TASKS+=("setup-openstack.yml")
+    RUN_TASKS+=("setup-openstack.yml -e 'placement_migrate_flag=true'")
+    # run haproxy setup again without the placement migrate flag to remove the nova placement api backends
+    RUN_TASKS+=("haproxy-install.yml")
     # Run the tasks in order
     for item in ${!RUN_TASKS[@]}; do
         echo "### NOW RUNNING: ${RUN_TASKS[$item]}"


@@ -82,7 +82,7 @@
         line: "{{ item }}"
       with_items: "{{ new_secrets.stdout_lines }}"
       when:
-        - "user_secrets.stdout.find(item) == -1"
+        - not (user_secrets.stdout | regex_search('((^|\n)' ~ item ~ ')'))
       tags:
         - update-secrets
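
The move from a plain substring check to an anchored regex_search is presumably to avoid false positives when one secret name is contained within another; a hypothetical user_secrets.yml fragment illustrates the case:

    # Hypothetical existing entry in user_secrets.yml:
    nova_placement_service_password: 0b8dcd2358a6b2a2f8cc
    # Old check: user_secrets.stdout.find('placement_service_password') matches
    # inside the line above, so a new 'placement_service_password' secret would
    # never be appended.
    # New check: the regex '((^|\n)placement_service_password)' matches only at
    # the start of a line, so the missing secret is appended as intended.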
@@ -97,3 +97,30 @@
         state: absent
       tags:
         - remove-fact-cache
+
+    - name: Define placement service hosts
+      copy:
+        dest: /etc/openstack_deploy/conf.d/placement.yml
+        content: |-
+          {% set config = {'placement-infra_hosts': {}} %}
+          {% for host in groups['compute-infra_hosts'] %}
+          {% set _ = config['placement-infra_hosts'].update(
+               {
+                 host: {
+                   'ip': hostvars[host]['container_address']
+                 }
+               }
+             ) %}
+          {% endfor %}
+          {{ config | to_yaml }}
+      when:
+        - placement_migrate_flag | default(False)
+        - not ('placement-infra_hosts' in groups and groups['placement-infra_hosts'])
+
+    - name: Set placement service is_metal property
+      copy:
+        dest: /etc/openstack_deploy/env.d/placement_metal.yml
+        content: "{{ '---\n' ~ {'container_skel': {'placement_container': {'properties': {'is_metal': true}}}} | to_yaml }}"
+      when:
+        - placement_migrate_flag | default(False)
+        - hostvars[groups['nova_api_placement'][0]]['is_metal'] | default(False)
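
For reference, a sketch of what these two tasks write out, assuming two hypothetical infra hosts; the real host names and addresses come from the compute-infra_hosts group and each host's container_address:

    # /etc/openstack_deploy/conf.d/placement.yml (illustrative)
    placement-infra_hosts:
      infra1:
        ip: 172.29.236.11
      infra2:
        ip: 172.29.236.12

    # /etc/openstack_deploy/env.d/placement_metal.yml (only written when the
    # legacy nova placement service ran on metal)
    ---
    container_skel:
      placement_container:
        properties:
          is_metal: true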


@@ -32,12 +32,22 @@
         name: "{{ inventory_hostname }}"
         state: "stopped"
       delegate_to: "{{ physical_host }}"
+      when: not hostvars[inventory_hostname]['is_metal']

     - name: Start container
       lxc_container:
         name: "{{ inventory_hostname }}"
         state: "started"
       delegate_to: "{{ physical_host }}"
+      when: not hostvars[inventory_hostname]['is_metal']
+
+    - name: Start mariadb
+      service:
+        name: mysql
+        state: started
+      retries: 5
+      delay: 10
+      when: hostvars[inventory_hostname]['is_metal']

   post_tasks:
     - name: Wait for mariadb port 3306 to be available