Remove mon_host msgr v1 setting on upgrade activate

During an upgrade, we specifically set msgr v1 addressing for any new
Nautilus monitors while we still have a mixed Mimic/Nautilus cluster.
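
For illustration, the v1-only monitor address generated in this phase
has roughly the shape below (a minimal sketch based on the puppet code
further down; the IP is hypothetical and 6789 is the legacy msgr v1
monitor port):

    # Sketch: v1-only address enforced while Mimic monitors remain
    mon_ip = "192.168.204.3"                  # hypothetical monitor IP
    mon_addr = "[v1:%s:%s]" % (mon_ip, 6789)  # "[v1:192.168.204.3:6789]"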

After all monitors are running Nautilus, we enable msgr v2 and all
monitors start to use that protocol for communication.
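
Once msgr v2 is on (typically enabled with "ceph mon enable-msgr2"),
each monitor advertises both endpoints; an illustrative sketch of that
dual form, assuming the default ports 3300 (v2) and 6789 (v1):

    # Sketch: monitor address once msgr v2 and v1 are both advertised
    mon_ip = "192.168.204.3"  # hypothetical monitor IP
    dual_addr = "[v2:%s:3300/0,v1:%s:6789/0]" % (mon_ip, mon_ip)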

Finally, during the upgrade activation step, we now clean up the
ceph.conf configuration and restart the monitors. This aligns ceph.conf
with what a fresh installation would produce.
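
Conceptually, the activation-time change below reduces to a state check
like this sketch (the state names are illustrative stand-ins for the
UPGRADE_ACTIVATION_* constants used in the real code):

    # Sketch: stop emitting the v1-enforcing mon_N_addr overrides once
    # the upgrade reaches any activating state, so puppet regenerates a
    # default ceph.conf and the runtime manifest restarts the monitors.
    ACTIVATING_STATES = {"activation-requested", "activating",
                         "activating-hosts", "activation-failed",
                         "activation-complete"}  # illustrative names

    def should_enforce_msgr_v1(upgrade_state):
        return upgrade_state not in ACTIVATING_STATES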

Testing performed:
- Upgrade of 2+2+2
- Upgrade of AIO-DX
- Upgrade of AIO-SX
- Fresh install of AIO-SX
- Fresh install of AIO-DX
- Fresh install of 2+2

Depends-on: https://review.opendev.org/c/starlingx/stx-puppet/+/817859
Depends-on: https://review.opendev.org/c/starlingx/config/+/817795
Story: 2009074
Task: 43968

Signed-off-by: Vinicius Lopes da Silva <vinicius.lopesdasilva@windriver.com>
Change-Id: I3b100280f5a92885c567251b9e7d9d0270bf18bc
Vinicius Lopes da Silva 2021-11-13 15:58:31 -03:00 committed by Robert Church
parent 79e7db3dff
commit 4ac0ba60de
2 changed files with 63 additions and 19 deletions

@@ -1645,6 +1645,40 @@ class ConductorManager(service.PeriodicService):
                          "Skipping deleting ceph monitor."
                          % str(host.hostname))
 
+    def _update_upgraded_ceph_monitors(self, context):
+        """Run the Ceph monitor update manifests after upgrading
+
+        Note: this can be removed in the release after STX6.0
+        returns True if runtime manifests were applied
+        """
+        if StorageBackendConfig.has_backend(
+                self.dbapi, constants.CINDER_BACKEND_CEPH):
+            personalities = [constants.CONTROLLER,
+                             constants.WORKER,
+                             constants.STORAGE]
+
+            monitors = self.dbapi.ceph_mon_get_list()
+            host_uuids = []
+            for mon in monitors:
+                host_uuids.append(mon.ihost_uuid)
+
+            config_uuid = self._config_update_hosts(context, personalities,
+                                                    host_uuids)
+
+            config_dict = {
+                "personalities": personalities,
+                "host_uuids": host_uuids,
+                "classes": ['platform::ceph::upgrade::runtime'],
+                puppet_common.REPORT_STATUS_CFG:
+                    puppet_common.REPORT_UPGRADE_ACTIONS
+            }
+
+            self._config_apply_runtime_manifest(context,
+                                                config_uuid=config_uuid,
+                                                config_dict=config_dict)
+            return True
+        return False
+
     def _split_etcd_security_config(self, context):
         """Update the manifests for separating etcd ca
@@ -11295,6 +11329,9 @@ class ConductorManager(service.PeriodicService):
 
         # Apply etcd split ca puppet manifest for standby controller.
         manifests_applied = self._split_etcd_security_config(context)
 
+        # Make sure to remove v1 from address format after upgrade
+        manifests_applied |= self._update_upgraded_ceph_monitors(context)
+
         manifests_applied = manifests_applied or\
             self._apply_kube_apiserver_parameters(context)

@@ -259,28 +259,35 @@ class CephPuppet(openstack.OpenstackBasePuppet):
             # Use default values from the system config if no upgrade in progress
             pass
         else:
-            ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
-                self.dbapi)
-            LOG.info("Formatting addresses to enforce v1 in monitors")
-            mon_0_ip = ceph_mon_ips[constants.CEPH_MON_0]
-            mon_1_ip = ceph_mon_ips[constants.CEPH_MON_1]
-            mon_2_ip = ceph_mon_ips.get(constants.CEPH_MON_2, None)
-            mon_0_addr = "[v1:%s:%s]" % (self._format_ceph_mon_address(mon_0_ip),
-                                         self.SERVICE_PORT_MON_V1)
-            mon_1_addr = "[v1:%s:%s]" % (self._format_ceph_mon_address(mon_1_ip),
-                                         self.SERVICE_PORT_MON_V1)
-            if mon_2_ip:
-                mon_2_addr = "[v1:%s:%s]" % (self._format_ceph_mon_address(mon_2_ip),
-                                             self.SERVICE_PORT_MON_V1)
-            else:
-                mon_2_addr = None
-            config.update({
-                'platform::ceph::params::mon_0_addr': mon_0_addr,
-                'platform::ceph::params::mon_1_addr': mon_1_addr,
-                'platform::ceph::params::mon_2_addr': mon_2_addr,
-            })
+            # When in any activating state, we'll update the monitors to
+            # drop the v1 enforcement (so skip the special formatting below)
+            if upgrade.state not in [constants.UPGRADE_ACTIVATION_REQUESTED,
+                                     constants.UPGRADE_ACTIVATING,
+                                     constants.UPGRADE_ACTIVATING_HOSTS,
+                                     constants.UPGRADE_ACTIVATION_FAILED,
+                                     constants.UPGRADE_ACTIVATION_COMPLETE]:
+                ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
+                    self.dbapi)
+                LOG.info("Formatting addresses to enforce v1 in monitors")
+                mon_0_ip = ceph_mon_ips[constants.CEPH_MON_0]
+                mon_1_ip = ceph_mon_ips[constants.CEPH_MON_1]
+                mon_2_ip = ceph_mon_ips.get(constants.CEPH_MON_2, None)
+                mon_0_addr = "[v1:%s:%s]" % (self._format_ceph_mon_address(mon_0_ip),
+                                             self.SERVICE_PORT_MON_V1)
+                mon_1_addr = "[v1:%s:%s]" % (self._format_ceph_mon_address(mon_1_ip),
+                                             self.SERVICE_PORT_MON_V1)
+                if mon_2_ip:
+                    mon_2_addr = "[v1:%s:%s]" % (self._format_ceph_mon_address(mon_2_ip),
+                                                 self.SERVICE_PORT_MON_V1)
+                else:
+                    mon_2_addr = None
+                config.update({
+                    'platform::ceph::params::mon_0_addr': mon_0_addr,
+                    'platform::ceph::params::mon_1_addr': mon_1_addr,
+                    'platform::ceph::params::mon_2_addr': mon_2_addr,
+                })
 
         return config
 
     def _get_ceph_osd_config(self, host):