Enable adding ceph-mon on any compute without a storage-0 host

When deploying a Standard setup using a mgmt network whose address
range exactly matches the number of required nodes, adding ceph-mon
on the compute-0 node is rejected because the system tries to reserve
an IP address for storage-0, a host that will never be configured.
Since there is no IP address available to reserve for storage-0, the
'system ceph-mon-add' command fails.

To avoid reserving an IP address for a host that will never be
installed, the code now verifies whether the ceph-mon is being added
to storage-0 or to a worker and reserves the IP address for that host
instead of always reserving one for storage-0.
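
In essence, the ceph-mon API and the conductor now validate the
requested hostname instead of assuming storage-0. A minimal sketch of
that check (simplified from the diff below; constants are stubbed
here purely for illustration):

  # Simplified model of the new validation; the real code uses
  # sysinv's constants module and raises API/Sysinv exceptions.
  STORAGE = 'storage'
  CONTROLLER = 'controller'
  STORAGE_0_HOSTNAME = 'storage-0'

  def may_host_third_monitor(personality, hostname):
      """Only storage-0 or any worker may host the third Ceph monitor."""
      if personality == CONTROLLER:
          return False
      if personality == STORAGE and hostname != STORAGE_0_HOSTNAME:
          return False
      return True

  assert may_host_third_monitor('worker', 'compute-0')
  assert may_host_third_monitor('storage', 'storage-0')
  assert not may_host_third_monitor('storage', 'storage-1')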

Test Plan:
  PASS: Deploy Standard setup (2+3) with mgmt network netmask 125 on
        an IPv6 network and, after adding all 5 hosts, add ceph-mon on
        compute-0 and unlock all worker nodes.
  PASS: Deploy Dedicated Storage setup (2+1+2) with mgmt network
        netmask 29 on an IPv4 network (see the address-count note
        below).
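
  Note: both netmasks carve out exactly eight addresses (2^3), so in
  the Standard setup the management range is fully consumed by the
  configured hosts, with nothing left over for a storage-0
  reservation (an illustrative count; the exact set of reserved
  management addresses depends on the deployment):

    # Address-count arithmetic for the two test networks.
    ipv6_range = 2 ** (128 - 125)   # /125 prefix -> 8 addresses
    ipv4_range = 2 ** (32 - 29)     # /29 netmask -> 8 addresses
    print(ipv6_range, ipv4_range)   # -> 8 8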

Closes-bug: 1999286

Signed-off-by: Felipe Sanches Zanoni <Felipe.SanchesZanoni@windriver.com>
Change-Id: I1def05f4580131ea6b5a6202461a2fa921d8e5f2

@@ -445,6 +445,11 @@ def _create(ceph_mon):
                 "replication is set to: %s'. Please update replication "
                 "before configuring a monitor on a worker node." % supported_replication))
 
+    # only accept a 3rd ceph monitor if this is storage-0 or any other worker
+    if chost['personality'] == constants.STORAGE and chost['hostname'] != constants.STORAGE_0_HOSTNAME:
+        raise wsme.exc.ClientSideError(
+            _("Ceph monitor can only be added to storage-0 or any worker."))
+
     ceph_mon = _set_defaults(ceph_mon)
 
     # Size of ceph-mon logical volume must be the same for all
@@ -458,8 +463,8 @@ def _create(ceph_mon):
     controller_fs_utils._check_ceph_mon_growth(ceph_mon['ceph_mon_gib'])
     utils.check_all_ceph_mon_growth(ceph_mon['ceph_mon_gib'], chost)
 
-    pecan.request.rpcapi.reserve_ip_for_first_storage_node(
-        pecan.request.context)
+    pecan.request.rpcapi.reserve_ip_for_third_monitor_node(
+        pecan.request.context, chost.hostname)
 
     # In case we add the monitor on a worker node, the state
     # and task must be set properly.
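
The hostname is threaded through sysinv's usual RPC proxy pattern:
the API passes chost.hostname, ConductorAPI packs it into a message,
and the ConductorManager handler performs the reservation. A toy
model of that dispatch (names mirror the diff; the RPC plumbing is
simplified to a direct call):

  # Toy stand-in for the rpcapi proxy -> conductor dispatch.
  class Conductor(object):
      def reserve_ip_for_third_monitor_node(self, context, hostname):
          return "reserved mgmt address for %s" % hostname

  class ConductorAPI(object):
      def __init__(self, manager):
          self._manager = manager

      def call(self, context, method, **kwargs):
          return getattr(self._manager, method)(context, **kwargs)

  api = ConductorAPI(Conductor())
  print(api.call(None, 'reserve_ip_for_third_monitor_node',
                 hostname='compute-0'))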

@@ -12445,20 +12445,29 @@ class ConductorManager(service.PeriodicService):
                                           network_type)
         return "%s-cinder-%s" % ADDRESS_FORMAT_ARGS
 
-    def reserve_ip_for_first_storage_node(self, context):
+    def reserve_ip_for_third_monitor_node(self, context, hostname):
         """
-        Reserve ip address for the first storage node for Ceph monitor
-        when installing Ceph as a second backend
+        Reserve an IP address for a host that will run the third
+        Ceph monitor when Ceph is installed as a storage backend.
 
         :param context: request context.
+        :param hostname: hostname to reserve ip.
         """
+        chost = self.dbapi.ihost_get_by_hostname(hostname)
+
+        # check if hostname is storage-0 or any worker
+        if (chost['personality'] == constants.STORAGE and hostname != constants.STORAGE_0_HOSTNAME) \
+                or chost['personality'] == constants.CONTROLLER:
+            raise exception.SysinvException(_(
+                "Ceph monitor can only be added to storage-0 or any worker."))
+
         network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)
         address_name = cutils.format_address_name(
-            constants.STORAGE_0_HOSTNAME, constants.NETWORK_TYPE_MGMT)
+            hostname, constants.NETWORK_TYPE_MGMT)
 
         try:
             self.dbapi.address_get_by_name(address_name)
-            LOG.debug("Addres %s already reserved, continuing." % address_name)
+            LOG.debug("Address %s already reserved, continuing." % address_name)
         except exception.AddressNotFoundByName:
             LOG.debug("Reserving address for %s." % address_name)
             self._allocate_pool_address(None, network.pool_uuid,
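
Assuming cutils.format_address_name simply joins the hostname and the
network type (a hypothetical stand-in below, not the actual sysinv
helper), the reserved management address name now follows the
requested host instead of always being storage-0's:

  # Hypothetical stand-in for cutils.format_address_name.
  def format_address_name(hostname, network_type):
      return "%s-%s" % (hostname, network_type)

  print(format_address_name('compute-0', 'mgmt'))  # compute-0-mgmt
  print(format_address_name('storage-0', 'mgmt'))  # storage-0-mgmt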

@@ -1343,15 +1343,16 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
                                        kube_rootca_update=kube_rootca_update,
                                        alarm_ignore_list=alarm_ignore_list))
 
-    def reserve_ip_for_first_storage_node(self, context):
+    def reserve_ip_for_third_monitor_node(self, context, hostname):
         """
-        Reserve ip address for the first storage node for Ceph monitor
-        when installing Ceph as a second backend
+        Reserve an IP address for a host that will run the third
+        Ceph monitor when Ceph is installed as a storage backend.
 
         :param context: request context.
+        :param hostname: hostname to reserve ip.
         """
         self.call(context,
-                  self.make_msg('reserve_ip_for_first_storage_node'))
+                  self.make_msg('reserve_ip_for_third_monitor_node', hostname=hostname))
 
     def reserve_ip_for_cinder(self, context):
         """