diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py
index 9f93350622..46410abd48 100644
--- a/sysinv/sysinv/sysinv/sysinv/common/constants.py
+++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py
@@ -1554,3 +1554,4 @@ DEFAULT_DNS_SERVICE_DOMAIN = 'cluster.local'
 # Ansible bootstrap
 ANSIBLE_BOOTSTRAP_FLAG = os.path.join(tsc.VOLATILE_PATH, ".ansible_bootstrap")
 UNLOCK_READY_FLAG = os.path.join(tsc.PLATFORM_CONF_PATH, ".unlock_ready")
+INVENTORY_WAIT_TIMEOUT_IN_SECS = 90
diff --git a/sysinv/sysinv/sysinv/sysinv/common/utils.py b/sysinv/sysinv/sysinv/sysinv/common/utils.py
index 425fe8454c..c82f582b86 100644
--- a/sysinv/sysinv/sysinv/sysinv/common/utils.py
+++ b/sysinv/sysinv/sysinv/sysinv/common/utils.py
@@ -2043,3 +2043,18 @@ def refresh_helm_repo_information():
     except subprocess.CalledProcessError:
         # Just log an error. Don't stop any callers from further execution.
         LOG.error("Failed to update helm repo data for user wrsroot.")
+
+
+def is_inventory_config_complete(dbapi, forihostid):
+    """Check if the initial inventory has completed.
+
+    Due to lack of host state that signifies the completion of inventory, this
+    function retrieves the list of persistent volumes from the database. If
+    the count is not zero, ports, disks and PVs have been inventoried.
+    """
+
+    try:
+        pvs = dbapi.ipv_get_by_ihost(forihostid)
+        return len(pvs) > 0
+    except Exception:
+        return False
diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py
index 23e0758986..10e4ea49b0 100644
--- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py
+++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py
@@ -10846,21 +10846,37 @@ class ConductorManager(service.PeriodicService):
 
         if (os.path.isfile(constants.ANSIBLE_BOOTSTRAP_FLAG) and
                 host.hostname == constants.CONTROLLER_0_HOSTNAME):
-            controller_0_address = self.dbapi.address_get_by_name(
-                constants.CONTROLLER_0_MGMT)
-            if controller_0_address.address != host.mgmt_ip:
-                self.dbapi.ihost_update(host.uuid,
-                    {'mgmt_ip': controller_0_address.address})
+            inventory_completed = True
 
-            personalities = [constants.CONTROLLER]
-            config_uuid = self._config_update_hosts(context, personalities)
-            config_dict = {
-                "personalities": personalities,
-                "host_uuids": [host.uuid],
-                "classes": ['openstack::keystone::endpoint::runtime']
-            }
-            self._config_apply_runtime_manifest(
-                context, config_uuid, config_dict, force=True)
+            # This could be called as part of host creation; wait for
+            # inventory to complete.
+            for i in range(constants.INVENTORY_WAIT_TIMEOUT_IN_SECS):
+                if cutils.is_inventory_config_complete(self.dbapi, host.uuid):
+                    break
+                LOG.info('Inventory incomplete, will try again in 1 second.')
+                greenthread.sleep(1)
+            else:
+                inventory_completed = False
+
+            if inventory_completed:
+                controller_0_address = self.dbapi.address_get_by_name(
+                    constants.CONTROLLER_0_MGMT)
+                if controller_0_address.address != host.mgmt_ip:
+                    self.dbapi.ihost_update(
+                        host.uuid, {'mgmt_ip': controller_0_address.address})
+
+                personalities = [constants.CONTROLLER]
+                config_uuid = self._config_update_hosts(context, personalities)
+                config_dict = {
+                    "personalities": personalities,
+                    "host_uuids": [host.uuid],
+                    "classes": ['openstack::keystone::endpoint::runtime']
+                }
+                self._config_apply_runtime_manifest(
+                    context, config_uuid, config_dict, force=True)
+            else:
+                LOG.error("Unable to reconfigure service endpoints. Timed out "
+                          "waiting for inventory to complete.")
         else:
             LOG.error("Received a request to reconfigure service endpoints "
                       "for host %s under the wrong condition." % host.hostname)
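
Note: the conductor change above leans on Python's for/else, where the else
branch runs only when the loop finishes without hitting break. A minimal
standalone sketch of the same poll-with-timeout pattern follows; wait_for()
and its arguments are illustrative names, not part of sysinv, and time.sleep()
stands in for the greenthread.sleep() the eventlet-based conductor uses to
yield while waiting.

    import time

    INVENTORY_WAIT_TIMEOUT_IN_SECS = 90  # mirrors the new constant above

    def wait_for(predicate, timeout_secs=INVENTORY_WAIT_TIMEOUT_IN_SECS):
        """Poll predicate() once per second until it is true or we time out."""
        for _ in range(timeout_secs):
            if predicate():
                break
            time.sleep(1)
        else:
            # Only reached when the loop exhausted its range without break,
            # i.e. the predicate never became true within the timeout.
            return False
        return True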
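
The new helper in utils.py deliberately maps any database error to False so
callers can simply keep polling. A hedged usage sketch against a stand-in
dbapi follows; FakeDbApi is hypothetical and exists only to illustrate the
contract, and the import path assumes the helper is reachable as
sysinv.common.utils.

    from sysinv.common import utils as cutils

    class FakeDbApi(object):
        """Any object exposing ipv_get_by_ihost(forihostid) satisfies the
        helper; a non-empty result means inventory has completed."""

        def __init__(self, pvs=None, error=None):
            self._pvs = pvs or []
            self._error = error

        def ipv_get_by_ihost(self, forihostid):
            if self._error:
                raise self._error
            return self._pvs

    # Non-empty PV list -> inventory complete.
    assert cutils.is_inventory_config_complete(FakeDbApi(['pv-1']), 'uuid')
    # Empty list or a database exception -> treated as incomplete.
    assert not cutils.is_inventory_config_complete(FakeDbApi(), 'uuid')
    assert not cutils.is_inventory_config_complete(
        FakeDbApi(error=RuntimeError('db down')), 'uuid')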