Apply black formatter to dcmanager

This commit applies the Black formatter to the `dcmanager`
files to ensure they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50444

Change-Id: I4a8af46e24d4b5da2757f0a4e20a50a69523c44a
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
Hugo Brito 2024-06-29 16:35:17 -03:00
parent 2967ee254c
commit 8c27f069dd
33 changed files with 3521 additions and 2597 deletions
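
For reference, the kind of rewrite shown throughout this diff can be reproduced with Black's Python API. The snippet below is a minimal, hypothetical sketch and is not part of the commit: the sample source and its names (dcmanager.api, service.launch, CONF.audit_workers) merely mimic the hand-formatted style visible in the hunks that follow, and it assumes the black package is installed (pip install black).

import black

# Hand-formatted input in the pre-commit style: single-quoted strings and a
# call site wrapped across two lines.
SOURCE = (
    "LOG = logging.getLogger('dcmanager.api')\n"
    "launcher = service.launch(cfg.CONF,\n"
    "                          srv, workers=CONF.audit_workers)\n"
)

# format_str() applies the same rules as the CLI: strings are normalized to
# double quotes and calls are re-wrapped at the default 88-column limit.
print(black.format_str(SOURCE, mode=black.Mode()))
# LOG = logging.getLogger("dcmanager.api")
# launcher = service.launch(cfg.CONF, srv, workers=CONF.audit_workers)

In CI, the same check typically runs as a dedicated tox environment (here, stx-distcloud-tox-black per the test plan above) invoking black --check over the package.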


@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -16,4 +15,4 @@
import pbr.version
__version__ = pbr.version.VersionInfo('distributedcloud').version_string()
__version__ = pbr.version.VersionInfo("distributedcloud").version_string()


@@ -23,6 +23,7 @@ import logging as std_logging
import sys
import eventlet
eventlet.monkey_patch(os=False)
# pylint: disable=wrong-import-position
@@ -36,11 +37,12 @@ from dcmanager.api import app # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
from dcorch.common import messaging as dcorch_messaging # noqa: E402
# pylint: enable=wrong-import-position
CONF = cfg.CONF
config.register_options()
LOG = logging.getLogger('dcmanager.api')
LOG = logging.getLogger("dcmanager.api")
def main():
@@ -56,8 +58,10 @@ def main():
LOG.warning("Wrong worker number, worker = %(workers)s", workers)
workers = 1
LOG.info("Server on http://%(host)s:%(port)s with %(workers)s",
{'host': host, 'port': port, 'workers': workers})
LOG.info(
"Server on http://%(host)s:%(port)s with %(workers)s",
{"host": host, "port": port, "workers": workers},
)
messaging.setup()
dcorch_messaging.setup()
systemd.notify_once()
@@ -72,5 +76,5 @@ def main():
app.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -19,6 +19,7 @@ DC Manager Audit Service.
"""
import eventlet
eventlet.monkey_patch()
# pylint: disable=wrong-import-position
@@ -29,28 +30,28 @@ from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
config.register_keystone_options()
LOG = logging.getLogger('dcmanager.audit')
LOG = logging.getLogger("dcmanager.audit")
CONF = cfg.CONF
def main():
logging.register_options(CONF)
CONF(project='dcmanager', prog='dcmanager-audit')
logging.setup(cfg.CONF, 'dcmanager-audit')
CONF(project="dcmanager", prog="dcmanager-audit")
logging.setup(cfg.CONF, "dcmanager-audit")
logging.set_defaults()
messaging.setup()
from dcmanager.audit import service as audit
srv = audit.DCManagerAuditService()
launcher = service.launch(cfg.CONF,
srv, workers=CONF.audit_workers)
launcher = service.launch(cfg.CONF, srv, workers=CONF.audit_workers)
LOG.info("Starting...")
LOG.debug("Configuration:")
@@ -59,5 +60,5 @@ def main():
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -19,6 +19,7 @@ DC Manager Audit Worker Service.
"""
import eventlet
eventlet.monkey_patch()
# pylint: disable=wrong-import-position
@@ -29,28 +30,28 @@ from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
config.register_keystone_options()
LOG = logging.getLogger('dcmanager.audit-worker')
LOG = logging.getLogger("dcmanager.audit-worker")
CONF = cfg.CONF
def main():
logging.register_options(CONF)
CONF(project='dcmanager', prog='dcmanager-audit-worker')
logging.setup(cfg.CONF, 'dcmanager-audit-worker')
CONF(project="dcmanager", prog="dcmanager-audit-worker")
logging.setup(cfg.CONF, "dcmanager-audit-worker")
logging.set_defaults()
messaging.setup()
from dcmanager.audit import service as audit
srv = audit.DCManagerAuditWorkerService()
launcher = service.launch(cfg.CONF,
srv, workers=CONF.audit_worker_workers)
launcher = service.launch(cfg.CONF, srv, workers=CONF.audit_worker_workers)
LOG.info("Starting...")
LOG.debug("Configuration:")
@@ -59,5 +60,5 @@ def main():
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -45,32 +45,37 @@ def do_db_sync():
def add_command_parsers(subparsers):
parser = subparsers.add_parser('db_version')
parser = subparsers.add_parser("db_version")
parser.set_defaults(func=do_db_version)
parser = subparsers.add_parser('db_sync')
parser = subparsers.add_parser("db_sync")
parser.set_defaults(func=do_db_sync)
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
parser.add_argument("version", nargs="?")
parser.add_argument("current_version", nargs="?")
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Show available commands.',
handler=add_command_parsers)
command_opt = cfg.SubCommandOpt(
"command",
title="Commands",
help="Show available commands.",
handler=add_command_parsers,
)
def main():
logging.register_options(CONF)
logging.setup(CONF, 'dcmanager-manage')
logging.setup(CONF, "dcmanager-manage")
CONF.register_cli_opt(command_opt)
try:
default_config_files = cfg.find_config_files('dcmanager',
'dcmanager-engine')
CONF(sys.argv[1:], project='dcmanager', prog='dcmanager-manage',
version=version.version_info.version_string(),
default_config_files=default_config_files)
default_config_files = cfg.find_config_files("dcmanager", "dcmanager-engine")
CONF(
sys.argv[1:],
project="dcmanager",
prog="dcmanager-manage",
version=version.version_info.version_string(),
default_config_files=default_config_files,
)
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
@@ -80,5 +85,5 @@ def main():
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -33,28 +33,27 @@ from dcmanager.common import config # noqa: E402
from dcmanager.common import consts # noqa: E402
from dcmanager.common import messaging # noqa: E402
from dcorch.common import messaging as dcorch_messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
config.register_keystone_options()
LOG = logging.getLogger('dcmanager.engine')
LOG = logging.getLogger("dcmanager.engine")
def main():
logging.register_options(cfg.CONF)
cfg.CONF(project='dcmanager', prog='dcmanager-engine')
logging.setup(cfg.CONF, 'dcmanager-engine')
cfg.CONF(project="dcmanager", prog="dcmanager-engine")
logging.setup(cfg.CONF, "dcmanager-engine")
logging.set_defaults()
messaging.setup()
dcorch_messaging.setup()
from dcmanager.manager import service as manager
srv = manager.DCManagerService(cfg.CONF.host,
consts.TOPIC_DC_MANAGER)
launcher = service.launch(cfg.CONF,
srv, workers=cfg.CONF.workers)
srv = manager.DCManagerService(cfg.CONF.host, consts.TOPIC_DC_MANAGER)
launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.workers)
LOG.info("Starting...")
LOG.debug("Configuration:")
@@ -65,5 +64,5 @@ def main():
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -19,6 +19,7 @@ DC Manager Orchestrator Service.
"""
import eventlet
eventlet.monkey_patch()
# pylint: disable=wrong-import-position
@@ -29,10 +30,11 @@ from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
CONF = cfg.CONF
LOG = logging.getLogger('dcmanager.orchestrator')
LOG = logging.getLogger("dcmanager.orchestrator")
def main():
@@ -40,16 +42,15 @@ def main():
config.register_options()
config.register_keystone_options()
logging.register_options(CONF)
CONF(project='dcmanager', prog='dcmanager-orchestrator')
logging.setup(CONF, 'dcmanager-orchestrator')
CONF(project="dcmanager", prog="dcmanager-orchestrator")
logging.setup(CONF, "dcmanager-orchestrator")
logging.set_defaults()
messaging.setup()
from dcmanager.orchestrator import service as orchestrator
srv = orchestrator.DCManagerOrchestratorService()
launcher = service.launch(CONF,
srv, workers=cfg.CONF.orch_workers)
launcher = service.launch(CONF, srv, workers=cfg.CONF.orch_workers)
LOG.info("Starting...")
LOG.debug("Configuration:")
@@ -58,5 +59,5 @@ def main():
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -24,6 +24,7 @@ DC Manager State Engine Server.
"""
import eventlet
eventlet.monkey_patch()
# pylint: disable=wrong-import-position
@@ -35,18 +36,19 @@ from oslo_service import service # noqa: E402
from dcmanager.common import config # noqa: E402
from dcmanager.common import messaging # noqa: E402
from dcorch.common import messaging as dcorch_messaging # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
config.register_keystone_options()
LOG = logging.getLogger('dcmanager.state')
LOG = logging.getLogger("dcmanager.state")
def main():
logging.register_options(cfg.CONF)
cfg.CONF(project='dcmanager', prog='dcmanager-state')
logging.setup(cfg.CONF, 'dcmanager-state')
cfg.CONF(project="dcmanager", prog="dcmanager-state")
logging.setup(cfg.CONF, "dcmanager-state")
logging.set_defaults()
messaging.setup()
dcorch_messaging.setup()
@@ -55,18 +57,21 @@ def main():
# Override values from /etc/dcmanager/dcmanager.conf specific
# to dcmanager-state:
cfg.CONF.set_override('max_pool_size', 10, group='database')
cfg.CONF.set_override('max_overflow', 100, group='database')
cfg.CONF.set_override("max_pool_size", 10, group="database")
cfg.CONF.set_override("max_overflow", 100, group="database")
LOG.info("Starting...")
LOG.debug("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.DEBUG)
LOG.info("Launching service, host=%s, state_workers=%s ...",
cfg.CONF.host, cfg.CONF.state_workers)
LOG.info(
"Launching service, host=%s, state_workers=%s ...",
cfg.CONF.host,
cfg.CONF.state_workers,
)
srv = state.DCManagerStateService(cfg.CONF.host)
launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.state_workers)
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@@ -183,8 +183,7 @@ class PeerGroupAssociationCombinationNotFound(NotFound):
class PeerGroupAssociationTargetNotMatch(NotFound):
message = _(
"Peer Group Association with peer site controller "
"UUID %(uuid)s doesn't match."
"Peer Group Association with peer site controller UUID %(uuid)s doesn't match."
)
@@ -237,8 +236,7 @@ class CertificateUploadError(DCManagerException):
class LicenseInstallError(DCManagerException):
message = _(
"Error while installing license on subcloud: "
"%(subcloud_id)s. %(error_message)s"
"Error while installing license on subcloud: %(subcloud_id)s. %(error_message)s"
)


@@ -623,10 +623,7 @@ def validate_install_values(payload, ip_version=None, subcloud=None):
# the expected value is less than the default. so throw an error.
pecan.abort(
400,
_(
"persistent_size of %s MB is less than "
"the permitted minimum %s MB "
)
_("persistent_size of %s MB is less than the permitted minimum %s MB")
% (str(persistent_size), consts.DEFAULT_PERSISTENT_SIZE),
)


@@ -6,12 +6,10 @@
import threading
from oslo_config import cfg
from oslo_log import log as logging
from fm_api import constants as fm_const
from fm_api import fm_api
from oslo_config import cfg
from oslo_log import log as logging
from dccommon import consts as dccommon_consts
from dcmanager.common import consts
@@ -22,7 +20,6 @@ from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.manager.system_peer_manager import SystemPeerManager
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -31,9 +28,8 @@ class PeerGroupAuditManager(manager.Manager):
"""Manages audit related tasks."""
def __init__(self, subcloud_manager, peer_group_id, *args, **kwargs):
LOG.debug(_('PeerGroupAuditManager initialization...'))
super().__init__(service_name="peer_group_audit_manager",
*args, **kwargs)
LOG.debug(_("PeerGroupAuditManager initialization..."))
super().__init__(service_name="peer_group_audit_manager", *args, **kwargs)
self.context = context.get_admin_context()
self.fm_api = fm_api.FaultAPIs()
self.subcloud_manager = subcloud_manager
@@ -42,118 +38,121 @@ class PeerGroupAuditManager(manager.Manager):
self.thread = None
self.thread_lock = threading.Lock()
def _get_subclouds_by_peer_group_from_system_peer(self,
dc_client,
system_peer,
peer_group_name):
def _get_subclouds_by_peer_group_from_system_peer(
self, dc_client, system_peer, peer_group_name
):
try:
subclouds = dc_client.get_subcloud_list_by_peer_group(
peer_group_name)
subclouds = dc_client.get_subcloud_list_by_peer_group(peer_group_name)
return subclouds
except Exception:
LOG.exception(f"Failed to get subclouds of peer group "
f"{peer_group_name} from DC: "
f"{system_peer.peer_name}")
LOG.exception(
f"Failed to get subclouds of peer group {peer_group_name} "
f"from DC: {system_peer.peer_name}"
)
@staticmethod
def _get_association_sync_status_from_peer_site(dc_client,
system_peer,
peer_group_id):
def _get_association_sync_status_from_peer_site(
dc_client, system_peer, peer_group_id
):
try:
# Get peer site system peer
dc_peer_system_peer = dc_client.get_system_peer(
utils.get_local_system().uuid)
utils.get_local_system().uuid
)
association = dc_client.get_peer_group_association_with_peer_id_and_pg_id(
dc_peer_system_peer.get("id"), peer_group_id
)
return association.get("sync-status")
except Exception:
LOG.exception(f"Failed to get subclouds of peer group "
f"{peer_group_id} from DC: {system_peer.peer_name}")
LOG.exception(
f"Failed to get subclouds of peer group {peer_group_id} "
f"from DC: {system_peer.peer_name}"
)
def _update_remote_peer_group_migration_status(self,
system_peer,
peer_group_name,
migration_status):
def _update_remote_peer_group_migration_status(
self, system_peer, peer_group_name, migration_status
):
dc_client = SystemPeerManager.get_peer_dc_client(system_peer)
peer_group_kwargs = {
'migration_status': migration_status
}
dc_client.update_subcloud_peer_group(peer_group_name,
**peer_group_kwargs)
LOG.info(f"Updated Subcloud Peer Group {peer_group_name} on "
f"peer site {system_peer.peer_name}, set migration_status "
f"to: {migration_status}")
peer_group_kwargs = {"migration_status": migration_status}
dc_client.update_subcloud_peer_group(peer_group_name, **peer_group_kwargs)
LOG.info(
f"Updated Subcloud Peer Group {peer_group_name} on peer site "
f"{system_peer.peer_name}, set migration_status to: {migration_status}"
)
def _get_local_subclouds_to_update_and_delete(self,
local_peer_group,
remote_subclouds,
remote_sync_status):
def _get_local_subclouds_to_update_and_delete(
self, local_peer_group, remote_subclouds, remote_sync_status
):
local_subclouds_to_update = list()
local_subclouds_to_delete = list()
any_rehome_failed = False
remote_subclouds_dict = {remote_subcloud.get('region-name'):
remote_subcloud for remote_subcloud
in remote_subclouds}
remote_subclouds_dict = {
remote_subcloud.get("region-name"): remote_subcloud
for remote_subcloud in remote_subclouds
}
local_subclouds = db_api.subcloud_get_for_peer_group(
self.context, local_peer_group.id)
self.context, local_peer_group.id
)
for local_subcloud in local_subclouds:
remote_subcloud = remote_subclouds_dict.get(
local_subcloud.region_name)
remote_subcloud = remote_subclouds_dict.get(local_subcloud.region_name)
if remote_subcloud:
# Check if the remote subcloud meets the conditions for update
# if it is 'managed' and the local subcloud is not
# in 'secondary' status
if (remote_subcloud.get('management-state') ==
dccommon_consts.MANAGEMENT_MANAGED and
not utils.subcloud_is_secondary_state(
local_subcloud.deploy_status)):
MANAGED = dccommon_consts.MANAGEMENT_MANAGED
if remote_subcloud.get(
"management-state"
) == MANAGED and not utils.subcloud_is_secondary_state(
local_subcloud.deploy_status
):
local_subclouds_to_update.append(local_subcloud)
# Sync rehome_data from remote to local subcloud if the remote
# PGA sync_status is out-of-sync once migration completes,
# indicating any bootstrap values/address updates to
# the subcloud on the remote site.
if remote_sync_status == \
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC:
if remote_sync_status == consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC:
self._sync_rehome_data(
local_subcloud.id, remote_subcloud.get('rehome_data'))
elif remote_subcloud.get('deploy-status') in \
(consts.DEPLOY_STATE_REHOME_FAILED,
consts.DEPLOY_STATE_REHOME_PREP_FAILED):
local_subcloud.id, remote_subcloud.get("rehome_data")
)
elif remote_subcloud.get("deploy-status") in (
consts.DEPLOY_STATE_REHOME_FAILED,
consts.DEPLOY_STATE_REHOME_PREP_FAILED,
):
# Set local subcloud to rehome-failed if the remote is
# rehome-failed or rehome-prep-failed, otherwise, the
# deploy_status will remain rehome-pending, which will
# block the correction of the bootstrap values/address.
db_api.subcloud_update(
self.context, local_subcloud.id,
deploy_status=consts.DEPLOY_STATE_REHOME_FAILED)
self.context,
local_subcloud.id,
deploy_status=consts.DEPLOY_STATE_REHOME_FAILED,
)
any_rehome_failed = True
else:
local_subclouds_to_delete.append(local_subcloud)
return local_subclouds_to_update, local_subclouds_to_delete, \
any_rehome_failed
return local_subclouds_to_update, local_subclouds_to_delete, any_rehome_failed
def _set_local_subcloud_to_secondary(self, subcloud):
try:
LOG.info("Set local subcloud %s to secondary" % subcloud.name)
# There will be an exception when unmanaging
# a subcloud in 'unmanaged' state.
if subcloud.management_state != \
dccommon_consts.MANAGEMENT_UNMANAGED:
if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED:
self.subcloud_manager.update_subcloud(
self.context,
subcloud.id,
management_state=dccommon_consts.
MANAGEMENT_UNMANAGED)
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
)
self.subcloud_manager.update_subcloud(
self.context,
subcloud.id,
deploy_status=consts.DEPLOY_STATE_SECONDARY)
self.context, subcloud.id, deploy_status=consts.DEPLOY_STATE_SECONDARY
)
except Exception as e:
LOG.exception(f"Failed to update local non-secondary "
f"and offline subcloud [{subcloud.name}], err: {e}")
LOG.exception(
"Failed to update local non-secondary and offline subcloud "
f"[{subcloud.name}], err: {e}"
)
raise e
def _sync_rehome_data(self, subcloud_id, rehome_data):
@@ -164,86 +163,99 @@ class PeerGroupAuditManager(manager.Manager):
LOG.info("Local peer group in migrating state, quit audit")
return
LOG.info("Auditing remote subcloud peer group:[%s] "
"migration_status:[%s] group_priority[%s], "
"local subcloud peer group:[%s] "
"migration_status:[%s] group_priority[%s]" %
(remote_peer_group.get("peer_group_name"),
remote_peer_group.get("migration_status"),
remote_peer_group.get("group_priority"),
local_peer_group.peer_group_name,
local_peer_group.migration_status,
local_peer_group.group_priority))
LOG.info(
"Auditing remote subcloud peer group:[%s] migration_status:[%s] "
"group_priority[%s], local subcloud peer group:[%s] "
"migration_status:[%s] group_priority[%s]"
% (
remote_peer_group.get("peer_group_name"),
remote_peer_group.get("migration_status"),
remote_peer_group.get("group_priority"),
local_peer_group.peer_group_name,
local_peer_group.migration_status,
local_peer_group.group_priority,
)
)
# if remote subcloud peer group's migration_status is 'migrating',
# 'unmanaged' all local subclouds in local peer group and change its
# deploy status to consts.DEPLOY_STATE_REHOME_PENDING to stop cert-mon
# audits.
if remote_peer_group.get("migration_status") == \
consts.PEER_GROUP_MIGRATING:
if remote_peer_group.get("migration_status") == consts.PEER_GROUP_MIGRATING:
# Unmanaged all local subclouds of peer group
LOG.info(f"Unmanaged all local subclouds of peer group "
f"{local_peer_group.peer_group_name} "
f"since remote is in migrating state")
subclouds = db_api.subcloud_get_for_peer_group(self.context,
local_peer_group.id)
LOG.info(
"Unmanaged all local subclouds of peer group "
f"{local_peer_group.peer_group_name} since remote is in migrating state"
)
subclouds = db_api.subcloud_get_for_peer_group(
self.context, local_peer_group.id
)
for subcloud in subclouds:
try:
# update_subcloud raises an exception when trying to unmanage
# an already unmanaged subcloud, so the deploy status
# update must be done separately
if subcloud.management_state != \
dccommon_consts.MANAGEMENT_UNMANAGED:
if (
subcloud.management_state
!= dccommon_consts.MANAGEMENT_UNMANAGED
):
# Unmanage and update the deploy-status
LOG.info("Unmanaging and setting the local subcloud "
f"{subcloud.name} deploy status to "
f"{consts.DEPLOY_STATE_REHOME_PENDING}")
LOG.info(
"Unmanaging and setting the local subcloud "
f"{subcloud.name} deploy status to "
f"{consts.DEPLOY_STATE_REHOME_PENDING}"
)
self.subcloud_manager.update_subcloud(
self.context,
subcloud.id,
management_state=dccommon_consts.
MANAGEMENT_UNMANAGED,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
)
else:
# Already unmanaged, just update the deploy-status
LOG.info(f"Setting the local subcloud {subcloud.name} "
"deploy status to "
f"{consts.DEPLOY_STATE_REHOME_PENDING}")
LOG.info(
f"Setting the local subcloud {subcloud.name} "
f"deploy status to {consts.DEPLOY_STATE_REHOME_PENDING}"
)
self.subcloud_manager.update_subcloud(
self.context,
subcloud.id,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
)
except Exception as e:
LOG.exception(f"Fail to unmanage local subcloud "
f"{subcloud.name}, err: {e}")
LOG.exception(
f"Fail to unmanage local subcloud {subcloud.name}, err: {e}"
)
raise e
SystemPeerManager.update_sync_status(
self.context, system_peer,
self.context,
system_peer,
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
local_peer_group, remote_peer_group)
local_peer_group,
remote_peer_group,
)
self.require_audit_flag = False
# if remote subcloud peer group's migration_status is 'complete',
# get remote subclouds. For 'managed+online' subclouds,
# set 'unmanaged+secondary' to local on same subclouds
elif remote_peer_group.get("migration_status") == \
consts.PEER_GROUP_MIGRATION_COMPLETE:
elif (
remote_peer_group.get("migration_status")
== consts.PEER_GROUP_MIGRATION_COMPLETE
):
dc_client = SystemPeerManager.get_peer_dc_client(system_peer)
remote_subclouds = \
self._get_subclouds_by_peer_group_from_system_peer(
dc_client,
system_peer,
remote_peer_group.get("peer_group_name"))
remote_sync_status = \
self._get_association_sync_status_from_peer_site(
dc_client,
system_peer,
remote_peer_group.get("id"))
remote_subclouds = self._get_subclouds_by_peer_group_from_system_peer(
dc_client, system_peer, remote_peer_group.get("peer_group_name")
)
remote_sync_status = self._get_association_sync_status_from_peer_site(
dc_client, system_peer, remote_peer_group.get("id")
)
local_subclouds_to_update, local_subclouds_to_delete, \
any_rehome_failed = \
local_subclouds_to_update, local_subclouds_to_delete, any_rehome_failed = (
self._get_local_subclouds_to_update_and_delete(
local_peer_group, remote_subclouds, remote_sync_status)
local_peer_group, remote_subclouds, remote_sync_status
)
)
for subcloud in local_subclouds_to_update:
self._set_local_subcloud_to_secondary(subcloud)
@@ -253,85 +265,90 @@ class PeerGroupAuditManager(manager.Manager):
for subcloud in local_subclouds_to_delete:
self._set_local_subcloud_to_secondary(subcloud)
try:
self.subcloud_manager.delete_subcloud(
self.context, subcloud.id)
self.subcloud_manager.delete_subcloud(self.context, subcloud.id)
LOG.info(f"Deleted local subcloud {subcloud.name}")
except Exception as e:
SystemPeerManager.update_sync_status(
self.context, system_peer,
self.context,
system_peer,
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
local_peer_group, remote_peer_group)
LOG.exception(f"Failed to delete local subcloud "
f"[{subcloud.name}] that does not exist "
f"under the same subcloud_peer_group on "
f"peer site, err: {e}")
local_peer_group,
remote_peer_group,
)
LOG.exception(
f"Failed to delete local subcloud [{subcloud.name}] that does "
"not exist under the same subcloud_peer_group on peer site, "
f"err: {e}"
)
raise e
if remote_peer_group.get("system_leader_id") == system_peer.peer_uuid:
self._clear_or_raise_alarm(system_peer,
local_peer_group,
remote_peer_group)
self._clear_or_raise_alarm(
system_peer, local_peer_group, remote_peer_group
)
db_api.subcloud_peer_group_update(
self.context,
local_peer_group.id,
system_leader_id=system_peer.peer_uuid,
system_leader_name=system_peer.peer_name)
system_leader_name=system_peer.peer_name,
)
self._update_remote_peer_group_migration_status(
system_peer,
remote_peer_group.get("peer_group_name"),
None)
system_peer, remote_peer_group.get("peer_group_name"), None
)
if not (remote_sync_status == consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
and any_rehome_failed):
if not (
remote_sync_status == consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
and any_rehome_failed
):
SystemPeerManager.update_sync_status(
self.context, system_peer,
self.context,
system_peer,
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
local_peer_group, remote_peer_group)
local_peer_group,
remote_peer_group,
)
self.require_audit_flag = False
else:
# If remote peer group migration_status is 'None'
self.require_audit_flag = False
def _clear_or_raise_alarm(self,
system_peer,
local_peer_group,
remote_peer_group):
def _clear_or_raise_alarm(self, system_peer, local_peer_group, remote_peer_group):
# If local subcloud peer group's group_priority is
# lower than remote subcloud peer group's group_priority,
# an alarm will be raised.
# lower number means higher priority
entity_instance_id = "peer_group=%s,peer=%s" % \
(local_peer_group.peer_group_name, system_peer.peer_uuid)
if local_peer_group.group_priority < remote_peer_group.get('group_priority'):
LOG.warning("Alarm: local subcloud peer group ["
f"{local_peer_group.peer_group_name}] "
f"is managed by remote system ["
f"{system_peer.peer_name}]")
entity_instance_id = "peer_group=%s,peer=%s" % (
local_peer_group.peer_group_name,
system_peer.peer_uuid,
)
if local_peer_group.group_priority < remote_peer_group.get("group_priority"):
LOG.warning(
f"Alarm: local subcloud peer group [{local_peer_group.peer_group_name}]"
f" is managed by remote system [{system_peer.peer_name}]"
)
try:
fault = fm_api.Fault(
alarm_id=fm_const.
FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
alarm_state=fm_const.FM_ALARM_STATE_SET,
entity_type_id=fm_const.
FM_ENTITY_TYPE_SUBCLOUD_PEER_GROUP,
entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD_PEER_GROUP,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
reason_text=("Subcloud peer group "
"(peer_group_name=%s) "
"is managed by remote "
"system (peer_uuid=%s) "
"with a lower priority." %
(local_peer_group.peer_group_name,
system_peer.peer_uuid)),
reason_text=(
"Subcloud peer group (peer_group_name=%s) is managed by "
"remote system (peer_uuid=%s) with a lower priority."
% (local_peer_group.peer_group_name, system_peer.peer_uuid)
),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.
ALARM_PROBABLE_CAUSE_UNKNOWN,
proposed_repair_action="Check the reported peer group "
"state. Migrate it back to the current system if the "
"state is 'rehomed' and the current system is stable. "
"Otherwise, wait until these conditions are met.",
service_affecting=False)
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_UNKNOWN,
proposed_repair_action=(
"Check the reported peer group state. Migrate it back to the "
"current system if the state is 'rehomed' and the current "
"system is stable. Otherwise, wait until these conditions "
"are met."
),
service_affecting=False,
)
self.fm_api.set_fault(fault)
except Exception as e:
LOG.exception(e)
@@ -339,17 +356,19 @@ class PeerGroupAuditManager(manager.Manager):
try:
fault = self.fm_api.get_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
entity_instance_id)
entity_instance_id,
)
if fault:
LOG.info(f"Clear alarm: {entity_instance_id}")
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
entity_instance_id)
entity_instance_id,
)
except Exception:
LOG.exception(
f"Problem clearing fault [{entity_instance_id}], "
f"alarm_id="
f"{fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED}")
f"Problem clearing fault [{entity_instance_id}], alarm_id="
f"{fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED}"
)
def _do_audit(self, system_peer, remote_peer_group, local_peer_group):
with self.thread_lock:
@@ -367,20 +386,24 @@ class PeerGroupAuditManager(manager.Manager):
def start(self, system_peer, remote_peer_group, local_peer_group):
if self.thread_lock.locked():
LOG.warning(f"Audit thread for {local_peer_group.peer_group_name} "
f"has already started")
LOG.warning(
f"Audit thread for {local_peer_group.peer_group_name} "
"has already started"
)
else:
self.thread = threading.Thread(
target=self._do_audit,
args=(system_peer, remote_peer_group, local_peer_group))
args=(system_peer, remote_peer_group, local_peer_group),
)
self.thread.start()
def audit_peer_group_from_system(self,
system_peer,
remote_peer_group,
local_peer_group):
LOG.info(f"Audit peer group [{local_peer_group.peer_group_name}] "
f"with remote system {system_peer.peer_name}")
def audit_peer_group_from_system(
self, system_peer, remote_peer_group, local_peer_group
):
LOG.info(
f"Audit peer group [{local_peer_group.peer_group_name}] "
f"with remote system {system_peer.peer_name}"
)
self.start(system_peer, remote_peer_group, local_peer_group)
@staticmethod
@@ -391,21 +414,23 @@ class PeerGroupAuditManager(manager.Manager):
for system in system_peers:
try:
dc_client = SystemPeerManager.get_peer_dc_client(system)
payload = db_api.subcloud_peer_group_db_model_to_dict(
peer_group)
if 'created-at' in payload:
del payload['created-at']
if 'updated-at' in payload:
del payload['updated-at']
payload['peer_uuid'] = local_system.uuid
LOG.info("Send audit payload [%s] of peer group %s" %
(payload, peer_group.peer_group_name))
payload = db_api.subcloud_peer_group_db_model_to_dict(peer_group)
if "created-at" in payload:
del payload["created-at"]
if "updated-at" in payload:
del payload["updated-at"]
payload["peer_uuid"] = local_system.uuid
LOG.info(
"Send audit payload [%s] of peer group %s"
% (payload, peer_group.peer_group_name)
)
response = dc_client.audit_subcloud_peer_group(
peer_group.peer_group_name,
**payload)
peer_group.peer_group_name, **payload
)
if response:
return response
except Exception:
LOG.exception("Failed to send audit request for peer group "
f"{peer_group.peer_group_name} to DC: "
f"{system.peer_name}")
LOG.exception(
"Failed to send audit request for peer group "
f"{peer_group.peer_group_name} to DC: {system.peer_name}"
)


@@ -19,7 +19,6 @@ from dcmanager.db import api as db_api
from dcmanager.manager import peer_group_audit_manager as pgam
from dcmanager.manager.system_peer_manager import SystemPeerManager
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -46,27 +45,35 @@ class PeerMonitor(object):
self.fm_api.clear_fault(alarm_id, entity_instance_id)
except Exception as e:
LOG.exception(
"Problem clearing fault for peer %s, alarm_id=%s "
"error: %s" % (self.peer.peer_uuid, alarm_id, e))
"Problem clearing fault for peer %s, alarm_id=%s error: %s"
% (self.peer.peer_uuid, alarm_id, e)
)
def _raise_failure(self):
alarm_id = fm_const.FM_ALARM_ID_DC_SYSTEM_PEER_HEARTBEAT_FAILED
entity_instance_id = "peer=%s" % self.peer.peer_uuid
reason_text = ("Peer %s (peer_uuid=%s) connections in "
"disconnected state." % (self.peer.peer_name,
self.peer.peer_uuid))
reason_text = "Peer %s (peer_uuid=%s) connections in disconnected state." % (
self.peer.peer_name,
self.peer.peer_uuid,
)
severity = fm_const.FM_ALARM_SEVERITY_MAJOR
peer_groups = db_api.subcloud_peer_group_get_by_leader_id(
self.context, self.peer.peer_uuid)
self.context, self.peer.peer_uuid
)
if len(peer_groups) > 0:
peer_group_names = [peer_group.peer_group_name
for peer_group in peer_groups]
reason_text = ("Peer %s (peer_uuid=%s) is in disconnected "
"state. The following subcloud peer groups "
"are impacted: %s." %
(self.peer.peer_name, self.peer.peer_uuid,
", ".join(peer_group_names)))
peer_group_names = [
peer_group.peer_group_name for peer_group in peer_groups
]
reason_text = (
"Peer %s (peer_uuid=%s) is in disconnected state. The following "
"subcloud peer groups are impacted: %s."
% (
self.peer.peer_name,
self.peer.peer_uuid,
", ".join(peer_group_names),
)
)
severity = fm_const.FM_ALARM_SEVERITY_CRITICAL
try:
@@ -79,18 +86,22 @@ class PeerMonitor(object):
reason_text=reason_text,
alarm_type=fm_const.FM_ALARM_TYPE_1,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_UNKNOWN,
proposed_repair_action="Check the connectivity between "
"the current system and the reported peer site. If the "
"peer system is down, migrate the affected peer group(s) "
"to the current system for continued subcloud management.",
service_affecting=False)
proposed_repair_action=(
"Check the connectivity between the current system and the "
"reported peer site. If the peer system is down, migrate the "
"affected peer group(s) to the current system for continued "
"subcloud management."
),
service_affecting=False,
)
self.fm_api.set_fault(fault)
except Exception as e:
LOG.exception(
"Problem setting fault for peer %s, alarm_id=%s, "
"error: %s" % (self.peer.peer_uuid, alarm_id, e))
"Problem setting fault for peer %s, alarm_id=%s, error: %s"
% (self.peer.peer_uuid, alarm_id, e)
)
def _heartbeat_check_via_get_peer_group_list(self):
"""Checking the heartbeat of system peer."""
@@ -98,29 +109,28 @@ class PeerMonitor(object):
dc_peer_subcloud_peer_group_list = list()
try:
dc_client = SystemPeerManager.get_peer_dc_client(self.peer)
dc_peer_subcloud_peer_group_list = \
dc_client.get_subcloud_peer_group_list()
dc_peer_subcloud_peer_group_list = dc_client.get_subcloud_peer_group_list()
failed = False
if not dc_peer_subcloud_peer_group_list:
LOG.warning("Resource subcloud peer group of dc:%s "
"not found" % self.peer.manager_endpoint)
LOG.warning(
"Resource subcloud peer group of dc:%s not found"
% self.peer.manager_endpoint
)
except Exception:
LOG.exception("Failed to access the dc: %s" %
self.peer.peer_name)
LOG.exception("Failed to access the dc: %s" % self.peer.peer_name)
return failed, dc_peer_subcloud_peer_group_list
def _update_sync_status_secondary_site_becomes_unreachable(self):
# Get associations by system peer
associations = SystemPeerManager.get_local_associations(self.context,
self.peer)
associations = SystemPeerManager.get_local_associations(self.context, self.peer)
for association in associations:
# If the association is not primary, skip it.
if association.association_type == consts.\
ASSOCIATION_TYPE_NON_PRIMARY:
LOG.debug("Skip update the Association sync_status as "
"it is not primary.")
if association.association_type == consts.ASSOCIATION_TYPE_NON_PRIMARY:
LOG.debug(
"Skip update the Association sync_status as it is not primary."
)
continue
# If the secondary site is down, set the association sync status
# "in-sync" -> "unknown"
@@ -131,24 +141,27 @@ class PeerMonitor(object):
sync_status = consts.ASSOCIATION_SYNC_STATUS_UNKNOWN
message = f"Peer site ({self.peer.peer_name}) is unreachable."
if association.sync_status not in [
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
consts.ASSOCIATION_SYNC_STATUS_UNKNOWN]:
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
]:
sync_status = consts.ASSOCIATION_SYNC_STATUS_FAILED
db_api.peer_group_association_update(
self.context, association.id,
self.context,
association.id,
sync_status=sync_status,
sync_message=message)
sync_message=message,
)
def _update_sync_status_secondary_site_becomes_reachable(self):
# Get associations by system peer
associations = SystemPeerManager.get_local_associations(self.context,
self.peer)
associations = SystemPeerManager.get_local_associations(self.context, self.peer)
for association in associations:
# If the association is not primary, skip it.
if association.association_type == consts.\
ASSOCIATION_TYPE_NON_PRIMARY:
LOG.debug("Skip update Peer Site Association sync_status as "
"current site Association is not primary.")
if association.association_type == consts.ASSOCIATION_TYPE_NON_PRIMARY:
LOG.debug(
"Skip update Peer Site Association sync_status as "
"current site Association is not primary."
)
continue
# Upon detecting that the secondary site is reachable again,
# the PGA sync_status will be set for both sites by the primary
@@ -156,37 +169,43 @@ class PeerMonitor(object):
# "unknown" -> "in-sync"
# "failed" -> "out-of-sync"
sync_status = consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
if association.sync_status == \
consts.ASSOCIATION_SYNC_STATUS_UNKNOWN:
if association.sync_status == consts.ASSOCIATION_SYNC_STATUS_UNKNOWN:
sync_status = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC
dc_local_pg = db_api.subcloud_peer_group_get(
self.context, association.peer_group_id)
self.context, association.peer_group_id
)
SystemPeerManager.update_sync_status(
self.context, self.peer, sync_status, dc_local_pg,
association=association)
self.context,
self.peer,
sync_status,
dc_local_pg,
association=association,
)
def _do_monitor_peer(self):
failure_count = 0
LOG.info("Start monitoring thread for peer %s" %
self.peer.peer_name)
LOG.info("Start monitoring thread for peer %s" % self.peer.peer_name)
UNAVAILABLE_STATE = consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE
AVAILABLE_STATE = consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE
# Do the actual peer monitor.
while not self.exit_flag.wait(timeout=self.peer.heartbeat_interval):
try:
# Get system peer from DB
self.peer = db_api.system_peer_get(self.context, self.peer.id)
failed, remote_pg_list = \
self._heartbeat_check_via_get_peer_group_list()
failed, remote_pg_list = self._heartbeat_check_via_get_peer_group_list()
if failed:
failure_count += 1
if failure_count >= self.peer.heartbeat_failure_threshold:
# heartbeat_failure_threshold reached.
LOG.warning("DC %s heartbeat failed, Raising alarm" %
self.peer.peer_name)
LOG.warning(
"DC %s heartbeat failed, Raising alarm"
% self.peer.peer_name
)
self._raise_failure()
db_api.system_peer_update(
self.context, self.peer.id,
availability_state= # noqa: E251
consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE
self.context,
self.peer.id,
availability_state=UNAVAILABLE_STATE,
)
# pylint: disable=line-too-long
self._update_sync_status_secondary_site_becomes_unreachable()
@@ -195,23 +214,24 @@ class PeerMonitor(object):
else:
failure_count = 0
self._audit_local_peer_groups(remote_pg_list)
if self.peer.availability_state != \
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE:
if self.peer.availability_state != AVAILABLE_STATE:
db_api.system_peer_update(
self.context, self.peer.id,
availability_state= # noqa: E251
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE
self.context,
self.peer.id,
availability_state=AVAILABLE_STATE,
)
# pylint: disable=line-too-long
self._update_sync_status_secondary_site_becomes_reachable()
LOG.info("DC %s back online, clear alarm" %
self.peer.peer_name)
LOG.info("DC %s back online, clear alarm" % self.peer.peer_name)
self._clear_failure()
except Exception as e:
LOG.exception("Got exception monitoring peer %s error: %s" %
(self.peer.peer_name, e))
LOG.info("Caught graceful exit signal for peer monitor %s" %
self.peer.peer_name)
LOG.exception(
"Got exception monitoring peer %s error: %s"
% (self.peer.peer_name, e)
)
LOG.info(
"Caught graceful exit signal for peer monitor %s" % self.peer.peer_name
)
def _audit_local_peer_groups(self, remote_pg_list):
# Generate a dict index by remote peer group name
@@ -222,21 +242,25 @@ class PeerMonitor(object):
# Only audit peer groups existing on both side
for peer_group_id, pgam_obj in self.peer_group_audit_obj_map.items():
peer_group = db_api.subcloud_peer_group_get(self.context,
peer_group_id)
peer_group = db_api.subcloud_peer_group_get(self.context, peer_group_id)
if peer_group.peer_group_name in remote_pg_dict:
remote_peer_group = remote_pg_dict[peer_group.peer_group_name]
# Audit for require_audit_flag is True or
# Remote peer group is in 'complete' state.
if (pgam_obj.require_audit_flag
or remote_peer_group.get("migration_status") ==
consts.PEER_GROUP_MIGRATION_COMPLETE):
if (
pgam_obj.require_audit_flag
or remote_peer_group.get("migration_status")
== consts.PEER_GROUP_MIGRATION_COMPLETE
):
pgam_obj.audit_peer_group_from_system(
self.peer, remote_peer_group, peer_group)
self.peer, remote_peer_group, peer_group
)
else:
LOG.warning("peer group %s not found on remote DC %s "
"nothing to audit, need sync operation" %
(peer_group.peer_group_name, self.peer.peer_name))
LOG.warning(
"peer group %s not found on remote DC %s "
"nothing to audit, need sync operation"
% (peer_group.peer_group_name, self.peer.peer_name)
)
def _set_require_audit_flag_to_associated_peer_groups(self):
for pgam_obj in self.peer_group_audit_obj_map.values():
@@ -248,7 +272,7 @@ class PeerMonitor(object):
pgam_obj = self.peer_group_audit_obj_map[peer_group.id]
pgam_obj.audit(self.peer, remote_peer_group, peer_group)
else:
msg = ("No peer group id %s found" % peer_group.peer_group_name)
msg = "No peer group id %s found" % peer_group.peer_group_name
return msg
def _clean_peer_group_audit_threads(self):
@@ -262,25 +286,30 @@ class PeerMonitor(object):
# destroy removed peer_group audit object
for peer_group_id in removed_peer_groups:
LOG.info("Peer group [%s] removed from peer [%s]" %
(peer_group_id, self.peer.peer_name))
LOG.info(
"Peer group [%s] removed from peer [%s]"
% (peer_group_id, self.peer.peer_name)
)
if peer_group_id in self.peer_group_audit_obj_map:
self.peer_group_audit_obj_map[peer_group_id].stop()
del self.peer_group_audit_obj_map[peer_group_id]
# Add new peer_group audit object
for peer_group_id in new_peer_groups:
LOG.info("New peer group [%s] found for peer [%s]" %
(peer_group_id, self.peer.peer_name))
self.peer_group_audit_obj_map[peer_group_id] = \
pgam.PeerGroupAuditManager(self.subcloud_manager,
peer_group_id)
LOG.info(
"New peer group [%s] found for peer [%s]"
% (peer_group_id, self.peer.peer_name)
)
self.peer_group_audit_obj_map[peer_group_id] = pgam.PeerGroupAuditManager(
self.subcloud_manager, peer_group_id
)
self.peer_group_id_set = peer_group_id_set
self._set_require_audit_flag_to_associated_peer_groups()
def start(self):
if self.thread is not None:
LOG.error('Peer monitor thread for %s has already started' %
self.peer.peer_name)
LOG.error(
"Peer monitor thread for %s has already started" % self.peer.peer_name
)
else:
self.thread = threading.Thread(target=self._do_monitor_peer)
self.thread.start()
@@ -296,10 +325,9 @@ class PeerMonitorManager(manager.Manager):
"""Manages tasks related to peer monitor."""
def __init__(self, subcloud_manager):
LOG.debug('PeerMonitorManager initialization...')
LOG.debug("PeerMonitorManager initialization...")
super(PeerMonitorManager, self).__init__(
service_name="peer_monitor_manager")
super(PeerMonitorManager, self).__init__(service_name="peer_monitor_manager")
self.peer_monitor = dict()
self.context = context.get_admin_context()
self.subcloud_manager = subcloud_manager
@@ -314,12 +342,11 @@ class PeerMonitorManager(manager.Manager):
del self.peer_monitor_thread_map[system_peer_id]
def _create_peer_monitor_task(self, system_peer_id):
peer = db_api.system_peer_get(self.context,
system_peer_id)
LOG.info("Create monitoring thread for peer: %s" %
peer.peer_name)
peer = db_api.system_peer_get(self.context, system_peer_id)
LOG.info("Create monitoring thread for peer: %s" % peer.peer_name)
self.peer_monitor_thread_map[system_peer_id] = PeerMonitor(
peer, self.context, self.subcloud_manager)
peer, self.context, self.subcloud_manager
)
self.peer_monitor_thread_map[system_peer_id].start()
@staticmethod
@@ -327,10 +354,12 @@ class PeerMonitorManager(manager.Manager):
return {key: value for key, value in dict1.items() if key not in dict2}
def _create_or_destroy_peer_monitor_task(self, peer_system_peer_group_map):
new_peers = self._diff_dict(peer_system_peer_group_map,
self.peer_monitor_thread_map)
removed_peers = self._diff_dict(self.peer_monitor_thread_map,
peer_system_peer_group_map)
new_peers = self._diff_dict(
peer_system_peer_group_map, self.peer_monitor_thread_map
)
removed_peers = self._diff_dict(
self.peer_monitor_thread_map, peer_system_peer_group_map
)
for peer_id in new_peers:
self._create_peer_monitor_task(peer_id)
for peer_id in removed_peers:
@@ -338,8 +367,7 @@ class PeerMonitorManager(manager.Manager):
# Update peer_group_id set
for peer_id, pm_obj in self.peer_monitor_thread_map.items():
pm_obj.update_peer_group_id_set(
peer_system_peer_group_map[peer_id])
pm_obj.update_peer_group_id_set(peer_system_peer_group_map[peer_id])
def peer_monitor_notify(self, context):
LOG.info("Caught peer monitor notify...")
@@ -348,31 +376,32 @@ class PeerMonitorManager(manager.Manager):
associations = db_api.peer_group_association_get_all(context)
for association in associations:
peer_system_peer_group_map[association.system_peer_id].add(
association.peer_group_id)
association.peer_group_id
)
self._create_or_destroy_peer_monitor_task(peer_system_peer_group_map)
def peer_group_audit_notify(self, context, peer_group_name, payload):
LOG.info("Caught peer group audit notification for peer group %s" %
peer_group_name)
LOG.info(
"Caught peer group audit notification for peer group %s" % peer_group_name
)
msg = None
try:
peer_group = db_api.subcloud_peer_group_get_by_name(
context, peer_group_name)
system_uuid = payload.get('peer_uuid')
system_peer = db_api.system_peer_get_by_uuid(context,
system_uuid)
context, peer_group_name
)
system_uuid = payload.get("peer_uuid")
system_peer = db_api.system_peer_get_by_uuid(context, system_uuid)
if system_peer.id in self.peer_monitor_thread_map:
pmobj = self.peer_monitor_thread_map[system_peer.id]
msg = pmobj.audit_specific_local_peer_group(peer_group,
payload)
msg = pmobj.audit_specific_local_peer_group(peer_group, payload)
else:
msg = ("System peer with UUID=%s is not under monitoring. "
"Skipping audit for peer group %s" %
(system_uuid, peer_group_name))
msg = (
"System peer with UUID=%s is not under monitoring. "
"Skipping audit for peer group %s" % (system_uuid, peer_group_name)
)
LOG.warning(msg)
return msg
except Exception as e:
LOG.exception('Handling peer group audit notify error: %s' %
str(e))
LOG.exception("Handling peer group audit notify error: %s" % str(e))
return str(e)


@@ -46,9 +46,11 @@ LOG = logging.getLogger(__name__)
# run multiple operations in parallel past the RPC limit.
def run_in_thread(fn):
"""Decorator to run a function in a separate thread."""
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return wrapper
@@ -101,9 +103,9 @@ class DCManagerService(service.Service):
utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
self.dcmanager_id = uuidutils.generate_uuid()
self.init_managers()
target = oslo_messaging.Target(version=self.rpc_api_version,
server=self.host,
topic=self.topic)
target = oslo_messaging.Target(
version=self.rpc_api_version, server=self.host, topic=self.topic
)
self.target = target
self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
self._rpc_server.start()
@@ -127,14 +129,15 @@ class DCManagerService(service.Service):
@request_context
def add_subcloud(self, context, subcloud_id, payload):
# Adds a subcloud
LOG.info("Handling add_subcloud request for: %s" % payload.get('name'))
LOG.info("Handling add_subcloud request for: %s" % payload.get("name"))
return self.subcloud_manager.add_subcloud(context, subcloud_id, payload)
@request_context
def add_secondary_subcloud(self, context, subcloud_id, payload):
# Adds a secondary subcloud
LOG.info("Handling add_secondary_subcloud request for: %s" %
payload.get('name'))
LOG.info(
"Handling add_secondary_subcloud request for: %s" % payload.get("name")
)
return self.subcloud_manager.add_subcloud(context, subcloud_id, payload)
@request_context
@@ -144,22 +147,23 @@ class DCManagerService(service.Service):
return self.subcloud_manager.delete_subcloud(context, subcloud_id)
@request_context
def rename_subcloud(self, context, subcloud_id, curr_subcloud_name,
new_subcloud_name=None):
def rename_subcloud(
self, context, subcloud_id, curr_subcloud_name, new_subcloud_name=None
):
# Rename a subcloud
LOG.info("Handling rename_subcloud request for: %s" %
curr_subcloud_name)
subcloud = self.subcloud_manager.rename_subcloud(context,
subcloud_id,
curr_subcloud_name,
new_subcloud_name)
LOG.info("Handling rename_subcloud request for: %s" % curr_subcloud_name)
subcloud = self.subcloud_manager.rename_subcloud(
context, subcloud_id, curr_subcloud_name, new_subcloud_name
)
return subcloud
@request_context
def get_subcloud_name_by_region_name(self, context, subcloud_region):
# get subcloud by region name
LOG.debug("Handling get_subcloud_name_by_region_name request for "
"region: %s" % subcloud_region)
LOG.debug(
"Handling get_subcloud_name_by_region_name request for region: %s"
% subcloud_region
)
subcloud = self.subcloud_manager.get_subcloud_name_by_region_name(
context, subcloud_region
)
@@ -167,128 +171,156 @@ class DCManagerService(service.Service):
@request_context
def update_subcloud(
self, context, subcloud_id, management_state=None, description=None,
location=None, group_id=None, data_install=None, force=None,
deploy_status=None, peer_group_id=None, bootstrap_values=None,
bootstrap_address=None
self,
context,
subcloud_id,
management_state=None,
description=None,
location=None,
group_id=None,
data_install=None,
force=None,
deploy_status=None,
peer_group_id=None,
bootstrap_values=None,
bootstrap_address=None,
):
# Updates a subcloud
LOG.info("Handling update_subcloud request for: %s" % subcloud_id)
subcloud = self.subcloud_manager.update_subcloud(context, subcloud_id,
management_state,
description,
location,
group_id,
data_install,
force,
deploy_status,
peer_group_id,
bootstrap_values,
bootstrap_address)
subcloud = self.subcloud_manager.update_subcloud(
context,
subcloud_id,
management_state,
description,
location,
group_id,
data_install,
force,
deploy_status,
peer_group_id,
bootstrap_values,
bootstrap_address,
)
return subcloud
@request_context
def update_subcloud_with_network_reconfig(self, context, subcloud_id, payload):
LOG.info("Handling update_subcloud_with_network_reconfig request for: %s",
subcloud_id)
LOG.info(
"Handling update_subcloud_with_network_reconfig request for: %s",
subcloud_id,
)
return self.subcloud_manager.update_subcloud_with_network_reconfig(
context, subcloud_id, payload)
context, subcloud_id, payload
)
@run_in_thread
@request_context
def redeploy_subcloud(self, context, subcloud_id, payload):
# Redeploy a subcloud
LOG.info("Handling redeploy_subcloud request for: %s" % subcloud_id)
return self.subcloud_manager.redeploy_subcloud(context,
subcloud_id,
payload)
return self.subcloud_manager.redeploy_subcloud(context, subcloud_id, payload)
@request_context
def backup_subclouds(self, context, payload):
# Backup a subcloud or group of subclouds
entity = 'subcloud' if payload.get('subcloud') else 'group'
LOG.info("Handling backup_subclouds request for %s ID: %s" %
(entity, (payload.get('subcloud') or payload.get('group'))))
entity = "subcloud" if payload.get("subcloud") else "group"
LOG.info(
"Handling backup_subclouds request for %s ID: %s"
% (entity, (payload.get("subcloud") or payload.get("group")))
)
return self.subcloud_manager.create_subcloud_backups(context, payload)
@request_context
def delete_subcloud_backups(self, context, release_version, payload):
# Delete backup on subcloud or group of subclouds
entity = 'subcloud' if payload.get('subcloud') else 'group'
LOG.info("Handling delete_subcloud_backups request for %s ID: %s" %
(entity, (payload.get('subcloud') or payload.get('group'))))
return self.subcloud_manager.delete_subcloud_backups(context,
release_version,
payload)
entity = "subcloud" if payload.get("subcloud") else "group"
LOG.info(
"Handling delete_subcloud_backups request for %s ID: %s"
% (entity, (payload.get("subcloud") or payload.get("group")))
)
return self.subcloud_manager.delete_subcloud_backups(
context, release_version, payload
)
@request_context
def restore_subcloud_backups(self, context, payload):
# Restore a subcloud backup or a group of subclouds backups
entity = 'subcloud' if payload.get('subcloud') else 'group'
LOG.info("Handling restore_subcloud_backups request for %s ID: %s" %
(entity, (payload.get('subcloud') or payload.get('group'))))
entity = "subcloud" if payload.get("subcloud") else "group"
LOG.info(
"Handling restore_subcloud_backups request for %s ID: %s"
% (entity, (payload.get("subcloud") or payload.get("group")))
)
return self.subcloud_manager.restore_subcloud_backups(context, payload)
@request_context
def update_subcloud_sync_endpoint_type(self, context, subcloud_name,
endpoint_type_list,
openstack_installed):
def update_subcloud_sync_endpoint_type(
self, context, subcloud_name, endpoint_type_list, openstack_installed
):
# Updates subcloud sync endpoint type
LOG.info("Handling update_subcloud_sync_endpoint_type request for: %s"
% subcloud_name)
LOG.info(
"Handling update_subcloud_sync_endpoint_type request for: %s"
% subcloud_name
)
self.subcloud_manager.update_subcloud_sync_endpoint_type(
context, subcloud_name, endpoint_type_list, openstack_installed)
context, subcloud_name, endpoint_type_list, openstack_installed
)
@request_context
def prestage_subcloud(self, context, payload):
LOG.info("Handling prestage_subcloud request for: %s",
payload['subcloud_name'])
LOG.info("Handling prestage_subcloud request for: %s", payload["subcloud_name"])
return self.subcloud_manager.prestage_subcloud(context, payload)
@request_context
def subcloud_deploy_create(self, context, subcloud_id, payload):
# Adds a subcloud
LOG.info("Handling subcloud_deploy_create request for: %s" %
payload.get('name'))
return self.subcloud_manager.subcloud_deploy_create(context,
subcloud_id,
payload)
LOG.info(
"Handling subcloud_deploy_create request for: %s" % payload.get("name")
)
return self.subcloud_manager.subcloud_deploy_create(
context, subcloud_id, payload
)
@run_in_thread
@request_context
def subcloud_deploy_bootstrap(self, context, subcloud_id, payload,
initial_deployment):
def subcloud_deploy_bootstrap(
self, context, subcloud_id, payload, initial_deployment
):
# Bootstraps a subcloud
LOG.info("Handling subcloud_deploy_bootstrap request for: %s" %
payload.get('name'))
LOG.info(
"Handling subcloud_deploy_bootstrap request for: %s" % payload.get("name")
)
return self.subcloud_manager.subcloud_deploy_bootstrap(
context, subcloud_id, payload, initial_deployment)
context, subcloud_id, payload, initial_deployment
)
@run_in_thread
@request_context
def subcloud_deploy_config(self, context, subcloud_id, payload,
initial_deployment):
def subcloud_deploy_config(self, context, subcloud_id, payload, initial_deployment):
# Configures a subcloud
LOG.info("Handling subcloud_deploy_config request for: %s" % subcloud_id)
return self.subcloud_manager.subcloud_deploy_config(
context, subcloud_id, payload, initial_deployment)
context, subcloud_id, payload, initial_deployment
)
@run_in_thread
@request_context
def subcloud_deploy_install(self, context, subcloud_id, payload,
initial_deployment):
def subcloud_deploy_install(
self, context, subcloud_id, payload, initial_deployment
):
# Install a subcloud
LOG.info("Handling subcloud_deploy_install request for: %s" % subcloud_id)
return self.subcloud_manager.subcloud_deploy_install(
context, subcloud_id, payload, initial_deployment)
context, subcloud_id, payload, initial_deployment
)
@run_in_thread
@request_context
def subcloud_deploy_enroll(self, context, subcloud_id, payload):
# Enroll a subcloud
LOG.info(f'Handling subcloud_deploy_enroll request for: {subcloud_id}')
LOG.info(f"Handling subcloud_deploy_enroll request for: {subcloud_id}")
return self.subcloud_manager.subcloud_deploy_enroll(
context, subcloud_id, payload)
context, subcloud_id, payload
)
@request_context
def subcloud_deploy_complete(self, context, subcloud_id):
@@ -301,26 +333,27 @@ class DCManagerService(service.Service):
def subcloud_deploy_abort(self, context, subcloud_id, deploy_status):
# Abort the subcloud deployment
LOG.info("Handling subcloud_deploy_abort request for: %s" % subcloud_id)
return self.subcloud_manager.subcloud_deploy_abort(context,
subcloud_id,
deploy_status)
return self.subcloud_manager.subcloud_deploy_abort(
context, subcloud_id, deploy_status
)
@run_in_thread
@request_context
def subcloud_deploy_resume(self, context, subcloud_id, subcloud_name,
payload, deploy_states_to_run):
def subcloud_deploy_resume(
self, context, subcloud_id, subcloud_name, payload, deploy_states_to_run
):
# Adds a subcloud
LOG.info("Handling subcloud_deploy_resume request for: %s" % subcloud_name)
return self.subcloud_manager.subcloud_deploy_resume(context,
subcloud_id,
subcloud_name,
payload,
deploy_states_to_run)
return self.subcloud_manager.subcloud_deploy_resume(
context, subcloud_id, subcloud_name, payload, deploy_states_to_run
)
@request_context
def batch_migrate_subcloud(self, context, payload):
LOG.info("Handling batch_migrate_subcloud request for peer_group: %s",
payload['peer_group'])
LOG.info(
"Handling batch_migrate_subcloud request for peer_group: %s",
payload["peer_group"],
)
return self.subcloud_manager.batch_migrate_subcloud(context, payload)
@request_context
@ -330,44 +363,62 @@ class DCManagerService(service.Service):
@request_context
def peer_group_audit_notify(self, context, peer_group_name, payload):
LOG.info("Handling peer group audit notify of peer group "
f"{peer_group_name}")
LOG.info("Handling peer group audit notify of peer group {peer_group_name}")
return self.peer_monitor_manager.peer_group_audit_notify(
context, peer_group_name, payload)
context, peer_group_name, payload
)
@request_context
def sync_subcloud_peer_group(self, context, association_id,
sync_subclouds=True):
LOG.info("Handling sync_subcloud_peer_group request for: %s",
association_id)
def sync_subcloud_peer_group(self, context, association_id, sync_subclouds=True):
LOG.info("Handling sync_subcloud_peer_group request for: %s", association_id)
return self.system_peer_manager.sync_subcloud_peer_group(
context, association_id, sync_subclouds)
context, association_id, sync_subclouds
)
@request_context
def update_subcloud_peer_group(self, context, peer_group_id,
group_state, max_subcloud_rehoming,
group_name, new_group_name=None):
LOG.info("Handling update_subcloud_peer_group request for "
"peer group %s" % peer_group_id)
def update_subcloud_peer_group(
self,
context,
peer_group_id,
group_state,
max_subcloud_rehoming,
group_name,
new_group_name=None,
):
LOG.info(
"Handling update_subcloud_peer_group request for peer group %s"
% peer_group_id
)
return self.system_peer_manager.update_subcloud_peer_group(
context, peer_group_id, group_state, max_subcloud_rehoming,
group_name, new_group_name)
context,
peer_group_id,
group_state,
max_subcloud_rehoming,
group_name,
new_group_name,
)
@request_context
def delete_peer_group_association(self, context, association_id):
LOG.info("Handling delete_peer_group_association request for: %s",
association_id)
LOG.info(
"Handling delete_peer_group_association request for: %s", association_id
)
return self.system_peer_manager.delete_peer_group_association(
context, association_id)
context, association_id
)
@request_context
def update_association_sync_status(self, context, peer_group_id,
sync_status, sync_message=None):
def update_association_sync_status(
self, context, peer_group_id, sync_status, sync_message=None
):
# Updates peer group association sync_status
LOG.info("Handling update_peer_association_sync_status request for: %s"
% peer_group_id)
LOG.info(
"Handling update_peer_association_sync_status request for: %s"
% peer_group_id
)
return self.system_peer_manager.update_association_sync_status(
context, peer_group_id, sync_status, sync_message)
context, peer_group_id, sync_status, sync_message
)
def _stop_rpc_server(self):
# Stop RPC connection to prevent new requests
@ -375,9 +426,9 @@ class DCManagerService(service.Service):
try:
self._rpc_server.stop()
self._rpc_server.wait()
LOG.info('RPC service stopped successfully')
LOG.info("RPC service stopped successfully")
except Exception as ex:
LOG.error('Failed to stop RPC service: %s', str(ex))
LOG.error("Failed to stop RPC service: %s", str(ex))
def stop(self):
SubprocessCleanup.shutdown_cleanup(origin="service")
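The handlers above all share one shape: a thin method, decorated with request_context (and sometimes run_in_thread), that logs the request and delegates to a manager. A minimal standalone sketch of that dispatch pattern, assuming nothing from dcmanager — the decorator here is a simplified stand-in for the real request_context, which also rebuilds the incoming oslo RequestContext before dispatching:

import functools
import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)


def request_context(func):
    # Simplified stand-in: the real decorator also deserializes the
    # incoming RPC context before handing off to the handler.
    @functools.wraps(func)
    def wrapper(self, context, *args, **kwargs):
        LOG.info("Dispatching %s", func.__name__)
        return func(self, context, *args, **kwargs)

    return wrapper


class FakeService:
    @request_context
    def prestage_subcloud(self, context, payload):
        # Delegate to a manager, exactly like the handlers above.
        return {"subcloud": payload["subcloud_name"], "status": "prestaging"}


print(FakeService().prestage_subcloud({}, {"subcloud_name": "subcloud1"}))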

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB.
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -34,16 +34,16 @@ class DCManagerObject(base.VersionedObject):
"save" object method.
"""
OBJ_PROJECT_NAMESPACE = 'dcmanager'
VERSION = '1.0'
OBJ_PROJECT_NAMESPACE = "dcmanager"
VERSION = "1.0"
@staticmethod
def _from_db_object(context, obj, db_obj):
if db_obj is None:
return None
for field in obj.fields:
if field == 'metadata':
obj['metadata'] = db_obj['meta_data']
if field == "metadata":
obj["metadata"] = db_obj["meta_data"]
else:
obj[field] = db_obj[field]
@ -66,6 +66,7 @@ class DCManagerObjectRegistry(base.VersionedObjectRegistry):
setattr(objects, cls.obj_name(), cls)
else:
curr_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
getattr(objects, cls.obj_name()).VERSION
)
if version >= curr_version:
setattr(objects, cls.obj_name(), cls)
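The registry hunk above keeps whichever class carries the newest VERSION string. A small self-contained sketch of that rule, using a hand-rolled tuple conversion in place of oslo_utils.versionutils purely for illustration:

def version_tuple(version_str):
    # "1.10" -> (1, 10), so versions compare numerically, not lexically.
    return tuple(int(part) for part in version_str.split("."))


registry = {}


def register(name, version, cls):
    current = registry.get(name)
    if current is None or version_tuple(version) >= version_tuple(current[0]):
        registry[name] = (version, cls)


register("Subcloud", "1.2", object)
register("Subcloud", "1.10", dict)  # 1.10 > 1.2 once compared as tuples
print(registry["Subcloud"][0])      # -> 1.10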

View File

@ -34,8 +34,9 @@ class RPCClient(object):
"""
def __init__(self, timeout, topic, version):
self._client = messaging.get_rpc_client(timeout=timeout, topic=topic,
version=version)
self._client = messaging.get_rpc_client(
timeout=timeout, topic=topic, version=version
)
@staticmethod
def make_msg(method, **kwargs):
@ -61,71 +62,98 @@ class RPCClient(object):
class SubcloudStateClient(RPCClient):
"""Client to update subcloud availability."""
BASE_RPC_API_VERSION = '1.0'
BASE_RPC_API_VERSION = "1.0"
def __init__(self, timeout=None):
super(SubcloudStateClient, self).__init__(
timeout,
consts.TOPIC_DC_MANAGER_STATE,
self.BASE_RPC_API_VERSION)
def bulk_update_subcloud_availability_and_endpoint_status(
self, ctxt, subcloud_name, subcloud_region, availability_data,
endpoint_data
):
# Note: This is an asynchronous operation.
return self.cast(ctxt, self.make_msg(
'bulk_update_subcloud_availability_and_endpoint_status',
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
availability_data=availability_data,
endpoint_data=endpoint_data)
timeout, consts.TOPIC_DC_MANAGER_STATE, self.BASE_RPC_API_VERSION
)
def update_subcloud_availability(self, ctxt,
subcloud_name,
subcloud_region,
availability_status,
update_state_only=False,
audit_fail_count=None):
def bulk_update_subcloud_availability_and_endpoint_status(
self, ctxt, subcloud_name, subcloud_region, availability_data, endpoint_data
):
# Note: This is an asynchronous operation.
return self.cast(
ctxt,
self.make_msg(
"bulk_update_subcloud_availability_and_endpoint_status",
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
availability_data=availability_data,
endpoint_data=endpoint_data,
),
)
def update_subcloud_availability(
self,
ctxt,
subcloud_name,
subcloud_region,
availability_status,
update_state_only=False,
audit_fail_count=None,
):
# Note: synchronous
return self.call(
ctxt,
self.make_msg('update_subcloud_availability',
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
availability_status=availability_status,
update_state_only=update_state_only,
audit_fail_count=audit_fail_count))
self.make_msg(
"update_subcloud_availability",
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
availability_status=availability_status,
update_state_only=update_state_only,
audit_fail_count=audit_fail_count,
),
)
def update_subcloud_endpoint_status(
self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None,
alarmable=True
self,
ctxt,
subcloud_name=None,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=None,
alarmable=True,
):
# Note: This is an asynchronous operation.
# See below for synchronous method call
return self.cast(ctxt, self.make_msg('update_subcloud_endpoint_status',
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
endpoint_type=endpoint_type,
sync_status=sync_status,
ignore_endpoints=ignore_endpoints,
alarmable=alarmable))
return self.cast(
ctxt,
self.make_msg(
"update_subcloud_endpoint_status",
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
endpoint_type=endpoint_type,
sync_status=sync_status,
ignore_endpoints=ignore_endpoints,
alarmable=alarmable,
),
)
def update_subcloud_endpoint_status_sync(
self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None,
alarmable=True
self,
ctxt,
subcloud_name=None,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
ignore_endpoints=None,
alarmable=True,
):
# Note: synchronous
return self.call(ctxt, self.make_msg('update_subcloud_endpoint_status',
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
endpoint_type=endpoint_type,
sync_status=sync_status,
ignore_endpoints=ignore_endpoints,
alarmable=alarmable))
return self.call(
ctxt,
self.make_msg(
"update_subcloud_endpoint_status",
subcloud_name=subcloud_name,
subcloud_region=subcloud_region,
endpoint_type=endpoint_type,
sync_status=sync_status,
ignore_endpoints=ignore_endpoints,
alarmable=alarmable,
),
)
class ManagerClient(RPCClient):
@ -135,188 +163,290 @@ class ManagerClient(RPCClient):
1.0 - Initial version (Mitaka 1.0 release)
"""
BASE_RPC_API_VERSION = '1.0'
BASE_RPC_API_VERSION = "1.0"
def __init__(self, timeout=None):
super(ManagerClient, self).__init__(
timeout,
consts.TOPIC_DC_MANAGER,
self.BASE_RPC_API_VERSION)
timeout, consts.TOPIC_DC_MANAGER, self.BASE_RPC_API_VERSION
)
def add_subcloud(self, ctxt, subcloud_id, payload):
return self.cast(ctxt, self.make_msg('add_subcloud',
subcloud_id=subcloud_id,
payload=payload))
return self.cast(
ctxt,
self.make_msg("add_subcloud", subcloud_id=subcloud_id, payload=payload),
)
def add_secondary_subcloud(self, ctxt, subcloud_id, payload):
return self.call(ctxt, self.make_msg('add_secondary_subcloud',
subcloud_id=subcloud_id,
payload=payload))
return self.call(
ctxt,
self.make_msg(
"add_secondary_subcloud", subcloud_id=subcloud_id, payload=payload
),
)
def delete_subcloud(self, ctxt, subcloud_id):
return self.call(ctxt, self.make_msg('delete_subcloud',
subcloud_id=subcloud_id))
return self.call(
ctxt, self.make_msg("delete_subcloud", subcloud_id=subcloud_id)
)
def rename_subcloud(
self, ctxt, subcloud_id, curr_subcloud_name, new_subcloud_name=None
):
return self.call(ctxt, self.make_msg('rename_subcloud',
subcloud_id=subcloud_id,
curr_subcloud_name=curr_subcloud_name,
new_subcloud_name=new_subcloud_name))
return self.call(
ctxt,
self.make_msg(
"rename_subcloud",
subcloud_id=subcloud_id,
curr_subcloud_name=curr_subcloud_name,
new_subcloud_name=new_subcloud_name,
),
)
def update_subcloud(
self, ctxt, subcloud_id, management_state=None, description=None,
location=None, group_id=None, data_install=None, force=None,
deploy_status=None, peer_group_id=None, bootstrap_values=None,
bootstrap_address=None
self,
ctxt,
subcloud_id,
management_state=None,
description=None,
location=None,
group_id=None,
data_install=None,
force=None,
deploy_status=None,
peer_group_id=None,
bootstrap_values=None,
bootstrap_address=None,
):
return self.call(ctxt, self.make_msg('update_subcloud',
subcloud_id=subcloud_id,
management_state=management_state,
description=description,
location=location,
group_id=group_id,
data_install=data_install,
force=force,
deploy_status=deploy_status,
peer_group_id=peer_group_id,
bootstrap_values=bootstrap_values,
bootstrap_address=bootstrap_address))
return self.call(
ctxt,
self.make_msg(
"update_subcloud",
subcloud_id=subcloud_id,
management_state=management_state,
description=description,
location=location,
group_id=group_id,
data_install=data_install,
force=force,
deploy_status=deploy_status,
peer_group_id=peer_group_id,
bootstrap_values=bootstrap_values,
bootstrap_address=bootstrap_address,
),
)
def update_subcloud_with_network_reconfig(self, ctxt, subcloud_id, payload):
return self.cast(ctxt, self.make_msg('update_subcloud_with_network_reconfig',
subcloud_id=subcloud_id,
payload=payload))
def redeploy_subcloud(self, ctxt, subcloud_id, payload):
return self.cast(ctxt, self.make_msg('redeploy_subcloud',
subcloud_id=subcloud_id,
payload=payload))
def backup_subclouds(self, ctxt, payload):
return self.cast(ctxt, self.make_msg('backup_subclouds',
payload=payload))
def delete_subcloud_backups(self, ctxt, release_version, payload):
return self.call(ctxt, self.make_msg('delete_subcloud_backups',
release_version=release_version,
payload=payload))
def restore_subcloud_backups(self, ctxt, payload):
return self.cast(ctxt, self.make_msg('restore_subcloud_backups',
payload=payload))
def update_subcloud_sync_endpoint_type(self, ctxt,
subcloud_region,
endpoint_type_list,
openstack_installed):
return self.cast(
ctxt,
self.make_msg('update_subcloud_sync_endpoint_type',
subcloud_region=subcloud_region,
endpoint_type_list=endpoint_type_list,
openstack_installed=openstack_installed))
self.make_msg(
"update_subcloud_with_network_reconfig",
subcloud_id=subcloud_id,
payload=payload,
),
)
def redeploy_subcloud(self, ctxt, subcloud_id, payload):
return self.cast(
ctxt,
self.make_msg(
"redeploy_subcloud", subcloud_id=subcloud_id, payload=payload
),
)
def backup_subclouds(self, ctxt, payload):
return self.cast(ctxt, self.make_msg("backup_subclouds", payload=payload))
def delete_subcloud_backups(self, ctxt, release_version, payload):
return self.call(
ctxt,
self.make_msg(
"delete_subcloud_backups",
release_version=release_version,
payload=payload,
),
)
def restore_subcloud_backups(self, ctxt, payload):
return self.cast(
ctxt, self.make_msg("restore_subcloud_backups", payload=payload)
)
def update_subcloud_sync_endpoint_type(
self, ctxt, subcloud_region, endpoint_type_list, openstack_installed
):
return self.cast(
ctxt,
self.make_msg(
"update_subcloud_sync_endpoint_type",
subcloud_region=subcloud_region,
endpoint_type_list=endpoint_type_list,
openstack_installed=openstack_installed,
),
)
def prestage_subcloud(self, ctxt, payload):
return self.call(ctxt, self.make_msg('prestage_subcloud',
payload=payload))
return self.call(ctxt, self.make_msg("prestage_subcloud", payload=payload))
def subcloud_deploy_create(self, ctxt, subcloud_id, payload):
return self.call(ctxt, self.make_msg('subcloud_deploy_create',
subcloud_id=subcloud_id,
payload=payload))
return self.call(
ctxt,
self.make_msg(
"subcloud_deploy_create", subcloud_id=subcloud_id, payload=payload
),
)
def subcloud_deploy_install(self, ctxt, subcloud_id, payload,
initial_deployment):
return self.cast(ctxt, self.make_msg('subcloud_deploy_install',
subcloud_id=subcloud_id,
payload=payload,
initial_deployment=initial_deployment))
def subcloud_deploy_install(self, ctxt, subcloud_id, payload, initial_deployment):
return self.cast(
ctxt,
self.make_msg(
"subcloud_deploy_install",
subcloud_id=subcloud_id,
payload=payload,
initial_deployment=initial_deployment,
),
)
def subcloud_deploy_enroll(self, ctxt, subcloud_id, payload):
return self.cast(ctxt, self.make_msg('subcloud_deploy_enroll',
subcloud_id=subcloud_id,
payload=payload))
return self.cast(
ctxt,
self.make_msg(
"subcloud_deploy_enroll", subcloud_id=subcloud_id, payload=payload
),
)
def subcloud_deploy_bootstrap(self, ctxt, subcloud_id, payload,
initial_deployment):
return self.cast(ctxt, self.make_msg('subcloud_deploy_bootstrap',
subcloud_id=subcloud_id,
payload=payload,
initial_deployment=initial_deployment))
def subcloud_deploy_bootstrap(self, ctxt, subcloud_id, payload, initial_deployment):
return self.cast(
ctxt,
self.make_msg(
"subcloud_deploy_bootstrap",
subcloud_id=subcloud_id,
payload=payload,
initial_deployment=initial_deployment,
),
)
def subcloud_deploy_config(self, ctxt, subcloud_id, payload,
initial_deployment):
return self.cast(ctxt, self.make_msg('subcloud_deploy_config',
subcloud_id=subcloud_id,
payload=payload,
initial_deployment=initial_deployment))
def subcloud_deploy_config(self, ctxt, subcloud_id, payload, initial_deployment):
return self.cast(
ctxt,
self.make_msg(
"subcloud_deploy_config",
subcloud_id=subcloud_id,
payload=payload,
initial_deployment=initial_deployment,
),
)
def subcloud_deploy_complete(self, ctxt, subcloud_id):
return self.call(ctxt, self.make_msg('subcloud_deploy_complete',
subcloud_id=subcloud_id))
return self.call(
ctxt, self.make_msg("subcloud_deploy_complete", subcloud_id=subcloud_id)
)
def subcloud_deploy_abort(self, ctxt, subcloud_id, deploy_status):
return self.cast(ctxt, self.make_msg('subcloud_deploy_abort',
subcloud_id=subcloud_id,
deploy_status=deploy_status))
return self.cast(
ctxt,
self.make_msg(
"subcloud_deploy_abort",
subcloud_id=subcloud_id,
deploy_status=deploy_status,
),
)
def subcloud_deploy_resume(self, ctxt, subcloud_id, subcloud_name,
payload, deploy_states_to_run):
return self.cast(ctxt, self.make_msg(
'subcloud_deploy_resume',
subcloud_id=subcloud_id,
subcloud_name=subcloud_name,
payload=payload,
deploy_states_to_run=deploy_states_to_run))
def subcloud_deploy_resume(
self, ctxt, subcloud_id, subcloud_name, payload, deploy_states_to_run
):
return self.cast(
ctxt,
self.make_msg(
"subcloud_deploy_resume",
subcloud_id=subcloud_id,
subcloud_name=subcloud_name,
payload=payload,
deploy_states_to_run=deploy_states_to_run,
),
)
def get_subcloud_name_by_region_name(self, ctxt, subcloud_region):
return self.call(ctxt, self.make_msg('get_subcloud_name_by_region_name',
subcloud_region=subcloud_region))
return self.call(
ctxt,
self.make_msg(
"get_subcloud_name_by_region_name", subcloud_region=subcloud_region
),
)
def batch_migrate_subcloud(self, ctxt, payload):
return self.cast(ctxt, self.make_msg('batch_migrate_subcloud',
payload=payload))
return self.cast(ctxt, self.make_msg("batch_migrate_subcloud", payload=payload))
def sync_subcloud_peer_group(self, ctxt, association_id):
return self.cast(ctxt, self.make_msg(
'sync_subcloud_peer_group', association_id=association_id))
return self.cast(
ctxt,
self.make_msg("sync_subcloud_peer_group", association_id=association_id),
)
def sync_subcloud_peer_group_only(self, ctxt, association_id):
# Without synchronizing subclouds
return self.call(ctxt, self.make_msg(
'sync_subcloud_peer_group', association_id=association_id,
sync_subclouds=False))
return self.call(
ctxt,
self.make_msg(
"sync_subcloud_peer_group",
association_id=association_id,
sync_subclouds=False,
),
)
def update_subcloud_peer_group(self, ctxt, peer_group_id,
group_state, max_subcloud_rehoming,
group_name, new_group_name=None):
return self.call(ctxt, self.make_msg(
'update_subcloud_peer_group',
peer_group_id=peer_group_id,
group_state=group_state,
max_subcloud_rehoming=max_subcloud_rehoming,
group_name=group_name, new_group_name=new_group_name))
def update_subcloud_peer_group(
self,
ctxt,
peer_group_id,
group_state,
max_subcloud_rehoming,
group_name,
new_group_name=None,
):
return self.call(
ctxt,
self.make_msg(
"update_subcloud_peer_group",
peer_group_id=peer_group_id,
group_state=group_state,
max_subcloud_rehoming=max_subcloud_rehoming,
group_name=group_name,
new_group_name=new_group_name,
),
)
def delete_peer_group_association(self, ctxt, association_id):
return self.call(ctxt, self.make_msg('delete_peer_group_association',
association_id=association_id))
return self.call(
ctxt,
self.make_msg(
"delete_peer_group_association", association_id=association_id
),
)
def update_association_sync_status(self, ctxt, peer_group_id,
sync_status, sync_message=None):
return self.call(ctxt, self.make_msg('update_association_sync_status',
peer_group_id=peer_group_id,
sync_status=sync_status,
sync_message=sync_message))
def update_association_sync_status(
self, ctxt, peer_group_id, sync_status, sync_message=None
):
return self.call(
ctxt,
self.make_msg(
"update_association_sync_status",
peer_group_id=peer_group_id,
sync_status=sync_status,
sync_message=sync_message,
),
)
def peer_monitor_notify(self, ctxt):
return self.call(ctxt, self.make_msg('peer_monitor_notify'))
return self.call(ctxt, self.make_msg("peer_monitor_notify"))
def peer_group_audit_notify(self, ctxt, peer_group_name, payload):
return self.call(ctxt, self.make_msg('peer_group_audit_notify',
peer_group_name=peer_group_name,
payload=payload))
return self.call(
ctxt,
self.make_msg(
"peer_group_audit_notify",
peer_group_name=peer_group_name,
payload=payload,
),
)
class DCManagerNotifications(RPCClient):
@ -325,24 +455,33 @@ class DCManagerNotifications(RPCClient):
Version History:
1.0 - Initial version
"""
DCMANAGER_RPC_API_VERSION = '1.0'
TOPIC_DC_NOTIFICIATION = 'DCMANAGER-NOTIFICATION'
DCMANAGER_RPC_API_VERSION = "1.0"
TOPIC_DC_NOTIFICIATION = "DCMANAGER-NOTIFICATION"
def __init__(self, timeout=None):
super(DCManagerNotifications, self).__init__(
timeout,
self.TOPIC_DC_NOTIFICIATION,
self.DCMANAGER_RPC_API_VERSION)
timeout, self.TOPIC_DC_NOTIFICIATION, self.DCMANAGER_RPC_API_VERSION
)
def subcloud_online(self, ctxt, subcloud_name):
return self.cast(ctxt, self.make_msg('subcloud_online',
subcloud_name=subcloud_name))
return self.cast(
ctxt, self.make_msg("subcloud_online", subcloud_name=subcloud_name)
)
def subcloud_managed(self, ctxt, subcloud_name):
return self.cast(ctxt, self.make_msg('subcloud_managed',
subcloud_name=subcloud_name))
return self.cast(
ctxt, self.make_msg("subcloud_managed", subcloud_name=subcloud_name)
)
def subcloud_sysinv_endpoint_update(self, ctxt, subcloud_name, endpoint):
return self.cast(ctxt, self.make_msg(
'subcloud_sysinv_endpoint_update', subcloud_name=subcloud_name,
endpoint=endpoint), fanout=True, version=self.DCMANAGER_RPC_API_VERSION)
return self.cast(
ctxt,
self.make_msg(
"subcloud_sysinv_endpoint_update",
subcloud_name=subcloud_name,
endpoint=endpoint,
),
fanout=True,
version=self.DCMANAGER_RPC_API_VERSION,
)
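Throughout this client, cast() is fire-and-forget while call() blocks for a reply, matching the "Note: This is an asynchronous operation." / "Note: synchronous" comments in the methods above. A toy queue-based illustration of that difference — not the oslo.messaging API, just the semantics:

import queue
import threading


class ToyRpc:
    def __init__(self):
        self._requests = queue.Queue()
        threading.Thread(target=self._serve, daemon=True).start()

    def _serve(self):
        while True:
            method, kwargs, reply = self._requests.get()
            result = "handled %s(%s)" % (method, kwargs)
            if reply is not None:
                reply.put(result)

    def cast(self, method, **kwargs):
        # Asynchronous: enqueue the message and return immediately.
        self._requests.put((method, kwargs, None))

    def call(self, method, **kwargs):
        # Synchronous: enqueue, then block until the server replies.
        reply = queue.Queue()
        self._requests.put((method, kwargs, reply))
        return reply.get(timeout=5)


rpc = ToyRpc()
rpc.cast("backup_subclouds", payload={"group": "default"})
print(rpc.call("delete_subcloud", subcloud_id=1))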

View File

@ -192,16 +192,14 @@ class DCManagerStateService(service.Service):
)
def bulk_update_subcloud_availability_and_endpoint_status(
self, context, subcloud_name, subcloud_region, availability_data,
endpoint_data
self, context, subcloud_name, subcloud_region, availability_data, endpoint_data
):
LOG.info(
"Handling bulk_update_subcloud_availability_and_endpoint_status request "
f"for subcloud: {subcloud_name}"
)
self.subcloud_state_manager.\
bulk_update_subcloud_availability_and_endpoint_status(
context, subcloud_name, subcloud_region, availability_data,
endpoint_data
)
manager = self.subcloud_state_manager
manager.bulk_update_subcloud_availability_and_endpoint_status(
context, subcloud_name, subcloud_region, availability_data, endpoint_data
)
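Binding the long attribute to a short local name, as done above, is a common way to stay under Black's default 88-column limit without backslash continuations. A minimal sketch of the same refactor:

class Manager:
    def bulk_update_subcloud_availability_and_endpoint_status(self, *args):
        return args


class Service:
    def __init__(self):
        self.subcloud_state_manager = Manager()

    def handler(self, *args):
        # The short alias keeps the call on two readable lines instead of
        # a backslash-continued attribute chain.
        manager = self.subcloud_state_manager
        return manager.bulk_update_subcloud_availability_and_endpoint_status(*args)


print(Service().handler("subcloud1", "region1"))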

View File

@ -59,18 +59,25 @@ class SubcloudStateManager(manager.Manager):
"""Manages tasks related to subclouds."""
def __init__(self, *args, **kwargs):
LOG.debug('SubcloudStateManager initialization...')
LOG.debug("SubcloudStateManager initialization...")
super(SubcloudStateManager,
self).__init__(service_name="subcloud_manager", *args, **kwargs)
super(SubcloudStateManager, self).__init__(
service_name="subcloud_manager", *args, **kwargs
)
self.context = context.get_admin_context()
self.dcorch_rpc_client = dcorch_rpc_client.EngineWorkerClient()
self.fm_api = fm_api.FaultAPIs()
self.audit_rpc_client = dcmanager_audit_rpc_client.ManagerAuditClient()
def _do_update_subcloud_endpoint_status(self, context, subcloud_id,
endpoint_type, sync_status,
alarmable, ignore_endpoints=None):
def _do_update_subcloud_endpoint_status(
self,
context,
subcloud_id,
endpoint_type,
sync_status,
alarmable,
ignore_endpoints=None,
):
"""Update online/managed subcloud endpoint status
:param context: request context object
@ -91,14 +98,19 @@ class SubcloudStateManager(manager.Manager):
# retrieve the info from the db for this subcloud.
# subcloud_id should not be None
try:
for subcloud, subcloud_status in db_api. \
subcloud_get_with_status(context, subcloud_id):
for subcloud, subcloud_status in db_api.subcloud_get_with_status(
context, subcloud_id
):
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict(
subcloud_status))
if subcloud_status.endpoint_type == \
dccommon_consts.ENDPOINT_TYPE_IDENTITY:
subcloud_status
)
)
if (
subcloud_status.endpoint_type
== dccommon_consts.ENDPOINT_TYPE_IDENTITY
):
original_identity_status = subcloud_status.sync_status
except Exception as e:
LOG.exception(e)
@ -108,28 +120,30 @@ class SubcloudStateManager(manager.Manager):
if endpoint_type:
# updating a single endpoint on a single subcloud
for subcloud_status in subcloud_status_list:
if subcloud_status['endpoint_type'] == endpoint_type:
if subcloud_status['sync_status'] == sync_status:
if subcloud_status["endpoint_type"] == endpoint_type:
if subcloud_status["sync_status"] == sync_status:
# No change in the sync_status
LOG.debug("Sync status (%s) for subcloud %s did "
"not change - ignore update" %
(sync_status, subcloud.name))
LOG.debug(
"Sync status (%s) for subcloud %s did not change "
"- ignore update" % (sync_status, subcloud.name)
)
return
# We found the endpoint
break
else:
# We did not find the endpoint
raise exceptions.BadRequest(
resource='subcloud',
msg='Endpoint %s not found for subcloud' %
endpoint_type)
resource="subcloud",
msg="Endpoint %s not found for subcloud" % endpoint_type,
)
LOG.info("Updating subcloud:%s endpoint:%s sync:%s" %
(subcloud.name, endpoint_type, sync_status))
db_api.subcloud_status_update(context,
subcloud_id,
endpoint_type,
sync_status)
LOG.info(
"Updating subcloud:%s endpoint:%s sync:%s"
% (subcloud.name, endpoint_type, sync_status)
)
db_api.subcloud_status_update(
context, subcloud_id, endpoint_type, sync_status
)
# Trigger subcloud audits for the subcloud after
# its identity endpoint turns to other status from unknown
@ -137,33 +151,38 @@ class SubcloudStateManager(manager.Manager):
is_identity_unknown = (
original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN
)
if endpoint_type == dccommon_consts.ENDPOINT_TYPE_IDENTITY \
and is_sync_unknown and is_identity_unknown:
if (
endpoint_type == dccommon_consts.ENDPOINT_TYPE_IDENTITY
and is_sync_unknown
and is_identity_unknown
):
if not subcloud.first_identity_sync_complete:
db_api.subcloud_update(context, subcloud_id,
first_identity_sync_complete=True)
LOG.debug('Request for audits for %s after updating '
'identity out of unknown' % subcloud.name)
self.audit_rpc_client.trigger_subcloud_audits(
context, subcloud_id)
db_api.subcloud_update(
context, subcloud_id, first_identity_sync_complete=True
)
LOG.debug(
"Request for audits for %s after updating "
"identity out of unknown" % subcloud.name
)
self.audit_rpc_client.trigger_subcloud_audits(context, subcloud_id)
entity_instance_id = "subcloud=%s.resource=%s" % \
(subcloud.name, endpoint_type)
fault = self.fm_api.get_fault(
ALARM_OUT_OF_SYNC,
entity_instance_id)
entity_instance_id = "subcloud=%s.resource=%s" % (
subcloud.name,
endpoint_type,
)
fault = self.fm_api.get_fault(ALARM_OUT_OF_SYNC, entity_instance_id)
if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) \
and fault:
if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) and fault:
try:
self.fm_api.clear_fault(
ALARM_OUT_OF_SYNC,
entity_instance_id)
self.fm_api.clear_fault(ALARM_OUT_OF_SYNC, entity_instance_id)
except Exception as e:
LOG.exception(e)
elif not fault and alarmable and \
(sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
elif (
not fault
and alarmable
and (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
@ -173,15 +192,17 @@ class SubcloudStateManager(manager.Manager):
entity_type_id=entity_type_id,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
reason_text=("%s %s sync_status is "
"out-of-sync" %
(subcloud.name, endpoint_type)),
reason_text=(
"%s %s sync_status is out-of-sync"
% (subcloud.name, endpoint_type)
),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
proposed_repair_action="If problem persists "
"contact next level "
"of support",
service_affecting=False)
proposed_repair_action=(
"If problem persists contact next level of support"
),
service_affecting=False,
)
self.fm_api.set_fault(fault)
@ -190,9 +211,11 @@ class SubcloudStateManager(manager.Manager):
else:
# update all endpoints on this subcloud
LOG.info("Updating all endpoints on subcloud: %s sync: %s "
"ignore_endpoints: %s" %
(subcloud.name, sync_status, ignore_endpoints))
LOG.info(
"Updating all endpoints on subcloud: %s sync: %s "
"ignore_endpoints: %s"
% (subcloud.name, sync_status, ignore_endpoints)
)
# TODO(yuxing): The following code can be further optimized when
# batch alarm clearance APIs are available, so we don't need to
@ -209,28 +232,32 @@ class SubcloudStateManager(manager.Manager):
continue
endpoint_to_update_list.append(endpoint)
entity_instance_id = "subcloud=%s.resource=%s" % \
(subcloud.name, endpoint)
entity_instance_id = "subcloud=%s.resource=%s" % (
subcloud.name,
endpoint,
)
fault = self.fm_api.get_fault(
ALARM_OUT_OF_SYNC,
entity_instance_id)
fault = self.fm_api.get_fault(ALARM_OUT_OF_SYNC, entity_instance_id)
# TODO(yuxing): batch clear all the out-of-sync alarms of a
# given subcloud if fm_api support it. Be careful with the
# dc-cert endpoint when adding the above; the endpoint
# alarm must remain for offline subclouds.
if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) \
and fault:
if (
sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
) and fault:
try:
self.fm_api.clear_fault(
ALARM_OUT_OF_SYNC,
entity_instance_id)
ALARM_OUT_OF_SYNC, entity_instance_id
)
except Exception as e:
LOG.exception(e)
elif not fault and alarmable and \
(sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
elif (
not fault
and alarmable
and (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
fault = fm_api.Fault(
@ -239,15 +266,17 @@ class SubcloudStateManager(manager.Manager):
entity_type_id=entity_type_id,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
reason_text=("%s %s sync_status is "
"out-of-sync" %
(subcloud.name, endpoint)),
reason_text=(
"%s %s sync_status is out-of-sync"
% (subcloud.name, endpoint)
),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
proposed_repair_action="If problem persists "
"contact next level "
"of support",
service_affecting=False)
proposed_repair_action=(
"If problem persists contact next level of support"
),
service_affecting=False,
)
self.fm_api.set_fault(fault)
except Exception as e:
@ -256,10 +285,8 @@ class SubcloudStateManager(manager.Manager):
if endpoint_to_update_list:
try:
db_api.subcloud_status_update_endpoints(
context,
subcloud_id,
endpoint_to_update_list,
sync_status)
context, subcloud_id, endpoint_to_update_list, sync_status
)
except Exception as e:
LOG.exception(e)
@ -287,30 +314,30 @@ class SubcloudStateManager(manager.Manager):
# the sync status update must be done first.
#
is_in_sync = sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC
is_online = subcloud.availability_status == \
dccommon_consts.AVAILABILITY_ONLINE
is_managed = subcloud.management_state == \
dccommon_consts.MANAGEMENT_MANAGED
is_endpoint_type_dc_cert = endpoint_type == \
dccommon_consts.ENDPOINT_TYPE_DC_CERT
is_online = subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE
is_managed = subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED
is_endpoint_type_dc_cert = (
endpoint_type == dccommon_consts.ENDPOINT_TYPE_DC_CERT
)
is_secondary = subcloud.deploy_status == consts.DEPLOY_STATE_SECONDARY
is_sync_unknown = sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
is_secondary_and_sync_unknown = is_secondary and is_sync_unknown
return (
(not is_in_sync
or (is_online and (is_managed or is_endpoint_type_dc_cert)))
(not is_in_sync or (is_online and (is_managed or is_endpoint_type_dc_cert)))
and not is_secondary
) or is_secondary_and_sync_unknown
@sync_update_subcloud_endpoint_status
def _update_subcloud_endpoint_status(
self, context,
subcloud_region,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None):
self,
context,
subcloud_region,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None,
):
"""Update subcloud endpoint status
:param context: request context object
@ -327,8 +354,8 @@ class SubcloudStateManager(manager.Manager):
if not subcloud_region:
raise exceptions.BadRequest(
resource='subcloud',
msg='Subcloud region not provided')
resource="subcloud", msg="Subcloud region not provided"
)
try:
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
@ -340,21 +367,31 @@ class SubcloudStateManager(manager.Manager):
# update a single subcloud
try:
self._do_update_subcloud_endpoint_status(
context, subcloud.id, endpoint_type, sync_status,
alarmable, ignore_endpoints
context,
subcloud.id,
endpoint_type,
sync_status,
alarmable,
ignore_endpoints,
)
except Exception as e:
LOG.exception(e)
raise e
else:
LOG.info("Ignoring subcloud sync_status update for subcloud:%s "
"availability:%s management:%s endpoint:%s sync:%s" %
(subcloud.name, subcloud.availability_status,
subcloud.management_state, endpoint_type, sync_status))
LOG.info(
"Ignoring subcloud sync_status update for subcloud:%s "
"availability:%s management:%s endpoint:%s sync:%s"
% (
subcloud.name,
subcloud.availability_status,
subcloud.management_state,
endpoint_type,
sync_status,
)
)
def bulk_update_subcloud_availability_and_endpoint_status(
self, context, subcloud_name, subcloud_region, availability_data,
endpoint_data
self, context, subcloud_name, subcloud_region, availability_data, endpoint_data
):
# This bulk update is executed as part of the audit process in dcmanager and
# its related endpoints. This method is not used by dcorch and cert-mon.
@ -362,21 +399,20 @@ class SubcloudStateManager(manager.Manager):
try:
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
except Exception:
LOG.exception(
f"Failed to get subcloud by region name {subcloud_region}"
)
LOG.exception(f"Failed to get subcloud by region name {subcloud_region}")
raise
if availability_data:
self.update_subcloud_availability(
context, subcloud_region, availability_data["availability_status"],
context,
subcloud_region,
availability_data["availability_status"],
availability_data["update_state_only"],
availability_data["audit_fail_count"], subcloud
availability_data["audit_fail_count"],
subcloud,
)
if endpoint_data:
self._bulk_update_subcloud_endpoint_status(
context, subcloud, endpoint_data
)
self._bulk_update_subcloud_endpoint_status(context, subcloud, endpoint_data)
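The bulk update below runs under @lockutils.synchronized(LOCK_NAME), which serializes concurrent status writers. A rough stand-in for that decorator using a plain threading lock — oslo's version can also take inter-process file locks, which this sketch omits:

import threading

_locks = {}


def synchronized(name):
    # One shared lock per name, so callers using the same name serialize.
    lock = _locks.setdefault(name, threading.Lock())

    def decorator(func):
        def wrapper(*args, **kwargs):
            with lock:
                return func(*args, **kwargs)

        return wrapper

    return decorator


@synchronized("dcmanager-subcloud-state")
def bulk_update(subcloud, statuses):
    return "%s: %s" % (subcloud, statuses)


print(bulk_update("subcloud1", {"identity": "in-sync"}))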
@lockutils.synchronized(LOCK_NAME)
def _do_bulk_update_subcloud_endpoint_status(
@ -413,8 +449,7 @@ class SubcloudStateManager(manager.Manager):
except Exception as e:
LOG.exception(e)
elif not fault and \
(sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
elif not fault and (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
try:
fault = fm_api.Fault(
@ -423,15 +458,17 @@ class SubcloudStateManager(manager.Manager):
entity_type_id=entity_type_id,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
reason_text=("%s %s sync_status is "
"out-of-sync" %
(subcloud.name, endpoint)),
reason_text=(
"%s %s sync_status is "
"out-of-sync" % (subcloud.name, endpoint)
),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
proposed_repair_action="If problem persists "
"contact next level "
"of support",
service_affecting=False)
"contact next level "
"of support",
service_affecting=False,
)
self.fm_api.set_fault(fault)
except Exception as e:
@ -439,7 +476,9 @@ class SubcloudStateManager(manager.Manager):
try:
db_api.subcloud_status_bulk_update_endpoints(
context, subcloud.id, endpoint_list,
context,
subcloud.id,
endpoint_list,
)
except Exception as e:
LOG.exception(
@ -447,9 +486,7 @@ class SubcloudStateManager(manager.Manager):
f"endpoint status: {e}"
)
def _bulk_update_subcloud_endpoint_status(
self, context, subcloud, endpoint_list
):
def _bulk_update_subcloud_endpoint_status(self, context, subcloud, endpoint_list):
"""Update the sync status of a list of subcloud endpoints
:param context: current context object
@ -483,12 +520,14 @@ class SubcloudStateManager(manager.Manager):
)
def update_subcloud_endpoint_status(
self, context,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None):
self,
context,
subcloud_region=None,
endpoint_type=None,
sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
alarmable=True,
ignore_endpoints=None,
):
"""Update subcloud endpoint status
:param context: request context object
@ -505,83 +544,108 @@ class SubcloudStateManager(manager.Manager):
if subcloud_region:
self._update_subcloud_endpoint_status(
context, subcloud_region, endpoint_type, sync_status, alarmable,
ignore_endpoints)
context,
subcloud_region,
endpoint_type,
sync_status,
alarmable,
ignore_endpoints,
)
else:
# update all subclouds
for subcloud in db_api.subcloud_get_all(context):
self._update_subcloud_endpoint_status(
context, subcloud.region_name, endpoint_type, sync_status,
alarmable, ignore_endpoints)
context,
subcloud.region_name,
endpoint_type,
sync_status,
alarmable,
ignore_endpoints,
)
def _update_subcloud_state(self, context, subcloud_name, subcloud_region,
management_state, availability_status):
def _update_subcloud_state(
self,
context,
subcloud_name,
subcloud_region,
management_state,
availability_status,
):
try:
LOG.info('Notifying dcorch, subcloud:%s management: %s, '
'availability:%s' %
(subcloud_name,
management_state,
availability_status))
LOG.info(
"Notifying dcorch, subcloud:%s management: %s, availability:%s"
% (subcloud_name, management_state, availability_status)
)
self.dcorch_rpc_client.update_subcloud_states(
context, subcloud_region, management_state, availability_status)
context, subcloud_region, management_state, availability_status
)
except Exception:
LOG.exception('Problem informing dcorch of subcloud state change,'
'subcloud: %s' % subcloud_name)
LOG.exception(
"Problem informing dcorch of subcloud state change, subcloud: %s"
% subcloud_name
)
def _raise_or_clear_subcloud_status_alarm(self, subcloud_name,
availability_status,
deploy_status=None):
def _raise_or_clear_subcloud_status_alarm(
self, subcloud_name, availability_status, deploy_status=None
):
entity_instance_id = "subcloud=%s" % subcloud_name
fault = self.fm_api.get_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE, entity_instance_id
)
if fault and (availability_status == dccommon_consts.AVAILABILITY_ONLINE):
try:
self.fm_api.clear_fault(
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
entity_instance_id)
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE, entity_instance_id
)
except Exception:
LOG.exception("Failed to clear offline alarm for subcloud: %s",
subcloud_name)
LOG.exception(
"Failed to clear offline alarm for subcloud: %s", subcloud_name
)
# Raise the alarm if the subcloud became offline and it's not a
# secondary subcloud
elif not fault and \
(availability_status == dccommon_consts.AVAILABILITY_OFFLINE and
deploy_status != consts.DEPLOY_STATE_SECONDARY):
elif not fault and (
availability_status == dccommon_consts.AVAILABILITY_OFFLINE
and deploy_status != consts.DEPLOY_STATE_SECONDARY
):
try:
fault = fm_api.Fault(
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
alarm_state=fm_const.FM_ALARM_STATE_SET,
entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD,
entity_instance_id=entity_instance_id,
severity=fm_const.FM_ALARM_SEVERITY_CRITICAL,
reason_text=('%s is offline' % subcloud_name),
reason_text=("%s is offline" % subcloud_name),
alarm_type=fm_const.FM_ALARM_TYPE_0,
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_29,
proposed_repair_action="Wait for subcloud to "
"become online; if "
"problem persists contact "
"next level of support.",
service_affecting=True)
proposed_repair_action=(
"Wait for subcloud to become online; if problem persists "
"contact next level of support."
),
service_affecting=True,
)
self.fm_api.set_fault(fault)
except Exception:
LOG.exception("Failed to raise offline alarm for subcloud: %s",
subcloud_name)
LOG.exception(
"Failed to raise offline alarm for subcloud: %s", subcloud_name
)
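The method above clears the offline alarm when the subcloud comes back online and raises it only when the subcloud goes offline, is not a secondary, and no fault already exists. The same decision table condensed into a standalone function, with the dccommon constants inlined as plain strings for illustration:

def alarm_action(fault_exists, availability_status, deploy_status=None):
    if fault_exists and availability_status == "online":
        return "clear"
    if (
        not fault_exists
        and availability_status == "offline"
        and deploy_status != "secondary"
    ):
        return "raise"
    return "none"


assert alarm_action(True, "online") == "clear"
assert alarm_action(False, "offline", "done") == "raise"
assert alarm_action(False, "offline", "secondary") == "none"  # secondary: no alarm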
def update_subcloud_availability(self, context, subcloud_region,
availability_status,
update_state_only=False,
audit_fail_count=None, subcloud=None):
def update_subcloud_availability(
self,
context,
subcloud_region,
availability_status,
update_state_only=False,
audit_fail_count=None,
subcloud=None,
):
if subcloud is None:
try:
subcloud = db_api.subcloud_get_by_region_name(context,
subcloud_region)
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
except Exception:
LOG.exception(
"Failed to get subcloud by region name %s" % subcloud_region
@ -593,29 +657,37 @@ class SubcloudStateManager(manager.Manager):
# subcloud's availability. This is required to compensate
# for rare alarm update failures, which may occur during
# availability updates.
self._raise_or_clear_subcloud_status_alarm(subcloud.name,
availability_status)
self._raise_or_clear_subcloud_status_alarm(
subcloud.name, availability_status
)
# Nothing has changed, but we want to send a state update for this
# subcloud as an audit. Get the most up-to-date data.
self._update_subcloud_state(context, subcloud.name,
subcloud.region_name,
subcloud.management_state,
availability_status)
self._update_subcloud_state(
context,
subcloud.name,
subcloud.region_name,
subcloud.management_state,
availability_status,
)
elif availability_status is None:
# only update the audit fail count
try:
db_api.subcloud_update(self.context, subcloud.id,
audit_fail_count=audit_fail_count)
db_api.subcloud_update(
self.context, subcloud.id, audit_fail_count=audit_fail_count
)
except exceptions.SubcloudNotFound:
# slim possibility subcloud could have been deleted since
# we found it in db, ignore this benign error.
LOG.info('Ignoring SubcloudNotFound when attempting '
'audit_fail_count update: %s' % subcloud.name)
LOG.info(
"Ignoring SubcloudNotFound when attempting "
"audit_fail_count update: %s" % subcloud.name
)
return
else:
self._raise_or_clear_subcloud_status_alarm(subcloud.name,
availability_status)
self._raise_or_clear_subcloud_status_alarm(
subcloud.name, availability_status
)
if availability_status == dccommon_consts.AVAILABILITY_OFFLINE:
# Subcloud is going offline, set all endpoint statuses to
@ -634,70 +706,77 @@ class SubcloudStateManager(manager.Manager):
context,
subcloud.id,
availability_status=availability_status,
audit_fail_count=audit_fail_count)
audit_fail_count=audit_fail_count,
)
except exceptions.SubcloudNotFound:
# slim possibility subcloud could have been deleted since
# we found it in db, ignore this benign error.
LOG.info('Ignoring SubcloudNotFound when attempting state'
' update: %s' % subcloud.name)
LOG.info(
"Ignoring SubcloudNotFound when attempting state update: %s"
% subcloud.name
)
return
if availability_status == dccommon_consts.AVAILABILITY_ONLINE:
# Subcloud is going online
# Tell cert-mon to audit endpoint certificate.
LOG.info('Request for online audit for %s' % subcloud.name)
LOG.info("Request for online audit for %s" % subcloud.name)
dc_notification = rpc_client.DCManagerNotifications()
dc_notification.subcloud_online(context, subcloud.region_name)
# Trigger all the audits for the subcloud so it can update the
# sync status ASAP.
self.audit_rpc_client.trigger_subcloud_audits(context,
subcloud.id)
self.audit_rpc_client.trigger_subcloud_audits(context, subcloud.id)
# Send dcorch a state update
self._update_subcloud_state(context, subcloud.name,
subcloud.region_name,
updated_subcloud.management_state,
availability_status)
def update_subcloud_sync_endpoint_type(self, context,
subcloud_region,
endpoint_type_list,
openstack_installed):
operation = 'add' if openstack_installed else 'remove'
func_switcher = {
'add': (
self.dcorch_rpc_client.add_subcloud_sync_endpoint_type,
db_api.subcloud_status_create
),
'remove': (
self.dcorch_rpc_client.remove_subcloud_sync_endpoint_type,
db_api.subcloud_status_delete
self._update_subcloud_state(
context,
subcloud.name,
subcloud.region_name,
updated_subcloud.management_state,
availability_status,
)
def update_subcloud_sync_endpoint_type(
self, context, subcloud_region, endpoint_type_list, openstack_installed
):
operation = "add" if openstack_installed else "remove"
func_switcher = {
"add": (
self.dcorch_rpc_client.add_subcloud_sync_endpoint_type,
db_api.subcloud_status_create,
),
"remove": (
self.dcorch_rpc_client.remove_subcloud_sync_endpoint_type,
db_api.subcloud_status_delete,
),
}
try:
subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
except Exception:
LOG.exception(
"Failed to get subcloud by region name: %s" % subcloud_region
)
LOG.exception("Failed to get subcloud by region name: %s" % subcloud_region)
raise
try:
# Notify dcorch to add/remove sync endpoint type list
func_switcher[operation][0](self.context, subcloud_region,
endpoint_type_list)
LOG.info('Notifying dcorch, subcloud: %s new sync endpoint: %s' %
(subcloud.name, endpoint_type_list))
func_switcher[operation][0](
self.context, subcloud_region, endpoint_type_list
)
LOG.info(
"Notifying dcorch, subcloud: %s new sync endpoint: %s"
% (subcloud.name, endpoint_type_list)
)
# Update subcloud status table by adding/removing openstack sync
# endpoint types
for endpoint_type in endpoint_type_list:
func_switcher[operation][1](self.context, subcloud.id,
endpoint_type)
func_switcher[operation][1](self.context, subcloud.id, endpoint_type)
# Update openstack_installed of subcloud table
db_api.subcloud_update(self.context, subcloud.id,
openstack_installed=openstack_installed)
db_api.subcloud_update(
self.context, subcloud.id, openstack_installed=openstack_installed
)
except Exception:
LOG.exception('Problem informing dcorch of subcloud sync endpoint'
' type change, subcloud: %s' % subcloud.name)
LOG.exception(
"Problem informing dcorch of subcloud sync endpoint "
"type change, subcloud: %s" % subcloud.name
)
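update_subcloud_sync_endpoint_type above dispatches through a dict of (notify, db) function pairs keyed by "add"/"remove". The same pattern in isolation, with stub functions standing in for the dcorch RPC and db_api calls:

def notify_add(endpoint):
    return "dcorch: add %s" % endpoint


def notify_remove(endpoint):
    return "dcorch: remove %s" % endpoint


def db_create(endpoint):
    return "db: create status row for %s" % endpoint


def db_delete(endpoint):
    return "db: delete status row for %s" % endpoint


func_switcher = {
    "add": (notify_add, db_create),
    "remove": (notify_remove, db_delete),
}

openstack_installed = True
operation = "add" if openstack_installed else "remove"
for endpoint_type in ["volume", "compute"]:
    print(func_switcher[operation][0](endpoint_type))
    print(func_switcher[operation][1](endpoint_type))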

View File

@ -1792,8 +1792,10 @@ class TestSubcloudUpdate(BaseTestSubcloudManager):
fake_bootstrap_address = "10.10.20.12"
self.assertRaisesRegex(
exceptions.BadRequest,
"Cannot update bootstrap_address"
" into rehome data, need to import bootstrap_values first",
(
"Cannot update bootstrap_address into rehome data, need to "
"import bootstrap_values first"
),
self.sm.update_subcloud,
self.ctx,
self.subcloud.id,
@ -1933,10 +1935,12 @@ class TestSubcloudUpdate(BaseTestSubcloudManager):
'"admin_password": "dGVzdHBhc3M=", '
'"bootstrap-address": "123.123.123.123"}}'
)
fake_bootstrap_values = '{"name": "TestSubcloud", \
"system_mode": "simplex", "sysadmin_password": "testpass", \
"ansible_ssh_pass": "fakepass", "ansible_become_pass": "fakepass",\
"admin_password": "testpass"}'
fake_bootstrap_values = (
'{"name": "TestSubcloud",'
'"system_mode": "simplex", "sysadmin_password": "testpass",'
'"ansible_ssh_pass": "fakepass", "ansible_become_pass": "fakepass",'
'"admin_password": "testpass"}'
)
fake_bootstrap_address = "123.123.123.123"
self.sm.update_subcloud(
@ -2030,10 +2034,11 @@ class TestSubcloudUpdate(BaseTestSubcloudManager):
self.subcloud["deploy_status"] = consts.DEPLOY_STATE_DEPLOY_FAILED
self.assertRaisesRegex(
exceptions.BadRequest,
f"Unable to manage {self.subcloud.name}: "
"its deploy_status must be either"
f" '{consts.DEPLOY_STATE_DONE}' or "
f"'{consts.DEPLOY_STATE_REHOME_PENDING}'",
(
f"Unable to manage {self.subcloud.name}: its deploy_status "
f"must be either '{consts.DEPLOY_STATE_DONE}' or "
f"'{consts.DEPLOY_STATE_REHOME_PENDING}'"
),
self.sm.update_subcloud,
self.ctx,
self.subcloud.id,
@ -4042,8 +4047,8 @@ class TestSubcloudBackupRestore(BaseTestSubcloudManager):
self.assertIn(expected_log, return_log)
self.mock_log.info.assert_called_with(
"Subcloud restore backup operation finished.\nRestored subclouds: 0."
" Invalid subclouds: 1. Failed subclouds: 0."
"Subcloud restore backup operation finished.\nRestored subclouds: 0. "
"Invalid subclouds: 1. Failed subclouds: 0."
)
@mock.patch.object(

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -15,4 +15,4 @@
import pbr.version
version_info = pbr.version.VersionInfo('distributedcloud')
version_info = pbr.version.VersionInfo("distributedcloud")

View File

@ -126,9 +126,7 @@ class PatchAPIController(Middleware):
os.remove(fn)
return
except OSError:
msg = (
f"Unable to remove patch file {fn} from the central " "storage."
)
msg = f"Unable to remove patch file {fn} from the central storage."
raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
LOG.info(f"Patch {patch} was not found in {vault}")

View File

@ -294,7 +294,7 @@ endpoint_cache_opt_group = cfg.OptGroup(
)
openstack_cache_opt_group = cfg.OptGroup(
name="openstack_cache", title="Containerized OpenStack" " Credentials"
name="openstack_cache", title="Containerized OpenStack Credentials"
)
fernet_opt_group = cfg.OptGroup(name="fernet", title="Fernet Options")
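The fix above merges two adjacent string literals. Python concatenates adjacent literals at compile time, so the title was already a single string and the change is purely cosmetic:

title = "Containerized OpenStack" " Credentials"  # implicit concatenation
assert title == "Containerized OpenStack Credentials"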

View File

@ -56,8 +56,7 @@ class GenericSyncWorkerManager(object):
for endpoint_type in endpoint_type_list:
LOG.debug(
f"Engine id:({self.engine_id}) create "
f"{subcloud_name}/{endpoint_type}/{management_ip} "
f"sync obj"
f"{subcloud_name}/{endpoint_type}/{management_ip} sync obj"
)
sync_obj = sync_object_class_map[endpoint_type](
subcloud_name, endpoint_type, management_ip
@ -70,9 +69,7 @@ class GenericSyncWorkerManager(object):
f"Engine id:({self.engine_id}) Start to sync "
f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs."
)
LOG.debug(
f"Engine id:({self.engine_id}) Start to sync " f"{subcloud_sync_list}."
)
LOG.debug(f"Engine id:({self.engine_id}) Start to sync {subcloud_sync_list}.")
for sc_region_name, ept, ip in subcloud_sync_list:
try:
@ -373,13 +370,11 @@ class GenericSyncWorkerManager(object):
f"Engine id:({self.engine_id}) Start to audit "
f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs."
)
LOG.debug(
f"Engine id:({self.engine_id}) Start to audit " f"{subcloud_sync_list}."
)
LOG.debug(f"Engine id:({self.engine_id}) Start to audit {subcloud_sync_list}.")
for sc_region_name, ept, ip in subcloud_sync_list:
LOG.debug(
f"Attempt audit_subcloud: " f"{self.engine_id}/{sc_region_name}/{ept}"
f"Attempt audit_subcloud: {self.engine_id}/{sc_region_name}/{ept}"
)
try:
sync_obj = sync_object_class_map[ept](sc_region_name, ept, ip)
@ -391,7 +386,7 @@ class GenericSyncWorkerManager(object):
LOG.debug(
f"Engine id:({self.engine_id}/{sc_region_name}/{ept}) "
f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
f"has been removed"
"has been removed"
)
except Exception as e:
LOG.error(

View File

@ -59,7 +59,7 @@ class InitialSyncWorkerManager(object):
)
except Exception as e:
LOG.error(
f"Exception occurred when running initial_sync for "
"Exception occurred when running initial_sync for "
f"subcloud {sc_region_name}: {e}"
)
@ -82,9 +82,7 @@ class InitialSyncWorkerManager(object):
)
if result == 0:
# Sync is no longer required
LOG.debug(
f"Initial sync for subcloud {subcloud_name} " f"no longer required"
)
LOG.debug(f"Initial sync for subcloud {subcloud_name} no longer required")
return
# sync_objs stores the sync object per endpoint
@ -131,12 +129,12 @@ class InitialSyncWorkerManager(object):
pass
else:
LOG.error(
f"Unexpected new_state {new_state} for " f"subcloud {subcloud_name}"
f"Unexpected new_state {new_state} for subcloud {subcloud_name}"
)
else:
LOG.debug(
f"Initial sync was cancelled for subcloud "
f"{subcloud_name} while in progress"
f"Initial sync was cancelled for subcloud {subcloud_name} "
"while in progress"
)
def _reattempt_sync(self, subcloud_name):
@ -159,9 +157,8 @@ class InitialSyncWorkerManager(object):
LOG.debug(f"enabling subcloud {subcloud_name}")
for endpoint_type, sync_obj in sync_objs.items():
LOG.debug(
f"Engine id: {self.engine_id} enabling sync thread "
f"for subcloud {subcloud_name} and "
f"endpoint type {endpoint_type}."
f"Engine id: {self.engine_id} enabling sync thread for subcloud "
f"{subcloud_name} and endpoint type {endpoint_type}."
)
sync_obj.enable()
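Several hunks in this file collapse pairs of adjacent f-strings into one literal. Behavior is identical either way, since adjacent literals — f-strings included — concatenate at compile time:

name = "subcloud1"
two_literals = f"enabling sync thread for subcloud " f"{name}"
one_literal = f"enabling sync thread for subcloud {name}"
assert two_literals == one_literal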

View File

@ -1,4 +1,4 @@
# Copyright (c) 2017-2024, 2024 Wind River Systems, Inc.
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -95,7 +95,7 @@ class SysinvSyncThread(SyncThread):
sc_sysinv_url = build_subcloud_endpoint(self.management_ip, "sysinv")
LOG.debug(
f"Built sc_sysinv_url {sc_sysinv_url} for subcloud " f"{self.subcloud_name}"
f"Built sc_sysinv_url {sc_sysinv_url} for subcloud {self.subcloud_name}"
)
self.sc_sysinv_client = SysinvClient(
@ -266,8 +266,9 @@ class SysinvSyncThread(SyncThread):
]
LOG.info(
"certificate {} {} [{}] updated with subcloud certificates:"
" {}".format(rsrc.id, subcloud_rsrc_id, signature, sub_certs_updated),
"certificate {} {} [{}] updated with subcloud certificates: {}".format(
rsrc.id, subcloud_rsrc_id, signature, sub_certs_updated
),
extra=self.log_extra,
)
@ -381,8 +382,9 @@ class SysinvSyncThread(SyncThread):
if not passwd_hash:
LOG.info(
"sync_user no user update found in resource_info"
"{}".format(request.orch_job.resource_info),
"sync_user no user update found in resource_info {}".format(
request.orch_job.resource_info
),
extra=self.log_extra,
)
return
@ -531,16 +533,18 @@ class SysinvSyncThread(SyncThread):
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"get subcloud_resources {}: subcloud {} is not reachable"
"[{}]".format(resource_type, self.region_name, str(e)),
"get subcloud_resources {}: subcloud {} is not reachable [{}]".format(
resource_type, self.region_name, str(e)
),
extra=self.log_extra,
)
# None will force skip of audit
return None
except exceptions.NotAuthorized as e:
LOG.info(
"get subcloud_resources {}: subcloud {} not authorized"
"[{}]".format(resource_type, self.region_name, str(e)),
"get subcloud_resources {}: subcloud {} not authorized [{}]".format(
resource_type, self.region_name, str(e)
),
extra=self.log_extra,
)
OpenStackDriver.delete_region_clients(self.region_name)

View File

@ -392,7 +392,7 @@ class SyncThread(object):
# Early exit in case there are no pending sync requests
if not sync_requests:
LOG.debug(
"Sync resources done for subcloud - " "no sync requests",
"Sync resources done for subcloud - no sync requests",
extra=self.log_extra,
)
self.set_sync_status(dccommon_consts.SYNC_STATUS_IN_SYNC)
@ -432,13 +432,13 @@ class SyncThread(object):
if not actual_sync_requests:
LOG.info(
"Sync resources done for subcloud - " "no valid sync requests",
"Sync resources done for subcloud - no valid sync requests",
extra=self.log_extra,
)
return
elif not self.is_subcloud_enabled():
LOG.info(
"Sync resources done for subcloud - " "subcloud is disabled",
"Sync resources done for subcloud - subcloud is disabled",
extra=self.log_extra,
)
return

View File

@ -50,7 +50,7 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a " "resource_id set",
reason="cannot create a Subcloud object without a resource_id set",
)
updates = self.obj_get_changes()
@ -59,7 +59,7 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a " "endpoint_type set",
reason="cannot create a Subcloud object without a endpoint_type set",
)
updates = self.obj_get_changes()
@ -68,8 +68,7 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"operation_type set",
reason="cannot create a Subcloud object without a operation_type set",
)
db_orch_job = db_api.orch_job_create(

View File

@ -54,7 +54,7 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a " "orch_job_id set",
reason="cannot create a Subcloud object without a orch_job_id set",
)
updates = self.obj_get_changes()
@ -63,8 +63,9 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"target_region_name set",
reason=(
"cannot create a Subcloud object without a target_region_name set"
),
)
db_orch_request = db_api.orch_request_create(

View File

@ -46,7 +46,7 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Resource object without a " "resource_type set",
reason="cannot create a Resource object without a resource_type set",
)
db_resource = db_api.resource_create(self._context, resource_type, updates)

View File

@ -56,7 +56,7 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a " "region_name set",
reason="cannot create a Subcloud object without a region_name set",
)
try:
db_subcloud = db_api.subcloud_create(self._context, region_name, updates)

View File

@ -9,12 +9,6 @@ modules = [
"dcdbsync",
"dcagent",
"dcorch",
"dcmanager/api",
"dcmanager/audit",
"dcmanager/common",
"dcmanager/db",
"dcmanager/orchestrator",
"dcmanager/tests",
"dcmanager",
]
@ -22,14 +16,9 @@ modules = [
formatted_modules = [
"dccommon",
"dcdbsync",
"dcorch",
"dcagent",
"dcmanager/api",
"dcmanager/audit",
"dcmanager/common",
"dcmanager/db",
"dcmanager/orchestrator",
"dcmanager/tests",
"dcorch",
"dcmanager",
]
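With every dcmanager subpackage now formatted, the per-directory entries collapse into a single "dcmanager" item. A hypothetical driver loop over such a list — the module names come from the file above, but the black invocation is an assumption about how a tox wrapper might consume it:

import subprocess

formatted_modules = ["dccommon", "dcdbsync", "dcagent", "dcorch", "dcmanager"]

for module in formatted_modules:
    # --check reports files Black would reformat without rewriting them.
    subprocess.run(["black", "--check", module], check=False)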

View File

@ -1,4 +1,5 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2024 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -25,6 +26,4 @@ except ImportError:
pass
# Danger - pbr requirement >= 2.0.0 not satisfied...
setuptools.setup(
setup_requires=['pbr>=1.8.0'],
pbr=True)
setuptools.setup(setup_requires=["pbr>=1.8.0"], pbr=True)
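With pbr=True, setuptools defers package metadata to setup.cfg and pbr derives the version from git tags; the version_info object seen earlier in this diff is how the package exposes it. A minimal check, assuming pbr and the distributedcloud package metadata are installed:

import pbr.version

# Resolves the installed distribution's version via pbr (git tag / metadata).
print(pbr.version.VersionInfo("distributedcloud").version_string())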