Apply black formatter to dcmanager
This commit applies the Black format to the `dcmanager` files to
ensure that they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50444

Change-Id: I4a8af46e24d4b5da2757f0a4e20a50a69523c44a
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
parent 2967ee254c
commit 8c27f069dd
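For reference, the rewrites below are the kind Black performs mechanically:
normalizing single quotes to double quotes and re-wrapping long calls with
one argument per line and a trailing comma. A minimal sketch of reproducing
one such rewrite through Black's Python API (this assumes the `black`
package is installed; the sample line is taken from the first hunk of this
diff, and the line length of 88 is Black's default, not a value confirmed
by this commit):

```python
import black

# Pre-Black style: single-quoted string, as on the old side of the diff.
src = "LOG = logging.getLogger('dcmanager.api')\n"

# black.format_str() applies the same rules as the CLI; black.Mode() holds
# the configuration (line length, string normalization, etc.).
formatted = black.format_str(src, mode=black.Mode(line_length=88))

print(formatted, end="")  # LOG = logging.getLogger("dcmanager.api")
```

The gate named in the Test Plan (stx-distcloud-tox-black) presumably runs
`black --check` over the tree, failing if any file would be reformatted.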
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
+# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -16,4 +15,4 @@
 import pbr.version
 
 
-__version__ = pbr.version.VersionInfo('distributedcloud').version_string()
+__version__ = pbr.version.VersionInfo("distributedcloud").version_string()
@@ -23,6 +23,7 @@ import logging as std_logging
 import sys
 
 import eventlet
+
 eventlet.monkey_patch(os=False)
 
 # pylint: disable=wrong-import-position
@@ -36,11 +37,12 @@ from dcmanager.api import app  # noqa: E402
 from dcmanager.common import config  # noqa: E402
 from dcmanager.common import messaging  # noqa: E402
 from dcorch.common import messaging as dcorch_messaging  # noqa: E402
 
 # pylint: enable=wrong-import-position
 
 CONF = cfg.CONF
 config.register_options()
-LOG = logging.getLogger('dcmanager.api')
+LOG = logging.getLogger("dcmanager.api")
 
+
 def main():
@@ -56,8 +58,10 @@ def main():
         LOG.warning("Wrong worker number, worker = %(workers)s", workers)
         workers = 1
 
-    LOG.info("Server on http://%(host)s:%(port)s with %(workers)s",
-             {'host': host, 'port': port, 'workers': workers})
+    LOG.info(
+        "Server on http://%(host)s:%(port)s with %(workers)s",
+        {"host": host, "port": port, "workers": workers},
+    )
     messaging.setup()
     dcorch_messaging.setup()
     systemd.notify_once()
@@ -72,5 +76,5 @@ def main():
     app.wait()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -19,6 +19,7 @@ DC Manager Audit Service.
 """
 
 import eventlet
+
 eventlet.monkey_patch()
 
 # pylint: disable=wrong-import-position
@@ -29,28 +30,28 @@ from oslo_service import service  # noqa: E402
 
 from dcmanager.common import config  # noqa: E402
 from dcmanager.common import messaging  # noqa: E402
 
 # pylint: enable=wrong-import-position
 
 _lazy.enable_lazy()
 config.register_options()
 config.register_keystone_options()
-LOG = logging.getLogger('dcmanager.audit')
+LOG = logging.getLogger("dcmanager.audit")
 
 CONF = cfg.CONF
 
+
 def main():
     logging.register_options(CONF)
-    CONF(project='dcmanager', prog='dcmanager-audit')
-    logging.setup(cfg.CONF, 'dcmanager-audit')
+    CONF(project="dcmanager", prog="dcmanager-audit")
+    logging.setup(cfg.CONF, "dcmanager-audit")
     logging.set_defaults()
     messaging.setup()
 
     from dcmanager.audit import service as audit
 
     srv = audit.DCManagerAuditService()
-    launcher = service.launch(cfg.CONF,
-                              srv, workers=CONF.audit_workers)
+    launcher = service.launch(cfg.CONF, srv, workers=CONF.audit_workers)
 
     LOG.info("Starting...")
     LOG.debug("Configuration:")
@@ -59,5 +60,5 @@ def main():
     launcher.wait()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -19,6 +19,7 @@ DC Manager Audit Worker Service.
 """
 
 import eventlet
+
 eventlet.monkey_patch()
 
 # pylint: disable=wrong-import-position
@@ -29,28 +30,28 @@ from oslo_service import service  # noqa: E402
 
 from dcmanager.common import config  # noqa: E402
 from dcmanager.common import messaging  # noqa: E402
 
 # pylint: enable=wrong-import-position
 
 _lazy.enable_lazy()
 config.register_options()
 config.register_keystone_options()
-LOG = logging.getLogger('dcmanager.audit-worker')
+LOG = logging.getLogger("dcmanager.audit-worker")
 
 CONF = cfg.CONF
 
+
 def main():
     logging.register_options(CONF)
-    CONF(project='dcmanager', prog='dcmanager-audit-worker')
-    logging.setup(cfg.CONF, 'dcmanager-audit-worker')
+    CONF(project="dcmanager", prog="dcmanager-audit-worker")
+    logging.setup(cfg.CONF, "dcmanager-audit-worker")
     logging.set_defaults()
     messaging.setup()
 
     from dcmanager.audit import service as audit
 
     srv = audit.DCManagerAuditWorkerService()
-    launcher = service.launch(cfg.CONF,
-                              srv, workers=CONF.audit_worker_workers)
+    launcher = service.launch(cfg.CONF, srv, workers=CONF.audit_worker_workers)
 
     LOG.info("Starting...")
     LOG.debug("Configuration:")
@@ -59,5 +60,5 @@ def main():
     launcher.wait()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -45,32 +45,37 @@ def do_db_sync():
 
 
 def add_command_parsers(subparsers):
-    parser = subparsers.add_parser('db_version')
+    parser = subparsers.add_parser("db_version")
     parser.set_defaults(func=do_db_version)
 
-    parser = subparsers.add_parser('db_sync')
+    parser = subparsers.add_parser("db_sync")
     parser.set_defaults(func=do_db_sync)
-    parser.add_argument('version', nargs='?')
-    parser.add_argument('current_version', nargs='?')
+    parser.add_argument("version", nargs="?")
+    parser.add_argument("current_version", nargs="?")
 
 
-command_opt = cfg.SubCommandOpt('command',
-                                title='Commands',
-                                help='Show available commands.',
-                                handler=add_command_parsers)
+command_opt = cfg.SubCommandOpt(
+    "command",
+    title="Commands",
+    help="Show available commands.",
+    handler=add_command_parsers,
+)
 
 
 def main():
     logging.register_options(CONF)
-    logging.setup(CONF, 'dcmanager-manage')
+    logging.setup(CONF, "dcmanager-manage")
     CONF.register_cli_opt(command_opt)
 
     try:
-        default_config_files = cfg.find_config_files('dcmanager',
-                                                     'dcmanager-engine')
-        CONF(sys.argv[1:], project='dcmanager', prog='dcmanager-manage',
-             version=version.version_info.version_string(),
-             default_config_files=default_config_files)
+        default_config_files = cfg.find_config_files("dcmanager", "dcmanager-engine")
+        CONF(
+            sys.argv[1:],
+            project="dcmanager",
+            prog="dcmanager-manage",
+            version=version.version_info.version_string(),
+            default_config_files=default_config_files,
+        )
     except RuntimeError as e:
         sys.exit("ERROR: %s" % e)
 
@@ -80,5 +85,5 @@ def main():
         sys.exit("ERROR: %s" % e)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -33,28 +33,27 @@ from dcmanager.common import config  # noqa: E402
 from dcmanager.common import consts  # noqa: E402
 from dcmanager.common import messaging  # noqa: E402
 from dcorch.common import messaging as dcorch_messaging  # noqa: E402
 
 # pylint: enable=wrong-import-position
 
 _lazy.enable_lazy()
 config.register_options()
 config.register_keystone_options()
-LOG = logging.getLogger('dcmanager.engine')
+LOG = logging.getLogger("dcmanager.engine")
 
+
 def main():
     logging.register_options(cfg.CONF)
-    cfg.CONF(project='dcmanager', prog='dcmanager-engine')
-    logging.setup(cfg.CONF, 'dcmanager-engine')
+    cfg.CONF(project="dcmanager", prog="dcmanager-engine")
+    logging.setup(cfg.CONF, "dcmanager-engine")
     logging.set_defaults()
     messaging.setup()
     dcorch_messaging.setup()
 
     from dcmanager.manager import service as manager
 
-    srv = manager.DCManagerService(cfg.CONF.host,
-                                   consts.TOPIC_DC_MANAGER)
-    launcher = service.launch(cfg.CONF,
-                              srv, workers=cfg.CONF.workers)
+    srv = manager.DCManagerService(cfg.CONF.host, consts.TOPIC_DC_MANAGER)
+    launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.workers)
 
     LOG.info("Starting...")
     LOG.debug("Configuration:")
@@ -65,5 +64,5 @@ def main():
     launcher.wait()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -19,6 +19,7 @@ DC Manager Orchestrator Service.
 """
 
 import eventlet
+
 eventlet.monkey_patch()
 
 # pylint: disable=wrong-import-position
@@ -29,10 +30,11 @@ from oslo_service import service  # noqa: E402
 
 from dcmanager.common import config  # noqa: E402
 from dcmanager.common import messaging  # noqa: E402
 
 # pylint: enable=wrong-import-position
 
 CONF = cfg.CONF
-LOG = logging.getLogger('dcmanager.orchestrator')
+LOG = logging.getLogger("dcmanager.orchestrator")
 
+
 def main():
@@ -40,16 +42,15 @@ def main():
     config.register_options()
     config.register_keystone_options()
     logging.register_options(CONF)
-    CONF(project='dcmanager', prog='dcmanager-orchestrator')
-    logging.setup(CONF, 'dcmanager-orchestrator')
+    CONF(project="dcmanager", prog="dcmanager-orchestrator")
+    logging.setup(CONF, "dcmanager-orchestrator")
     logging.set_defaults()
     messaging.setup()
 
     from dcmanager.orchestrator import service as orchestrator
 
     srv = orchestrator.DCManagerOrchestratorService()
-    launcher = service.launch(CONF,
-                              srv, workers=cfg.CONF.orch_workers)
+    launcher = service.launch(CONF, srv, workers=cfg.CONF.orch_workers)
 
     LOG.info("Starting...")
     LOG.debug("Configuration:")
@@ -58,5 +59,5 @@ def main():
     launcher.wait()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -24,6 +24,7 @@ DC Manager State Engine Server.
 """
 
 import eventlet
+
 eventlet.monkey_patch()
 
 # pylint: disable=wrong-import-position
@@ -35,18 +36,19 @@ from oslo_service import service  # noqa: E402
 from dcmanager.common import config  # noqa: E402
 from dcmanager.common import messaging  # noqa: E402
 from dcorch.common import messaging as dcorch_messaging  # noqa: E402
 
 # pylint: enable=wrong-import-position
 
 _lazy.enable_lazy()
 config.register_options()
 config.register_keystone_options()
-LOG = logging.getLogger('dcmanager.state')
+LOG = logging.getLogger("dcmanager.state")
 
+
 def main():
     logging.register_options(cfg.CONF)
-    cfg.CONF(project='dcmanager', prog='dcmanager-state')
-    logging.setup(cfg.CONF, 'dcmanager-state')
+    cfg.CONF(project="dcmanager", prog="dcmanager-state")
+    logging.setup(cfg.CONF, "dcmanager-state")
     logging.set_defaults()
     messaging.setup()
     dcorch_messaging.setup()
@@ -55,18 +57,21 @@ def main():
 
     # Override values from /etc/dcmanager/dcmanager.conf specific
     # to dcmanager-state:
-    cfg.CONF.set_override('max_pool_size', 10, group='database')
-    cfg.CONF.set_override('max_overflow', 100, group='database')
+    cfg.CONF.set_override("max_pool_size", 10, group="database")
+    cfg.CONF.set_override("max_overflow", 100, group="database")
     LOG.info("Starting...")
     LOG.debug("Configuration:")
     cfg.CONF.log_opt_values(LOG, logging.DEBUG)
 
-    LOG.info("Launching service, host=%s, state_workers=%s ...",
-             cfg.CONF.host, cfg.CONF.state_workers)
+    LOG.info(
+        "Launching service, host=%s, state_workers=%s ...",
+        cfg.CONF.host,
+        cfg.CONF.state_workers,
+    )
     srv = state.DCManagerStateService(cfg.CONF.host)
     launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.state_workers)
     launcher.wait()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -183,8 +183,7 @@ class PeerGroupAssociationCombinationNotFound(NotFound):
 
 class PeerGroupAssociationTargetNotMatch(NotFound):
     message = _(
-        "Peer Group Association with peer site controller "
-        "UUID %(uuid)s doesn't match."
+        "Peer Group Association with peer site controller UUID %(uuid)s doesn't match."
     )
 
 
@@ -237,8 +236,7 @@ class CertificateUploadError(DCManagerException):
 
 class LicenseInstallError(DCManagerException):
     message = _(
-        "Error while installing license on subcloud: "
-        "%(subcloud_id)s. %(error_message)s"
+        "Error while installing license on subcloud: %(subcloud_id)s. %(error_message)s"
     )
 
 
@@ -623,10 +623,7 @@ def validate_install_values(payload, ip_version=None, subcloud=None):
             # the expected value is less than the default. so throw an error.
             pecan.abort(
                 400,
-                _(
-                    "persistent_size of %s MB is less than "
-                    "the permitted minimum %s MB "
-                )
+                _("persistent_size of %s MB is less than the permitted minimum %s MB")
                 % (str(persistent_size), consts.DEFAULT_PERSISTENT_SIZE),
             )
 
@@ -6,12 +6,10 @@
 
 import threading
 
-from oslo_config import cfg
-from oslo_log import log as logging
-
-
 from fm_api import constants as fm_const
 from fm_api import fm_api
+from oslo_config import cfg
+from oslo_log import log as logging
 
 from dccommon import consts as dccommon_consts
 from dcmanager.common import consts
@@ -22,7 +20,6 @@ from dcmanager.common import utils
 from dcmanager.db import api as db_api
 from dcmanager.manager.system_peer_manager import SystemPeerManager
 
-
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
@@ -31,9 +28,8 @@ class PeerGroupAuditManager(manager.Manager):
     """Manages audit related tasks."""
 
     def __init__(self, subcloud_manager, peer_group_id, *args, **kwargs):
-        LOG.debug(_('PeerGroupAuditManager initialization...'))
-        super().__init__(service_name="peer_group_audit_manager",
-                         *args, **kwargs)
+        LOG.debug(_("PeerGroupAuditManager initialization..."))
+        super().__init__(service_name="peer_group_audit_manager", *args, **kwargs)
         self.context = context.get_admin_context()
         self.fm_api = fm_api.FaultAPIs()
         self.subcloud_manager = subcloud_manager
@@ -42,118 +38,121 @@ class PeerGroupAuditManager(manager.Manager):
         self.thread = None
         self.thread_lock = threading.Lock()
 
-    def _get_subclouds_by_peer_group_from_system_peer(self,
-                                                      dc_client,
-                                                      system_peer,
-                                                      peer_group_name):
+    def _get_subclouds_by_peer_group_from_system_peer(
+        self, dc_client, system_peer, peer_group_name
+    ):
         try:
-            subclouds = dc_client.get_subcloud_list_by_peer_group(
-                peer_group_name)
+            subclouds = dc_client.get_subcloud_list_by_peer_group(peer_group_name)
             return subclouds
         except Exception:
-            LOG.exception(f"Failed to get subclouds of peer group "
-                          f"{peer_group_name} from DC: "
-                          f"{system_peer.peer_name}")
+            LOG.exception(
+                f"Failed to get subclouds of peer group {peer_group_name} "
+                f"from DC: {system_peer.peer_name}"
+            )
 
     @staticmethod
-    def _get_association_sync_status_from_peer_site(dc_client,
-                                                    system_peer,
-                                                    peer_group_id):
+    def _get_association_sync_status_from_peer_site(
+        dc_client, system_peer, peer_group_id
+    ):
         try:
             # Get peer site system peer
             dc_peer_system_peer = dc_client.get_system_peer(
-                utils.get_local_system().uuid)
+                utils.get_local_system().uuid
+            )
             association = dc_client.get_peer_group_association_with_peer_id_and_pg_id(
                 dc_peer_system_peer.get("id"), peer_group_id
             )
             return association.get("sync-status")
         except Exception:
-            LOG.exception(f"Failed to get subclouds of peer group "
-                          f"{peer_group_id} from DC: {system_peer.peer_name}")
+            LOG.exception(
+                f"Failed to get subclouds of peer group {peer_group_id} "
+                f"from DC: {system_peer.peer_name}"
+            )
 
-    def _update_remote_peer_group_migration_status(self,
-                                                   system_peer,
-                                                   peer_group_name,
-                                                   migration_status):
+    def _update_remote_peer_group_migration_status(
+        self, system_peer, peer_group_name, migration_status
+    ):
         dc_client = SystemPeerManager.get_peer_dc_client(system_peer)
-        peer_group_kwargs = {
-            'migration_status': migration_status
-        }
-        dc_client.update_subcloud_peer_group(peer_group_name,
-                                             **peer_group_kwargs)
-        LOG.info(f"Updated Subcloud Peer Group {peer_group_name} on "
-                 f"peer site {system_peer.peer_name}, set migration_status "
-                 f"to: {migration_status}")
+        peer_group_kwargs = {"migration_status": migration_status}
+        dc_client.update_subcloud_peer_group(peer_group_name, **peer_group_kwargs)
+        LOG.info(
+            f"Updated Subcloud Peer Group {peer_group_name} on peer site "
+            f"{system_peer.peer_name}, set migration_status to: {migration_status}"
+        )
 
-    def _get_local_subclouds_to_update_and_delete(self,
-                                                  local_peer_group,
-                                                  remote_subclouds,
-                                                  remote_sync_status):
+    def _get_local_subclouds_to_update_and_delete(
+        self, local_peer_group, remote_subclouds, remote_sync_status
+    ):
         local_subclouds_to_update = list()
         local_subclouds_to_delete = list()
         any_rehome_failed = False
-        remote_subclouds_dict = {remote_subcloud.get('region-name'):
-                                 remote_subcloud for remote_subcloud
-                                 in remote_subclouds}
+        remote_subclouds_dict = {
+            remote_subcloud.get("region-name"): remote_subcloud
+            for remote_subcloud in remote_subclouds
+        }
         local_subclouds = db_api.subcloud_get_for_peer_group(
-            self.context, local_peer_group.id)
+            self.context, local_peer_group.id
+        )
 
         for local_subcloud in local_subclouds:
-            remote_subcloud = remote_subclouds_dict.get(
-                local_subcloud.region_name)
+            remote_subcloud = remote_subclouds_dict.get(local_subcloud.region_name)
             if remote_subcloud:
                 # Check if the remote subcloud meets the conditions for update
                 # if it is 'managed' and the local subcloud is not
                 # in 'secondary' status
-                if (remote_subcloud.get('management-state') ==
-                        dccommon_consts.MANAGEMENT_MANAGED and
-                        not utils.subcloud_is_secondary_state(
-                            local_subcloud.deploy_status)):
+                MANAGED = dccommon_consts.MANAGEMENT_MANAGED
+                if remote_subcloud.get(
+                    "management-state"
+                ) == MANAGED and not utils.subcloud_is_secondary_state(
+                    local_subcloud.deploy_status
+                ):
                     local_subclouds_to_update.append(local_subcloud)
                     # Sync rehome_data from remote to local subcloud if the remote
                     # PGA sync_status is out-of-sync once migration completes,
                     # indicating any bootstrap values/address updates to
                     # the subcloud on the remote site.
-                    if remote_sync_status == \
-                            consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC:
+                    if remote_sync_status == consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC:
                         self._sync_rehome_data(
-                            local_subcloud.id, remote_subcloud.get('rehome_data'))
-                elif remote_subcloud.get('deploy-status') in \
-                        (consts.DEPLOY_STATE_REHOME_FAILED,
-                         consts.DEPLOY_STATE_REHOME_PREP_FAILED):
+                            local_subcloud.id, remote_subcloud.get("rehome_data")
+                        )
+                elif remote_subcloud.get("deploy-status") in (
+                    consts.DEPLOY_STATE_REHOME_FAILED,
+                    consts.DEPLOY_STATE_REHOME_PREP_FAILED,
+                ):
                     # Set local subcloud to rehome-failed if the remote is
                     # rehome-failed or rehome-prep-failed, otherwise, the
                     # deploy_status will remain rehome-pending, which will
                     # block the correction of the bootstrap values/address.
                     db_api.subcloud_update(
-                        self.context, local_subcloud.id,
-                        deploy_status=consts.DEPLOY_STATE_REHOME_FAILED)
+                        self.context,
+                        local_subcloud.id,
+                        deploy_status=consts.DEPLOY_STATE_REHOME_FAILED,
+                    )
                     any_rehome_failed = True
             else:
                 local_subclouds_to_delete.append(local_subcloud)
 
-        return local_subclouds_to_update, local_subclouds_to_delete, \
-            any_rehome_failed
+        return local_subclouds_to_update, local_subclouds_to_delete, any_rehome_failed
 
     def _set_local_subcloud_to_secondary(self, subcloud):
         try:
             LOG.info("Set local subcloud %s to secondary" % subcloud.name)
             # There will be an exception when unmanage
             # a subcloud in 'unamaged' state.
-            if subcloud.management_state != \
-                    dccommon_consts.MANAGEMENT_UNMANAGED:
+            if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED:
                 self.subcloud_manager.update_subcloud(
                     self.context,
                     subcloud.id,
-                    management_state=dccommon_consts.
-                    MANAGEMENT_UNMANAGED)
+                    management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
+                )
             self.subcloud_manager.update_subcloud(
-                self.context,
-                subcloud.id,
-                deploy_status=consts.DEPLOY_STATE_SECONDARY)
+                self.context, subcloud.id, deploy_status=consts.DEPLOY_STATE_SECONDARY
+            )
         except Exception as e:
-            LOG.exception(f"Failed to update local non-secondary "
-                          f"and offline subcloud [{subcloud.name}], err: {e}")
+            LOG.exception(
+                "Failed to update local non-secondary and offline subcloud "
+                f"[{subcloud.name}], err: {e}"
+            )
             raise e
 
     def _sync_rehome_data(self, subcloud_id, rehome_data):
@@ -164,86 +163,99 @@ class PeerGroupAuditManager(manager.Manager):
             LOG.info("Local peer group in migrating state, quit audit")
             return
 
-        LOG.info("Auditing remote subcloud peer group:[%s] "
-                 "migration_status:[%s] group_priority[%s], "
-                 "local subcloud peer group:[%s] "
-                 "migration_status:[%s] group_priority[%s]" %
-                 (remote_peer_group.get("peer_group_name"),
-                  remote_peer_group.get("migration_status"),
-                  remote_peer_group.get("group_priority"),
-                  local_peer_group.peer_group_name,
-                  local_peer_group.migration_status,
-                  local_peer_group.group_priority))
+        LOG.info(
+            "Auditing remote subcloud peer group:[%s] migration_status:[%s] "
+            "group_priority[%s], local subcloud peer group:[%s] "
+            "migration_status:[%s] group_priority[%s]"
+            % (
+                remote_peer_group.get("peer_group_name"),
+                remote_peer_group.get("migration_status"),
+                remote_peer_group.get("group_priority"),
+                local_peer_group.peer_group_name,
+                local_peer_group.migration_status,
+                local_peer_group.group_priority,
+            )
+        )
 
         # if remote subcloud peer group's migration_status is 'migrating',
         # 'unmanaged' all local subclouds in local peer group and change its
         # deploy status to consts.DEPLOY_STATE_REHOME_PENDING to stop cert-mon
        # audits.
-        if remote_peer_group.get("migration_status") == \
-                consts.PEER_GROUP_MIGRATING:
+        if remote_peer_group.get("migration_status") == consts.PEER_GROUP_MIGRATING:
             # Unmanaged all local subclouds of peer group
-            LOG.info(f"Unmanaged all local subclouds of peer group "
-                     f"{local_peer_group.peer_group_name} "
-                     f"since remote is in migrating state")
-            subclouds = db_api.subcloud_get_for_peer_group(self.context,
-                                                           local_peer_group.id)
+            LOG.info(
+                "Unmanaged all local subclouds of peer group "
+                f"{local_peer_group.peer_group_name} since remote is in migrating state"
+            )
+            subclouds = db_api.subcloud_get_for_peer_group(
+                self.context, local_peer_group.id
+            )
             for subcloud in subclouds:
                 try:
                     # update_subcloud raises an exception when trying to umanage
                     # an already unmanaged subcloud, so the deploy status
                     # update must be done separately
-                    if subcloud.management_state != \
-                            dccommon_consts.MANAGEMENT_UNMANAGED:
+                    if (
+                        subcloud.management_state
+                        != dccommon_consts.MANAGEMENT_UNMANAGED
+                    ):
                         # Unmanage and update the deploy-status
-                        LOG.info("Unmanaging and setting the local subcloud "
-                                 f"{subcloud.name} deploy status to "
-                                 f"{consts.DEPLOY_STATE_REHOME_PENDING}")
+                        LOG.info(
+                            "Unmanaging and setting the local subcloud "
+                            f"{subcloud.name} deploy status to "
+                            f"{consts.DEPLOY_STATE_REHOME_PENDING}"
+                        )
                         self.subcloud_manager.update_subcloud(
                             self.context,
                             subcloud.id,
-                            management_state=dccommon_consts.
-                            MANAGEMENT_UNMANAGED,
-                            deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
+                            management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
+                            deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
+                        )
                     else:
                         # Already unmanaged, just update the deploy-status
-                        LOG.info(f"Setting the local subcloud {subcloud.name} "
-                                 "deploy status to "
-                                 f"{consts.DEPLOY_STATE_REHOME_PENDING}")
+                        LOG.info(
+                            f"Setting the local subcloud {subcloud.name} "
+                            f"deploy status to {consts.DEPLOY_STATE_REHOME_PENDING}"
+                        )
                         self.subcloud_manager.update_subcloud(
                             self.context,
                             subcloud.id,
-                            deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
+                            deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
+                        )
                 except Exception as e:
-                    LOG.exception(f"Fail to unmanage local subcloud "
-                                  f"{subcloud.name}, err: {e}")
+                    LOG.exception(
+                        f"Fail to unmanage local subcloud {subcloud.name}, err: {e}"
+                    )
                     raise e
             SystemPeerManager.update_sync_status(
-                self.context, system_peer,
+                self.context,
+                system_peer,
                 consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
-                local_peer_group, remote_peer_group)
+                local_peer_group,
+                remote_peer_group,
+            )
             self.require_audit_flag = False
 
         # if remote subcloud peer group's migration_status is 'complete',
         # get remote subclouds. For 'managed+online' subclouds,
         # set 'unmanaged+secondary' to local on same subclouds
-        elif remote_peer_group.get("migration_status") == \
-                consts.PEER_GROUP_MIGRATION_COMPLETE:
+        elif (
+            remote_peer_group.get("migration_status")
+            == consts.PEER_GROUP_MIGRATION_COMPLETE
+        ):
             dc_client = SystemPeerManager.get_peer_dc_client(system_peer)
-            remote_subclouds = \
-                self._get_subclouds_by_peer_group_from_system_peer(
-                    dc_client,
-                    system_peer,
-                    remote_peer_group.get("peer_group_name"))
-            remote_sync_status = \
-                self._get_association_sync_status_from_peer_site(
-                    dc_client,
-                    system_peer,
-                    remote_peer_group.get("id"))
+            remote_subclouds = self._get_subclouds_by_peer_group_from_system_peer(
+                dc_client, system_peer, remote_peer_group.get("peer_group_name")
+            )
+            remote_sync_status = self._get_association_sync_status_from_peer_site(
+                dc_client, system_peer, remote_peer_group.get("id")
+            )
 
-            local_subclouds_to_update, local_subclouds_to_delete, \
-                any_rehome_failed = \
-                self._get_local_subclouds_to_update_and_delete(
-                    local_peer_group, remote_subclouds, remote_sync_status)
+            local_subclouds_to_update, local_subclouds_to_delete, any_rehome_failed = (
+                self._get_local_subclouds_to_update_and_delete(
+                    local_peer_group, remote_subclouds, remote_sync_status
+                )
+            )
 
             for subcloud in local_subclouds_to_update:
                 self._set_local_subcloud_to_secondary(subcloud)
@@ -253,85 +265,90 @@ class PeerGroupAuditManager(manager.Manager):
             for subcloud in local_subclouds_to_delete:
                 self._set_local_subcloud_to_secondary(subcloud)
                 try:
-                    self.subcloud_manager.delete_subcloud(
-                        self.context, subcloud.id)
+                    self.subcloud_manager.delete_subcloud(self.context, subcloud.id)
                     LOG.info(f"Deleted local subcloud {subcloud.name}")
                 except Exception as e:
                     SystemPeerManager.update_sync_status(
-                        self.context, system_peer,
+                        self.context,
+                        system_peer,
                         consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
-                        local_peer_group, remote_peer_group)
-                    LOG.exception(f"Failed to delete local subcloud "
-                                  f"[{subcloud.name}] that does not exist "
-                                  f"under the same subcloud_peer_group on "
-                                  f"peer site, err: {e}")
+                        local_peer_group,
+                        remote_peer_group,
+                    )
+                    LOG.exception(
+                        f"Failed to delete local subcloud [{subcloud.name}] that does "
+                        "not exist under the same subcloud_peer_group on peer site, "
+                        f"err: {e}"
+                    )
                     raise e
 
             if remote_peer_group.get("system_leader_id") == system_peer.peer_uuid:
-                self._clear_or_raise_alarm(system_peer,
-                                           local_peer_group,
-                                           remote_peer_group)
+                self._clear_or_raise_alarm(
+                    system_peer, local_peer_group, remote_peer_group
+                )
                 db_api.subcloud_peer_group_update(
                     self.context,
                     local_peer_group.id,
                     system_leader_id=system_peer.peer_uuid,
-                    system_leader_name=system_peer.peer_name)
+                    system_leader_name=system_peer.peer_name,
+                )
 
             self._update_remote_peer_group_migration_status(
-                system_peer,
-                remote_peer_group.get("peer_group_name"),
-                None)
+                system_peer, remote_peer_group.get("peer_group_name"), None
+            )
 
-            if not (remote_sync_status == consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
-                    and any_rehome_failed):
+            if not (
+                remote_sync_status == consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
+                and any_rehome_failed
+            ):
                 SystemPeerManager.update_sync_status(
-                    self.context, system_peer,
+                    self.context,
+                    system_peer,
                     consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
-                    local_peer_group, remote_peer_group)
+                    local_peer_group,
+                    remote_peer_group,
+                )
                 self.require_audit_flag = False
         else:
             # If remote peer group migration_status is 'None'
             self.require_audit_flag = False
 
-    def _clear_or_raise_alarm(self,
-                              system_peer,
-                              local_peer_group,
-                              remote_peer_group):
+    def _clear_or_raise_alarm(self, system_peer, local_peer_group, remote_peer_group):
         # If local subcloud peer group's group_priority is
         # lower than remote subcloud peer group's group_priority,
         # an alarm will be raised.
         # lower number means higher priority
-        entity_instance_id = "peer_group=%s,peer=%s" % \
-            (local_peer_group.peer_group_name, system_peer.peer_uuid)
-        if local_peer_group.group_priority < remote_peer_group.get('group_priority'):
-            LOG.warning("Alarm: local subcloud peer group ["
-                        f"{local_peer_group.peer_group_name}] "
-                        f"is managed by remote system ["
-                        f"{system_peer.peer_name}]")
+        entity_instance_id = "peer_group=%s,peer=%s" % (
+            local_peer_group.peer_group_name,
+            system_peer.peer_uuid,
+        )
+        if local_peer_group.group_priority < remote_peer_group.get("group_priority"):
+            LOG.warning(
+                f"Alarm: local subcloud peer group [{local_peer_group.peer_group_name}]"
+                f" is managed by remote system [{system_peer.peer_name}]"
+            )
             try:
                 fault = fm_api.Fault(
-                    alarm_id=fm_const.
-                    FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
+                    alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
                     alarm_state=fm_const.FM_ALARM_STATE_SET,
-                    entity_type_id=fm_const.
-                    FM_ENTITY_TYPE_SUBCLOUD_PEER_GROUP,
+                    entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD_PEER_GROUP,
                     entity_instance_id=entity_instance_id,
                     severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
-                    reason_text=("Subcloud peer group "
-                                 "(peer_group_name=%s) "
-                                 "is managed by remote "
-                                 "system (peer_uuid=%s) "
-                                 "with a lower priority." %
-                                 (local_peer_group.peer_group_name,
-                                  system_peer.peer_uuid)),
+                    reason_text=(
+                        "Subcloud peer group (peer_group_name=%s) is managed by "
+                        "remote system (peer_uuid=%s) with a lower priority."
+                        % (local_peer_group.peer_group_name, system_peer.peer_uuid)
+                    ),
                     alarm_type=fm_const.FM_ALARM_TYPE_0,
-                    probable_cause=fm_const.
-                    ALARM_PROBABLE_CAUSE_UNKNOWN,
-                    proposed_repair_action="Check the reported peer group "
-                    "state. Migrate it back to the current system if the "
-                    "state is 'rehomed' and the current system is stable. "
-                    "Otherwise, wait until these conditions are met.",
-                    service_affecting=False)
+                    probable_cause=fm_const.ALARM_PROBABLE_CAUSE_UNKNOWN,
+                    proposed_repair_action=(
+                        "Check the reported peer group state. Migrate it back to the "
+                        "current system if the state is 'rehomed' and the current "
+                        "system is stable. Otherwise, wait until these conditions "
+                        "are met."
+                    ),
+                    service_affecting=False,
+                )
                 self.fm_api.set_fault(fault)
             except Exception as e:
                 LOG.exception(e)
@@ -339,17 +356,19 @@ class PeerGroupAuditManager(manager.Manager):
         try:
             fault = self.fm_api.get_fault(
                 fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
-                entity_instance_id)
+                entity_instance_id,
+            )
             if fault:
                 LOG.info(f"Clear alarm: {entity_instance_id}")
                 self.fm_api.clear_fault(
                     fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED,
-                    entity_instance_id)
+                    entity_instance_id,
+                )
         except Exception:
             LOG.exception(
-                f"Problem clearing fault [{entity_instance_id}], "
-                f"alarm_id="
-                f"{fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED}")
+                f"Problem clearing fault [{entity_instance_id}], alarm_id="
+                f"{fm_const.FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED}"
+            )
 
     def _do_audit(self, system_peer, remote_peer_group, local_peer_group):
         with self.thread_lock:
@@ -367,20 +386,24 @@
 
     def start(self, system_peer, remote_peer_group, local_peer_group):
         if self.thread_lock.locked():
-            LOG.warning(f"Audit thread for {local_peer_group.peer_group_name} "
-                        f"has already started")
+            LOG.warning(
+                f"Audit thread for {local_peer_group.peer_group_name} "
+                "has already started"
+            )
         else:
             self.thread = threading.Thread(
                 target=self._do_audit,
-                args=(system_peer, remote_peer_group, local_peer_group))
+                args=(system_peer, remote_peer_group, local_peer_group),
+            )
             self.thread.start()
 
-    def audit_peer_group_from_system(self,
-                                     system_peer,
-                                     remote_peer_group,
-                                     local_peer_group):
-        LOG.info(f"Audit peer group [{local_peer_group.peer_group_name}] "
-                 f"with remote system {system_peer.peer_name}")
+    def audit_peer_group_from_system(
+        self, system_peer, remote_peer_group, local_peer_group
+    ):
+        LOG.info(
+            f"Audit peer group [{local_peer_group.peer_group_name}] "
+            f"with remote system {system_peer.peer_name}"
+        )
         self.start(system_peer, remote_peer_group, local_peer_group)
 
     @staticmethod
@@ -391,21 +414,23 @@
         for system in system_peers:
             try:
                 dc_client = SystemPeerManager.get_peer_dc_client(system)
-                payload = db_api.subcloud_peer_group_db_model_to_dict(
-                    peer_group)
-                if 'created-at' in payload:
-                    del payload['created-at']
-                if 'updated-at' in payload:
-                    del payload['updated-at']
-                payload['peer_uuid'] = local_system.uuid
-                LOG.info("Send audit payload [%s] of peer group %s" %
-                         (payload, peer_group.peer_group_name))
+                payload = db_api.subcloud_peer_group_db_model_to_dict(peer_group)
+                if "created-at" in payload:
+                    del payload["created-at"]
+                if "updated-at" in payload:
+                    del payload["updated-at"]
+                payload["peer_uuid"] = local_system.uuid
+                LOG.info(
+                    "Send audit payload [%s] of peer group %s"
+                    % (payload, peer_group.peer_group_name)
+                )
                 response = dc_client.audit_subcloud_peer_group(
-                    peer_group.peer_group_name,
-                    **payload)
+                    peer_group.peer_group_name, **payload
+                )
                 if response:
                     return response
             except Exception:
-                LOG.exception("Failed to send audit request for peer group "
-                              f"{peer_group.peer_group_name} to DC: "
-                              f"{system.peer_name}")
+                LOG.exception(
+                    "Failed to send audit request for peer group "
+                    f"{peer_group.peer_group_name} to DC: {system.peer_name}"
+                )
@@ -19,7 +19,6 @@ from dcmanager.db import api as db_api
 from dcmanager.manager import peer_group_audit_manager as pgam
 from dcmanager.manager.system_peer_manager import SystemPeerManager
 
-
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
@@ -46,27 +45,35 @@ class PeerMonitor(object):
             self.fm_api.clear_fault(alarm_id, entity_instance_id)
         except Exception as e:
             LOG.exception(
-                "Problem clearing fault for peer %s, alarm_id=%s "
-                "error: %s" % (self.peer.peer_uuid, alarm_id, e))
+                "Problem clearing fault for peer %s, alarm_id=%s error: %s"
+                % (self.peer.peer_uuid, alarm_id, e)
+            )
 
     def _raise_failure(self):
         alarm_id = fm_const.FM_ALARM_ID_DC_SYSTEM_PEER_HEARTBEAT_FAILED
         entity_instance_id = "peer=%s" % self.peer.peer_uuid
-        reason_text = ("Peer %s (peer_uuid=%s) connections in "
-                       "disconnected state." % (self.peer.peer_name,
-                                                self.peer.peer_uuid))
+        reason_text = "Peer %s (peer_uuid=%s) connections in disconnected state." % (
+            self.peer.peer_name,
+            self.peer.peer_uuid,
+        )
         severity = fm_const.FM_ALARM_SEVERITY_MAJOR
 
         peer_groups = db_api.subcloud_peer_group_get_by_leader_id(
-            self.context, self.peer.peer_uuid)
+            self.context, self.peer.peer_uuid
+        )
         if len(peer_groups) > 0:
-            peer_group_names = [peer_group.peer_group_name
-                                for peer_group in peer_groups]
-            reason_text = ("Peer %s (peer_uuid=%s) is in disconnected "
-                           "state. The following subcloud peer groups "
-                           "are impacted: %s." %
-                           (self.peer.peer_name, self.peer.peer_uuid,
-                            ", ".join(peer_group_names)))
+            peer_group_names = [
+                peer_group.peer_group_name for peer_group in peer_groups
+            ]
+            reason_text = (
+                "Peer %s (peer_uuid=%s) is in disconnected state. The following "
+                "subcloud peer groups are impacted: %s."
+                % (
+                    self.peer.peer_name,
+                    self.peer.peer_uuid,
+                    ", ".join(peer_group_names),
+                )
+            )
             severity = fm_const.FM_ALARM_SEVERITY_CRITICAL
 
         try:
@@ -79,18 +86,22 @@ class PeerMonitor(object):
                 reason_text=reason_text,
                 alarm_type=fm_const.FM_ALARM_TYPE_1,
                 probable_cause=fm_const.ALARM_PROBABLE_CAUSE_UNKNOWN,
-                proposed_repair_action="Check the connectivity between "
-                "the current system and the reported peer site. If the "
-                "peer system is down, migrate the affected peer group(s) "
-                "to the current system for continued subcloud management.",
-                service_affecting=False)
+                proposed_repair_action=(
+                    "Check the connectivity between the current system and the "
+                    "reported peer site. If the peer system is down, migrate the "
+                    "affected peer group(s) to the current system for continued "
+                    "subcloud management."
+                ),
+                service_affecting=False,
+            )
 
             self.fm_api.set_fault(fault)
 
         except Exception as e:
             LOG.exception(
-                "Problem setting fault for peer %s, alarm_id=%s, "
-                "error: %s" % (self.peer.peer_uuid, alarm_id, e))
+                "Problem setting fault for peer %s, alarm_id=%s, error: %s"
+                % (self.peer.peer_uuid, alarm_id, e)
+            )
 
     def _heartbeat_check_via_get_peer_group_list(self):
         """Checking the heartbeat of system peer."""
@@ -98,29 +109,28 @@ class PeerMonitor(object):
         dc_peer_subcloud_peer_group_list = list()
         try:
             dc_client = SystemPeerManager.get_peer_dc_client(self.peer)
-            dc_peer_subcloud_peer_group_list = \
-                dc_client.get_subcloud_peer_group_list()
+            dc_peer_subcloud_peer_group_list = dc_client.get_subcloud_peer_group_list()
             failed = False
 
             if not dc_peer_subcloud_peer_group_list:
-                LOG.warning("Resource subcloud peer group of dc:%s "
-                            "not found" % self.peer.manager_endpoint)
+                LOG.warning(
+                    "Resource subcloud peer group of dc:%s not found"
+                    % self.peer.manager_endpoint
+                )
 
         except Exception:
-            LOG.exception("Failed to access the dc: %s" %
-                          self.peer.peer_name)
+            LOG.exception("Failed to access the dc: %s" % self.peer.peer_name)
         return failed, dc_peer_subcloud_peer_group_list
 
     def _update_sync_status_secondary_site_becomes_unreachable(self):
         # Get associations by system peer
-        associations = SystemPeerManager.get_local_associations(self.context,
-                                                                self.peer)
+        associations = SystemPeerManager.get_local_associations(self.context, self.peer)
         for association in associations:
             # If the association is not primary, skip it.
-            if association.association_type == consts.\
-                    ASSOCIATION_TYPE_NON_PRIMARY:
-                LOG.debug("Skip update the Association sync_status as "
-                          "it is not primary.")
+            if association.association_type == consts.ASSOCIATION_TYPE_NON_PRIMARY:
+                LOG.debug(
+                    "Skip update the Association sync_status as it is not primary."
+                )
                 continue
             # If the secondary site is down, set the association sync status
             # "in-sync" -> "unknown"
@@ -131,24 +141,27 @@ class PeerMonitor(object):
                 sync_status = consts.ASSOCIATION_SYNC_STATUS_UNKNOWN
             message = f"Peer site ({self.peer.peer_name}) is unreachable."
             if association.sync_status not in [
                 consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
-                consts.ASSOCIATION_SYNC_STATUS_UNKNOWN]:
+                consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
+            ]:
                 sync_status = consts.ASSOCIATION_SYNC_STATUS_FAILED
             db_api.peer_group_association_update(
-                self.context, association.id,
+                self.context,
+                association.id,
                 sync_status=sync_status,
-                sync_message=message)
+                sync_message=message,
+            )
 
     def _update_sync_status_secondary_site_becomes_reachable(self):
         # Get associations by system peer
-        associations = SystemPeerManager.get_local_associations(self.context,
-                                                                self.peer)
+        associations = SystemPeerManager.get_local_associations(self.context, self.peer)
         for association in associations:
             # If the association is not primary, skip it.
-            if association.association_type == consts.\
-                    ASSOCIATION_TYPE_NON_PRIMARY:
-                LOG.debug("Skip update Peer Site Association sync_status as "
-                          "current site Association is not primary.")
+            if association.association_type == consts.ASSOCIATION_TYPE_NON_PRIMARY:
+                LOG.debug(
+                    "Skip update Peer Site Association sync_status as "
+                    "current site Association is not primary."
+                )
                 continue
             # Upon detecting that the secondary site is reachable again,
             # the PGA sync_status will be set for both sites by the primary
@ -156,37 +169,43 @@ class PeerMonitor(object):
|
|||||||
# "unknown" -> "in-sync"
|
# "unknown" -> "in-sync"
|
||||||
# "failed" -> "out-of-sync"
|
# "failed" -> "out-of-sync"
|
||||||
sync_status = consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
|
sync_status = consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
|
||||||
if association.sync_status == \
|
if association.sync_status == consts.ASSOCIATION_SYNC_STATUS_UNKNOWN:
|
||||||
consts.ASSOCIATION_SYNC_STATUS_UNKNOWN:
|
|
||||||
sync_status = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC
|
sync_status = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC
|
||||||
dc_local_pg = db_api.subcloud_peer_group_get(
|
dc_local_pg = db_api.subcloud_peer_group_get(
|
||||||
self.context, association.peer_group_id)
|
self.context, association.peer_group_id
|
||||||
|
)
|
||||||
SystemPeerManager.update_sync_status(
|
SystemPeerManager.update_sync_status(
|
||||||
self.context, self.peer, sync_status, dc_local_pg,
|
self.context,
|
||||||
association=association)
|
self.peer,
|
||||||
|
sync_status,
|
||||||
|
dc_local_pg,
|
||||||
|
association=association,
|
||||||
|
)
|
||||||
|
|
||||||
def _do_monitor_peer(self):
|
def _do_monitor_peer(self):
|
||||||
failure_count = 0
|
failure_count = 0
|
||||||
LOG.info("Start monitoring thread for peer %s" %
|
LOG.info("Start monitoring thread for peer %s" % self.peer.peer_name)
|
||||||
self.peer.peer_name)
|
UNAVAILABLE_STATE = consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE
|
||||||
|
AVAILABLE_STATE = consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE
|
||||||
# Do the actual peer monitor.
|
# Do the actual peer monitor.
|
||||||
while not self.exit_flag.wait(timeout=self.peer.heartbeat_interval):
|
while not self.exit_flag.wait(timeout=self.peer.heartbeat_interval):
|
||||||
try:
|
try:
|
||||||
# Get system peer from DB
|
# Get system peer from DB
|
||||||
self.peer = db_api.system_peer_get(self.context, self.peer.id)
|
self.peer = db_api.system_peer_get(self.context, self.peer.id)
|
||||||
failed, remote_pg_list = \
|
failed, remote_pg_list = self._heartbeat_check_via_get_peer_group_list()
|
||||||
self._heartbeat_check_via_get_peer_group_list()
|
|
||||||
if failed:
|
if failed:
|
||||||
failure_count += 1
|
failure_count += 1
|
||||||
if failure_count >= self.peer.heartbeat_failure_threshold:
|
if failure_count >= self.peer.heartbeat_failure_threshold:
|
||||||
# heartbeat_failure_threshold reached.
|
# heartbeat_failure_threshold reached.
|
||||||
LOG.warning("DC %s heartbeat failed, Raising alarm" %
|
LOG.warning(
|
||||||
self.peer.peer_name)
|
"DC %s heartbeat failed, Raising alarm"
|
||||||
|
% self.peer.peer_name
|
||||||
|
)
|
||||||
self._raise_failure()
|
self._raise_failure()
|
||||||
db_api.system_peer_update(
|
db_api.system_peer_update(
|
||||||
self.context, self.peer.id,
|
self.context,
|
||||||
availability_state= # noqa: E251
|
self.peer.id,
|
||||||
consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE
|
availability_state=UNAVAILABLE_STATE,
|
||||||
)
|
)
|
||||||
# pylint: disable=line-too-long
|
# pylint: disable=line-too-long
|
||||||
self._update_sync_status_secondary_site_becomes_unreachable()
|
self._update_sync_status_secondary_site_becomes_unreachable()
|
||||||
@ -195,23 +214,24 @@ class PeerMonitor(object):
|
|||||||
else:
|
else:
|
||||||
failure_count = 0
|
failure_count = 0
|
||||||
self._audit_local_peer_groups(remote_pg_list)
|
self._audit_local_peer_groups(remote_pg_list)
|
||||||
if self.peer.availability_state != \
|
if self.peer.availability_state != AVAILABLE_STATE:
|
||||||
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE:
|
|
||||||
db_api.system_peer_update(
|
db_api.system_peer_update(
|
||||||
self.context, self.peer.id,
|
self.context,
|
||||||
availability_state= # noqa: E251
|
self.peer.id,
|
||||||
consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE
|
availability_state=AVAILABLE_STATE,
|
||||||
)
|
)
|
||||||
# pylint: disable=line-too-long
|
# pylint: disable=line-too-long
|
||||||
self._update_sync_status_secondary_site_becomes_reachable()
|
self._update_sync_status_secondary_site_becomes_reachable()
|
||||||
LOG.info("DC %s back online, clear alarm" %
|
LOG.info("DC %s back online, clear alarm" % self.peer.peer_name)
|
||||||
self.peer.peer_name)
|
|
||||||
self._clear_failure()
|
self._clear_failure()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception("Got exception monitoring peer %s error: %s" %
|
LOG.exception(
|
||||||
(self.peer.peer_name, e))
|
"Got exception monitoring peer %s error: %s"
|
||||||
LOG.info("Caught graceful exit signal for peer monitor %s" %
|
% (self.peer.peer_name, e)
|
||||||
self.peer.peer_name)
|
)
|
||||||
|
LOG.info(
|
||||||
|
"Caught graceful exit signal for peer monitor %s" % self.peer.peer_name
|
||||||
|
)
|
||||||
|
|
||||||
def _audit_local_peer_groups(self, remote_pg_list):
|
def _audit_local_peer_groups(self, remote_pg_list):
|
||||||
# Generate a dict index by remote peer group name
|
# Generate a dict index by remote peer group name
|
||||||
@ -222,21 +242,25 @@ class PeerMonitor(object):
|
|||||||
|
|
||||||
# Only audit peer groups existing on both side
|
# Only audit peer groups existing on both side
|
||||||
for peer_group_id, pgam_obj in self.peer_group_audit_obj_map.items():
|
for peer_group_id, pgam_obj in self.peer_group_audit_obj_map.items():
|
||||||
peer_group = db_api.subcloud_peer_group_get(self.context,
|
peer_group = db_api.subcloud_peer_group_get(self.context, peer_group_id)
|
||||||
peer_group_id)
|
|
||||||
if peer_group.peer_group_name in remote_pg_dict:
|
if peer_group.peer_group_name in remote_pg_dict:
|
||||||
remote_peer_group = remote_pg_dict[peer_group.peer_group_name]
|
remote_peer_group = remote_pg_dict[peer_group.peer_group_name]
|
||||||
# Audit for require_audit_flag is True or
|
# Audit for require_audit_flag is True or
|
||||||
# Remote peer group is in 'complete' state.
|
# Remote peer group is in 'complete' state.
|
||||||
if (pgam_obj.require_audit_flag
|
if (
|
||||||
or remote_peer_group.get("migration_status") ==
|
pgam_obj.require_audit_flag
|
||||||
consts.PEER_GROUP_MIGRATION_COMPLETE):
|
or remote_peer_group.get("migration_status")
|
||||||
|
== consts.PEER_GROUP_MIGRATION_COMPLETE
|
||||||
|
):
|
||||||
pgam_obj.audit_peer_group_from_system(
|
pgam_obj.audit_peer_group_from_system(
|
||||||
self.peer, remote_peer_group, peer_group)
|
self.peer, remote_peer_group, peer_group
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
LOG.warning("peer group %s not found on remote DC %s "
|
LOG.warning(
|
||||||
"nothing to audit, need sync operation" %
|
"peer group %s not found on remote DC %s "
|
||||||
(peer_group.peer_group_name, self.peer.peer_name))
|
"nothing to audit, need sync operation"
|
||||||
|
% (peer_group.peer_group_name, self.peer.peer_name)
|
||||||
|
)
|
||||||
|
|
||||||
def _set_require_audit_flag_to_associated_peer_groups(self):
|
def _set_require_audit_flag_to_associated_peer_groups(self):
|
||||||
for pgam_obj in self.peer_group_audit_obj_map.values():
|
for pgam_obj in self.peer_group_audit_obj_map.values():
|
||||||
@ -248,7 +272,7 @@ class PeerMonitor(object):
|
|||||||
pgam_obj = self.peer_group_audit_obj_map[peer_group.id]
|
pgam_obj = self.peer_group_audit_obj_map[peer_group.id]
|
||||||
pgam_obj.audit(self.peer, remote_peer_group, peer_group)
|
pgam_obj.audit(self.peer, remote_peer_group, peer_group)
|
||||||
else:
|
else:
|
||||||
msg = ("No peer group id %s found" % peer_group.peer_group_name)
|
msg = "No peer group id %s found" % peer_group.peer_group_name
|
||||||
return msg
|
return msg
|
||||||
|
|
||||||
def _clean_peer_group_audit_threads(self):
|
def _clean_peer_group_audit_threads(self):
|
||||||
@ -262,25 +286,30 @@ class PeerMonitor(object):
|
|||||||
|
|
||||||
# destroy removed peer_group audit object
|
# destroy removed peer_group audit object
|
||||||
for peer_group_id in removed_peer_groups:
|
for peer_group_id in removed_peer_groups:
|
||||||
LOG.info("Peer group [%s] removed from peer [%s]" %
|
LOG.info(
|
||||||
(peer_group_id, self.peer.peer_name))
|
"Peer group [%s] removed from peer [%s]"
|
||||||
|
% (peer_group_id, self.peer.peer_name)
|
||||||
|
)
|
||||||
if peer_group_id in self.peer_group_audit_obj_map:
|
if peer_group_id in self.peer_group_audit_obj_map:
|
||||||
self.peer_group_audit_obj_map[peer_group_id].stop()
|
self.peer_group_audit_obj_map[peer_group_id].stop()
|
||||||
del self.peer_group_audit_obj_map[peer_group_id]
|
del self.peer_group_audit_obj_map[peer_group_id]
|
||||||
# Add new peer_group audit object
|
# Add new peer_group audit object
|
||||||
for peer_group_id in new_peer_groups:
|
for peer_group_id in new_peer_groups:
|
||||||
LOG.info("New peer group [%s] found for peer [%s]" %
|
LOG.info(
|
||||||
(peer_group_id, self.peer.peer_name))
|
"New peer group [%s] found for peer [%s]"
|
||||||
self.peer_group_audit_obj_map[peer_group_id] = \
|
% (peer_group_id, self.peer.peer_name)
|
||||||
pgam.PeerGroupAuditManager(self.subcloud_manager,
|
)
|
||||||
peer_group_id)
|
self.peer_group_audit_obj_map[peer_group_id] = pgam.PeerGroupAuditManager(
|
||||||
|
self.subcloud_manager, peer_group_id
|
||||||
|
)
|
||||||
self.peer_group_id_set = peer_group_id_set
|
self.peer_group_id_set = peer_group_id_set
|
||||||
self._set_require_audit_flag_to_associated_peer_groups()
|
self._set_require_audit_flag_to_associated_peer_groups()
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
if self.thread is not None:
|
if self.thread is not None:
|
||||||
LOG.error('Peer monitor thread for %s has already started' %
|
LOG.error(
|
||||||
self.peer.peer_name)
|
"Peer monitor thread for %s has already started" % self.peer.peer_name
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self.thread = threading.Thread(target=self._do_monitor_peer)
|
self.thread = threading.Thread(target=self._do_monitor_peer)
|
||||||
self.thread.start()
|
self.thread.start()
|
||||||
@ -296,10 +325,9 @@ class PeerMonitorManager(manager.Manager):
|
|||||||
"""Manages tasks related to peer monitor."""
|
"""Manages tasks related to peer monitor."""
|
||||||
|
|
||||||
def __init__(self, subcloud_manager):
|
def __init__(self, subcloud_manager):
|
||||||
LOG.debug('PeerMonitorManager initialization...')
|
LOG.debug("PeerMonitorManager initialization...")
|
||||||
|
|
||||||
super(PeerMonitorManager, self).__init__(
|
super(PeerMonitorManager, self).__init__(service_name="peer_monitor_manager")
|
||||||
service_name="peer_monitor_manager")
|
|
||||||
self.peer_monitor = dict()
|
self.peer_monitor = dict()
|
||||||
self.context = context.get_admin_context()
|
self.context = context.get_admin_context()
|
||||||
self.subcloud_manager = subcloud_manager
|
self.subcloud_manager = subcloud_manager
|
||||||
@ -314,12 +342,11 @@ class PeerMonitorManager(manager.Manager):
|
|||||||
del self.peer_monitor_thread_map[system_peer_id]
|
del self.peer_monitor_thread_map[system_peer_id]
|
||||||
|
|
||||||
def _create_peer_monitor_task(self, system_peer_id):
|
def _create_peer_monitor_task(self, system_peer_id):
|
||||||
peer = db_api.system_peer_get(self.context,
|
peer = db_api.system_peer_get(self.context, system_peer_id)
|
||||||
system_peer_id)
|
LOG.info("Create monitoring thread for peer: %s" % peer.peer_name)
|
||||||
LOG.info("Create monitoring thread for peer: %s" %
|
|
||||||
peer.peer_name)
|
|
||||||
self.peer_monitor_thread_map[system_peer_id] = PeerMonitor(
|
self.peer_monitor_thread_map[system_peer_id] = PeerMonitor(
|
||||||
peer, self.context, self.subcloud_manager)
|
peer, self.context, self.subcloud_manager
|
||||||
|
)
|
||||||
self.peer_monitor_thread_map[system_peer_id].start()
|
self.peer_monitor_thread_map[system_peer_id].start()
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -327,10 +354,12 @@ class PeerMonitorManager(manager.Manager):
|
|||||||
return {key: value for key, value in dict1.items() if key not in dict2}
|
return {key: value for key, value in dict1.items() if key not in dict2}
|
||||||
|
|
||||||
def _create_or_destroy_peer_monitor_task(self, peer_system_peer_group_map):
|
def _create_or_destroy_peer_monitor_task(self, peer_system_peer_group_map):
|
||||||
new_peers = self._diff_dict(peer_system_peer_group_map,
|
new_peers = self._diff_dict(
|
||||||
self.peer_monitor_thread_map)
|
peer_system_peer_group_map, self.peer_monitor_thread_map
|
||||||
removed_peers = self._diff_dict(self.peer_monitor_thread_map,
|
)
|
||||||
peer_system_peer_group_map)
|
removed_peers = self._diff_dict(
|
||||||
|
self.peer_monitor_thread_map, peer_system_peer_group_map
|
||||||
|
)
|
||||||
for peer_id in new_peers:
|
for peer_id in new_peers:
|
||||||
self._create_peer_monitor_task(peer_id)
|
self._create_peer_monitor_task(peer_id)
|
||||||
for peer_id in removed_peers:
|
for peer_id in removed_peers:
|
||||||
@ -338,8 +367,7 @@ class PeerMonitorManager(manager.Manager):
|
|||||||
|
|
||||||
# Update peer_group_id set
|
# Update peer_group_id set
|
||||||
for peer_id, pm_obj in self.peer_monitor_thread_map.items():
|
for peer_id, pm_obj in self.peer_monitor_thread_map.items():
|
||||||
pm_obj.update_peer_group_id_set(
|
pm_obj.update_peer_group_id_set(peer_system_peer_group_map[peer_id])
|
||||||
peer_system_peer_group_map[peer_id])
|
|
||||||
|
|
||||||
def peer_monitor_notify(self, context):
|
def peer_monitor_notify(self, context):
|
||||||
LOG.info("Caught peer monitor notify...")
|
LOG.info("Caught peer monitor notify...")
|
||||||
@ -348,31 +376,32 @@ class PeerMonitorManager(manager.Manager):
|
|||||||
associations = db_api.peer_group_association_get_all(context)
|
associations = db_api.peer_group_association_get_all(context)
|
||||||
for association in associations:
|
for association in associations:
|
||||||
peer_system_peer_group_map[association.system_peer_id].add(
|
peer_system_peer_group_map[association.system_peer_id].add(
|
||||||
association.peer_group_id)
|
association.peer_group_id
|
||||||
|
)
|
||||||
|
|
||||||
self._create_or_destroy_peer_monitor_task(peer_system_peer_group_map)
|
self._create_or_destroy_peer_monitor_task(peer_system_peer_group_map)
|
||||||
|
|
||||||
def peer_group_audit_notify(self, context, peer_group_name, payload):
|
def peer_group_audit_notify(self, context, peer_group_name, payload):
|
||||||
LOG.info("Caught peer group audit notification for peer group %s" %
|
LOG.info(
|
||||||
peer_group_name)
|
"Caught peer group audit notification for peer group %s" % peer_group_name
|
||||||
|
)
|
||||||
msg = None
|
msg = None
|
||||||
try:
|
try:
|
||||||
peer_group = db_api.subcloud_peer_group_get_by_name(
|
peer_group = db_api.subcloud_peer_group_get_by_name(
|
||||||
context, peer_group_name)
|
context, peer_group_name
|
||||||
system_uuid = payload.get('peer_uuid')
|
)
|
||||||
system_peer = db_api.system_peer_get_by_uuid(context,
|
system_uuid = payload.get("peer_uuid")
|
||||||
system_uuid)
|
system_peer = db_api.system_peer_get_by_uuid(context, system_uuid)
|
||||||
if system_peer.id in self.peer_monitor_thread_map:
|
if system_peer.id in self.peer_monitor_thread_map:
|
||||||
pmobj = self.peer_monitor_thread_map[system_peer.id]
|
pmobj = self.peer_monitor_thread_map[system_peer.id]
|
||||||
msg = pmobj.audit_specific_local_peer_group(peer_group,
|
msg = pmobj.audit_specific_local_peer_group(peer_group, payload)
|
||||||
payload)
|
|
||||||
else:
|
else:
|
||||||
msg = ("System peer with UUID=%s is not under monitoring. "
|
msg = (
|
||||||
"Skipping audit for peer group %s" %
|
"System peer with UUID=%s is not under monitoring. "
|
||||||
(system_uuid, peer_group_name))
|
"Skipping audit for peer group %s" % (system_uuid, peer_group_name)
|
||||||
|
)
|
||||||
LOG.warning(msg)
|
LOG.warning(msg)
|
||||||
return msg
|
return msg
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception('Handling peer group audit notify error: %s' %
|
LOG.exception("Handling peer group audit notify error: %s" % str(e))
|
||||||
str(e))
|
|
||||||
return str(e)
|
return str(e)
|
||||||
|
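A pattern that repeats through the hunks above: when a call no longer fits on one line, Black explodes it to one argument per line and appends a trailing comma (the "magic trailing comma"), which keeps the call in exploded form on future runs so that adding an argument later is a one-line diff. A trimmed sketch of the rule, reusing names from the db_api.system_peer_update() hunks (state stands in for the UNAVAILABLE_STATE/AVAILABLE_STATE locals introduced above):

    # Before: arguments packed and aligned under the opening parenthesis.
    db_api.system_peer_update(
        self.context, self.peer.id,
        availability_state=state)

    # After: one argument per line, trailing comma, dedented closing parenthesis.
    db_api.system_peer_update(
        self.context,
        self.peer.id,
        availability_state=state,
    )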
@@ -46,9 +46,11 @@ LOG = logging.getLogger(__name__)
 # run multiple operations in parallel past the RPC limit.
 def run_in_thread(fn):
     """Decorator to run a function in a separate thread."""
+
     def wrapper(*args, **kwargs):
         thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
         thread.start()
+
     return wrapper


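The run_in_thread hunk above is behavior-neutral: Black only adds the blank lines it requires around a nested function. When reading the handlers that use this decorator further down, note that the wrapper starts the thread and implicitly returns None, so any value returned by the decorated function is discarded; the decorator only suits fire-and-forget entry points. A self-contained usage sketch (the task function and its argument are hypothetical):

    import threading

    def run_in_thread(fn):
        """Decorator to run a function in a separate thread."""

        def wrapper(*args, **kwargs):
            thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
            thread.start()

        return wrapper

    @run_in_thread
    def long_running_task(task_id):  # hypothetical example task
        print("processing", task_id)

    long_running_task(42)  # returns None immediately; the print runs in a worker thread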
@@ -101,9 +103,9 @@ class DCManagerService(service.Service):
         utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
         self.dcmanager_id = uuidutils.generate_uuid()
         self.init_managers()
-        target = oslo_messaging.Target(version=self.rpc_api_version,
-                                       server=self.host,
-                                       topic=self.topic)
+        target = oslo_messaging.Target(
+            version=self.rpc_api_version, server=self.host, topic=self.topic
+        )
         self.target = target
         self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
         self._rpc_server.start()
@@ -127,14 +129,15 @@ class DCManagerService(service.Service):
     @request_context
     def add_subcloud(self, context, subcloud_id, payload):
         # Adds a subcloud
-        LOG.info("Handling add_subcloud request for: %s" % payload.get('name'))
+        LOG.info("Handling add_subcloud request for: %s" % payload.get("name"))
         return self.subcloud_manager.add_subcloud(context, subcloud_id, payload)

     @request_context
     def add_secondary_subcloud(self, context, subcloud_id, payload):
         # Adds a secondary subcloud
-        LOG.info("Handling add_secondary_subcloud request for: %s" %
-                 payload.get('name'))
+        LOG.info(
+            "Handling add_secondary_subcloud request for: %s" % payload.get("name")
+        )
         return self.subcloud_manager.add_subcloud(context, subcloud_id, payload)

     @request_context
@@ -144,22 +147,23 @@ class DCManagerService(service.Service):
         return self.subcloud_manager.delete_subcloud(context, subcloud_id)

     @request_context
-    def rename_subcloud(self, context, subcloud_id, curr_subcloud_name,
-                        new_subcloud_name=None):
+    def rename_subcloud(
+        self, context, subcloud_id, curr_subcloud_name, new_subcloud_name=None
+    ):
         # Rename a subcloud
-        LOG.info("Handling rename_subcloud request for: %s" %
-                 curr_subcloud_name)
-        subcloud = self.subcloud_manager.rename_subcloud(context,
-                                                         subcloud_id,
-                                                         curr_subcloud_name,
-                                                         new_subcloud_name)
+        LOG.info("Handling rename_subcloud request for: %s" % curr_subcloud_name)
+        subcloud = self.subcloud_manager.rename_subcloud(
+            context, subcloud_id, curr_subcloud_name, new_subcloud_name
+        )
         return subcloud

     @request_context
     def get_subcloud_name_by_region_name(self, context, subcloud_region):
         # get subcloud by region name
-        LOG.debug("Handling get_subcloud_name_by_region_name request for "
-                  "region: %s" % subcloud_region)
+        LOG.debug(
+            "Handling get_subcloud_name_by_region_name request for region: %s"
+            % subcloud_region
+        )
         subcloud = self.subcloud_manager.get_subcloud_name_by_region_name(
             context, subcloud_region
         )
@@ -167,128 +171,156 @@ class DCManagerService(service.Service):

     @request_context
     def update_subcloud(
-        self, context, subcloud_id, management_state=None, description=None,
-        location=None, group_id=None, data_install=None, force=None,
-        deploy_status=None, peer_group_id=None, bootstrap_values=None,
-        bootstrap_address=None
+        self,
+        context,
+        subcloud_id,
+        management_state=None,
+        description=None,
+        location=None,
+        group_id=None,
+        data_install=None,
+        force=None,
+        deploy_status=None,
+        peer_group_id=None,
+        bootstrap_values=None,
+        bootstrap_address=None,
     ):
         # Updates a subcloud
         LOG.info("Handling update_subcloud request for: %s" % subcloud_id)
-        subcloud = self.subcloud_manager.update_subcloud(context, subcloud_id,
-                                                         management_state,
-                                                         description,
-                                                         location,
-                                                         group_id,
-                                                         data_install,
-                                                         force,
-                                                         deploy_status,
-                                                         peer_group_id,
-                                                         bootstrap_values,
-                                                         bootstrap_address)
+        subcloud = self.subcloud_manager.update_subcloud(
+            context,
+            subcloud_id,
+            management_state,
+            description,
+            location,
+            group_id,
+            data_install,
+            force,
+            deploy_status,
+            peer_group_id,
+            bootstrap_values,
+            bootstrap_address,
+        )
         return subcloud

     @request_context
     def update_subcloud_with_network_reconfig(self, context, subcloud_id, payload):
-        LOG.info("Handling update_subcloud_with_network_reconfig request for: %s",
-                 subcloud_id)
+        LOG.info(
+            "Handling update_subcloud_with_network_reconfig request for: %s",
+            subcloud_id,
+        )
         return self.subcloud_manager.update_subcloud_with_network_reconfig(
-            context, subcloud_id, payload)
+            context, subcloud_id, payload
+        )

     @run_in_thread
     @request_context
     def redeploy_subcloud(self, context, subcloud_id, payload):
         # Redeploy a subcloud
         LOG.info("Handling redeploy_subcloud request for: %s" % subcloud_id)
-        return self.subcloud_manager.redeploy_subcloud(context,
-                                                       subcloud_id,
-                                                       payload)
+        return self.subcloud_manager.redeploy_subcloud(context, subcloud_id, payload)

     @request_context
     def backup_subclouds(self, context, payload):
         # Backup a subcloud or group of subclouds
-        entity = 'subcloud' if payload.get('subcloud') else 'group'
-        LOG.info("Handling backup_subclouds request for %s ID: %s" %
-                 (entity, (payload.get('subcloud') or payload.get('group'))))
+        entity = "subcloud" if payload.get("subcloud") else "group"
+        LOG.info(
+            "Handling backup_subclouds request for %s ID: %s"
+            % (entity, (payload.get("subcloud") or payload.get("group")))
+        )
         return self.subcloud_manager.create_subcloud_backups(context, payload)

     @request_context
     def delete_subcloud_backups(self, context, release_version, payload):
         # Delete backup on subcloud or group of subclouds
-        entity = 'subcloud' if payload.get('subcloud') else 'group'
-        LOG.info("Handling delete_subcloud_backups request for %s ID: %s" %
-                 (entity, (payload.get('subcloud') or payload.get('group'))))
-        return self.subcloud_manager.delete_subcloud_backups(context,
-                                                             release_version,
-                                                             payload)
+        entity = "subcloud" if payload.get("subcloud") else "group"
+        LOG.info(
+            "Handling delete_subcloud_backups request for %s ID: %s"
+            % (entity, (payload.get("subcloud") or payload.get("group")))
+        )
+        return self.subcloud_manager.delete_subcloud_backups(
+            context, release_version, payload
+        )

     @request_context
     def restore_subcloud_backups(self, context, payload):
         # Restore a subcloud backup or a group of subclouds backups
-        entity = 'subcloud' if payload.get('subcloud') else 'group'
-        LOG.info("Handling restore_subcloud_backups request for %s ID: %s" %
-                 (entity, (payload.get('subcloud') or payload.get('group'))))
+        entity = "subcloud" if payload.get("subcloud") else "group"
+        LOG.info(
+            "Handling restore_subcloud_backups request for %s ID: %s"
+            % (entity, (payload.get("subcloud") or payload.get("group")))
+        )
         return self.subcloud_manager.restore_subcloud_backups(context, payload)

     @request_context
-    def update_subcloud_sync_endpoint_type(self, context, subcloud_name,
-                                           endpoint_type_list,
-                                           openstack_installed):
+    def update_subcloud_sync_endpoint_type(
+        self, context, subcloud_name, endpoint_type_list, openstack_installed
+    ):
         # Updates subcloud sync endpoint type
-        LOG.info("Handling update_subcloud_sync_endpoint_type request for: %s"
-                 % subcloud_name)
+        LOG.info(
+            "Handling update_subcloud_sync_endpoint_type request for: %s"
+            % subcloud_name
+        )
         self.subcloud_manager.update_subcloud_sync_endpoint_type(
-            context, subcloud_name, endpoint_type_list, openstack_installed)
+            context, subcloud_name, endpoint_type_list, openstack_installed
+        )

     @request_context
     def prestage_subcloud(self, context, payload):
-        LOG.info("Handling prestage_subcloud request for: %s",
-                 payload['subcloud_name'])
+        LOG.info("Handling prestage_subcloud request for: %s", payload["subcloud_name"])
         return self.subcloud_manager.prestage_subcloud(context, payload)

     @request_context
     def subcloud_deploy_create(self, context, subcloud_id, payload):
         # Adds a subcloud
-        LOG.info("Handling subcloud_deploy_create request for: %s" %
-                 payload.get('name'))
-        return self.subcloud_manager.subcloud_deploy_create(context,
-                                                            subcloud_id,
-                                                            payload)
+        LOG.info(
+            "Handling subcloud_deploy_create request for: %s" % payload.get("name")
+        )
+        return self.subcloud_manager.subcloud_deploy_create(
+            context, subcloud_id, payload
+        )

     @run_in_thread
     @request_context
-    def subcloud_deploy_bootstrap(self, context, subcloud_id, payload,
-                                  initial_deployment):
+    def subcloud_deploy_bootstrap(
+        self, context, subcloud_id, payload, initial_deployment
+    ):
         # Bootstraps a subcloud
-        LOG.info("Handling subcloud_deploy_bootstrap request for: %s" %
-                 payload.get('name'))
+        LOG.info(
+            "Handling subcloud_deploy_bootstrap request for: %s" % payload.get("name")
+        )
         return self.subcloud_manager.subcloud_deploy_bootstrap(
-            context, subcloud_id, payload, initial_deployment)
+            context, subcloud_id, payload, initial_deployment
+        )

     @run_in_thread
     @request_context
-    def subcloud_deploy_config(self, context, subcloud_id, payload,
-                               initial_deployment):
+    def subcloud_deploy_config(self, context, subcloud_id, payload, initial_deployment):
         # Configures a subcloud
         LOG.info("Handling subcloud_deploy_config request for: %s" % subcloud_id)
         return self.subcloud_manager.subcloud_deploy_config(
-            context, subcloud_id, payload, initial_deployment)
+            context, subcloud_id, payload, initial_deployment
+        )

     @run_in_thread
     @request_context
-    def subcloud_deploy_install(self, context, subcloud_id, payload,
-                                initial_deployment):
+    def subcloud_deploy_install(
+        self, context, subcloud_id, payload, initial_deployment
+    ):
         # Install a subcloud
         LOG.info("Handling subcloud_deploy_install request for: %s" % subcloud_id)
         return self.subcloud_manager.subcloud_deploy_install(
-            context, subcloud_id, payload, initial_deployment)
+            context, subcloud_id, payload, initial_deployment
+        )

     @run_in_thread
     @request_context
     def subcloud_deploy_enroll(self, context, subcloud_id, payload):
         # Enroll a subcloud
-        LOG.info(f'Handling subcloud_deploy_enroll request for: {subcloud_id}')
+        LOG.info(f"Handling subcloud_deploy_enroll request for: {subcloud_id}")
         return self.subcloud_manager.subcloud_deploy_enroll(
-            context, subcloud_id, payload)
+            context, subcloud_id, payload
+        )

     @request_context
     def subcloud_deploy_complete(self, context, subcloud_id):
@@ -301,26 +333,27 @@ class DCManagerService(service.Service):
     def subcloud_deploy_abort(self, context, subcloud_id, deploy_status):
         # Abort the subcloud deployment
         LOG.info("Handling subcloud_deploy_abort request for: %s" % subcloud_id)
-        return self.subcloud_manager.subcloud_deploy_abort(context,
-                                                           subcloud_id,
-                                                           deploy_status)
+        return self.subcloud_manager.subcloud_deploy_abort(
+            context, subcloud_id, deploy_status
+        )

     @run_in_thread
     @request_context
-    def subcloud_deploy_resume(self, context, subcloud_id, subcloud_name,
-                               payload, deploy_states_to_run):
+    def subcloud_deploy_resume(
+        self, context, subcloud_id, subcloud_name, payload, deploy_states_to_run
+    ):
         # Adds a subcloud
         LOG.info("Handling subcloud_deploy_resume request for: %s" % subcloud_name)
-        return self.subcloud_manager.subcloud_deploy_resume(context,
-                                                            subcloud_id,
-                                                            subcloud_name,
-                                                            payload,
-                                                            deploy_states_to_run)
+        return self.subcloud_manager.subcloud_deploy_resume(
+            context, subcloud_id, subcloud_name, payload, deploy_states_to_run
+        )

     @request_context
     def batch_migrate_subcloud(self, context, payload):
-        LOG.info("Handling batch_migrate_subcloud request for peer_group: %s",
-                 payload['peer_group'])
+        LOG.info(
+            "Handling batch_migrate_subcloud request for peer_group: %s",
+            payload["peer_group"],
+        )
         return self.subcloud_manager.batch_migrate_subcloud(context, payload)

     @request_context
@@ -330,44 +363,62 @@ class DCManagerService(service.Service):

     @request_context
     def peer_group_audit_notify(self, context, peer_group_name, payload):
-        LOG.info("Handling peer group audit notify of peer group "
-                 f"{peer_group_name}")
+        LOG.info("Handling peer group audit notify of peer group {peer_group_name}")
         return self.peer_monitor_manager.peer_group_audit_notify(
-            context, peer_group_name, payload)
+            context, peer_group_name, payload
+        )

     @request_context
-    def sync_subcloud_peer_group(self, context, association_id,
-                                 sync_subclouds=True):
-        LOG.info("Handling sync_subcloud_peer_group request for: %s",
-                 association_id)
+    def sync_subcloud_peer_group(self, context, association_id, sync_subclouds=True):
+        LOG.info("Handling sync_subcloud_peer_group request for: %s", association_id)
         return self.system_peer_manager.sync_subcloud_peer_group(
-            context, association_id, sync_subclouds)
+            context, association_id, sync_subclouds
+        )

     @request_context
-    def update_subcloud_peer_group(self, context, peer_group_id,
-                                   group_state, max_subcloud_rehoming,
-                                   group_name, new_group_name=None):
-        LOG.info("Handling update_subcloud_peer_group request for "
-                 "peer group %s" % peer_group_id)
+    def update_subcloud_peer_group(
+        self,
+        context,
+        peer_group_id,
+        group_state,
+        max_subcloud_rehoming,
+        group_name,
+        new_group_name=None,
+    ):
+        LOG.info(
+            "Handling update_subcloud_peer_group request for peer group %s"
+            % peer_group_id
+        )
         return self.system_peer_manager.update_subcloud_peer_group(
-            context, peer_group_id, group_state, max_subcloud_rehoming,
-            group_name, new_group_name)
+            context,
+            peer_group_id,
+            group_state,
+            max_subcloud_rehoming,
+            group_name,
+            new_group_name,
+        )

     @request_context
     def delete_peer_group_association(self, context, association_id):
-        LOG.info("Handling delete_peer_group_association request for: %s",
-                 association_id)
+        LOG.info(
+            "Handling delete_peer_group_association request for: %s", association_id
+        )
         return self.system_peer_manager.delete_peer_group_association(
-            context, association_id)
+            context, association_id
+        )

     @request_context
-    def update_association_sync_status(self, context, peer_group_id,
-                                       sync_status, sync_message=None):
+    def update_association_sync_status(
+        self, context, peer_group_id, sync_status, sync_message=None
+    ):
         # Updates peer group association sync_status
-        LOG.info("Handling update_peer_association_sync_status request for: %s"
-                 % peer_group_id)
+        LOG.info(
+            "Handling update_peer_association_sync_status request for: %s"
+            % peer_group_id
+        )
         return self.system_peer_manager.update_association_sync_status(
-            context, peer_group_id, sync_status, sync_message)
+            context, peer_group_id, sync_status, sync_message
+        )

     def _stop_rpc_server(self):
         # Stop RPC connection to prevent new requests
@@ -375,9 +426,9 @@ class DCManagerService(service.Service):
         try:
             self._rpc_server.stop()
             self._rpc_server.wait()
-            LOG.info('RPC service stopped successfully')
+            LOG.info("RPC service stopped successfully")
         except Exception as ex:
-            LOG.error('Failed to stop RPC service: %s', str(ex))
+            LOG.error("Failed to stop RPC service: %s", str(ex))

     def stop(self):
         SubprocessCleanup.shutdown_cleanup(origin="service")
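Most single-line changes in this file are Black's string normalization: single-quoted literals, dict keys, and f-string prefixes are rewritten with double quotes, with no change in runtime behavior. For example, from the backup handlers above:

    entity = 'subcloud' if payload.get('subcloud') else 'group'    # before
    entity = "subcloud" if payload.get("subcloud") else "group"    # after, same semantics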
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
 # Copyright (c) 2015 Ericsson AB.
-# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
+# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -34,16 +34,16 @@ class DCManagerObject(base.VersionedObject):
     "save" object method.
     """

-    OBJ_PROJECT_NAMESPACE = 'dcmanager'
-    VERSION = '1.0'
+    OBJ_PROJECT_NAMESPACE = "dcmanager"
+    VERSION = "1.0"

     @staticmethod
     def _from_db_object(context, obj, db_obj):
         if db_obj is None:
             return None
         for field in obj.fields:
-            if field == 'metadata':
-                obj['metadata'] = db_obj['meta_data']
+            if field == "metadata":
+                obj["metadata"] = db_obj["meta_data"]
             else:
                 obj[field] = db_obj[field]

@@ -66,6 +66,7 @@ class DCManagerObjectRegistry(base.VersionedObjectRegistry):
             setattr(objects, cls.obj_name(), cls)
         else:
             curr_version = versionutils.convert_version_to_tuple(
-                getattr(objects, cls.obj_name()).VERSION)
+                getattr(objects, cls.obj_name()).VERSION
+            )
             if version >= curr_version:
                 setattr(objects, cls.obj_name(), cls)
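The registry hunk above compares dotted version strings by first converting them to tuples with oslo.utils' versionutils, and the reflow leaves that logic untouched. A standalone sketch of why the tuple conversion matters (assumes oslo.utils is installed; this snippet is not part of the commit):

    from oslo_utils import versionutils

    curr = versionutils.convert_version_to_tuple("1.9")   # (1, 9)
    new = versionutils.convert_version_to_tuple("1.10")   # (1, 10)

    # Tuples compare numerically per component, so 1.10 sorts after 1.9;
    # a plain string comparison would order them the other way around.
    assert new > curr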
@@ -34,8 +34,9 @@ class RPCClient(object):
     """

     def __init__(self, timeout, topic, version):
-        self._client = messaging.get_rpc_client(timeout=timeout, topic=topic,
-                                                version=version)
+        self._client = messaging.get_rpc_client(
+            timeout=timeout, topic=topic, version=version
+        )

     @staticmethod
     def make_msg(method, **kwargs):
@@ -61,71 +62,98 @@ class RPCClient(object):
 class SubcloudStateClient(RPCClient):
     """Client to update subcloud availability."""

-    BASE_RPC_API_VERSION = '1.0'
+    BASE_RPC_API_VERSION = "1.0"

     def __init__(self, timeout=None):
         super(SubcloudStateClient, self).__init__(
-            timeout,
-            consts.TOPIC_DC_MANAGER_STATE,
-            self.BASE_RPC_API_VERSION)
+            timeout, consts.TOPIC_DC_MANAGER_STATE, self.BASE_RPC_API_VERSION

-    def bulk_update_subcloud_availability_and_endpoint_status(
-        self, ctxt, subcloud_name, subcloud_region, availability_data,
-        endpoint_data
-    ):
-        # Note: This is an asynchronous operation.
-        return self.cast(ctxt, self.make_msg(
-            'bulk_update_subcloud_availability_and_endpoint_status',
-            subcloud_name=subcloud_name,
-            subcloud_region=subcloud_region,
-            availability_data=availability_data,
-            endpoint_data=endpoint_data)
         )

-    def update_subcloud_availability(self, ctxt,
-                                     subcloud_name,
-                                     subcloud_region,
-                                     availability_status,
-                                     update_state_only=False,
-                                     audit_fail_count=None):
+    def bulk_update_subcloud_availability_and_endpoint_status(
+        self, ctxt, subcloud_name, subcloud_region, availability_data, endpoint_data
+    ):
+        # Note: This is an asynchronous operation.
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "bulk_update_subcloud_availability_and_endpoint_status",
+                subcloud_name=subcloud_name,
+                subcloud_region=subcloud_region,
+                availability_data=availability_data,
+                endpoint_data=endpoint_data,
+            ),
+        )
+
+    def update_subcloud_availability(
+        self,
+        ctxt,
+        subcloud_name,
+        subcloud_region,
+        availability_status,
+        update_state_only=False,
+        audit_fail_count=None,
+    ):
         # Note: synchronous
         return self.call(
             ctxt,
-            self.make_msg('update_subcloud_availability',
-                          subcloud_name=subcloud_name,
-                          subcloud_region=subcloud_region,
-                          availability_status=availability_status,
-                          update_state_only=update_state_only,
-                          audit_fail_count=audit_fail_count))
+            self.make_msg(
+                "update_subcloud_availability",
+                subcloud_name=subcloud_name,
+                subcloud_region=subcloud_region,
+                availability_status=availability_status,
+                update_state_only=update_state_only,
+                audit_fail_count=audit_fail_count,
+            ),
+        )

     def update_subcloud_endpoint_status(
-        self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None,
-        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None,
-        alarmable=True
+        self,
+        ctxt,
+        subcloud_name=None,
+        subcloud_region=None,
+        endpoint_type=None,
+        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
+        ignore_endpoints=None,
+        alarmable=True,
     ):
         # Note: This is an asynchronous operation.
         # See below for synchronous method call
-        return self.cast(ctxt, self.make_msg('update_subcloud_endpoint_status',
-                                             subcloud_name=subcloud_name,
-                                             subcloud_region=subcloud_region,
-                                             endpoint_type=endpoint_type,
-                                             sync_status=sync_status,
-                                             ignore_endpoints=ignore_endpoints,
-                                             alarmable=alarmable))
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "update_subcloud_endpoint_status",
+                subcloud_name=subcloud_name,
+                subcloud_region=subcloud_region,
+                endpoint_type=endpoint_type,
+                sync_status=sync_status,
+                ignore_endpoints=ignore_endpoints,
+                alarmable=alarmable,
+            ),
+        )

     def update_subcloud_endpoint_status_sync(
-        self, ctxt, subcloud_name=None, subcloud_region=None, endpoint_type=None,
-        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, ignore_endpoints=None,
-        alarmable=True
+        self,
+        ctxt,
+        subcloud_name=None,
+        subcloud_region=None,
+        endpoint_type=None,
+        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
+        ignore_endpoints=None,
+        alarmable=True,
     ):
         # Note: synchronous
-        return self.call(ctxt, self.make_msg('update_subcloud_endpoint_status',
-                                             subcloud_name=subcloud_name,
-                                             subcloud_region=subcloud_region,
-                                             endpoint_type=endpoint_type,
-                                             sync_status=sync_status,
-                                             ignore_endpoints=ignore_endpoints,
-                                             alarmable=alarmable))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "update_subcloud_endpoint_status",
+                subcloud_name=subcloud_name,
+                subcloud_region=subcloud_region,
+                endpoint_type=endpoint_type,
+                sync_status=sync_status,
+                ignore_endpoints=ignore_endpoints,
+                alarmable=alarmable,
+            ),
+        )


 class ManagerClient(RPCClient):
@@ -135,188 +163,290 @@ class ManagerClient(RPCClient):
     1.0 - Initial version (Mitaka 1.0 release)
     """

-    BASE_RPC_API_VERSION = '1.0'
+    BASE_RPC_API_VERSION = "1.0"

     def __init__(self, timeout=None):
         super(ManagerClient, self).__init__(
-            timeout,
-            consts.TOPIC_DC_MANAGER,
-            self.BASE_RPC_API_VERSION)
+            timeout, consts.TOPIC_DC_MANAGER, self.BASE_RPC_API_VERSION
+        )

     def add_subcloud(self, ctxt, subcloud_id, payload):
-        return self.cast(ctxt, self.make_msg('add_subcloud',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload))
+        return self.cast(
+            ctxt,
+            self.make_msg("add_subcloud", subcloud_id=subcloud_id, payload=payload),
+        )

     def add_secondary_subcloud(self, ctxt, subcloud_id, payload):
-        return self.call(ctxt, self.make_msg('add_secondary_subcloud',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "add_secondary_subcloud", subcloud_id=subcloud_id, payload=payload
+            ),
+        )

     def delete_subcloud(self, ctxt, subcloud_id):
-        return self.call(ctxt, self.make_msg('delete_subcloud',
-                                             subcloud_id=subcloud_id))
+        return self.call(
+            ctxt, self.make_msg("delete_subcloud", subcloud_id=subcloud_id)
+        )

     def rename_subcloud(
         self, ctxt, subcloud_id, curr_subcloud_name, new_subcloud_name=None
     ):
-        return self.call(ctxt, self.make_msg('rename_subcloud',
-                                             subcloud_id=subcloud_id,
-                                             curr_subcloud_name=curr_subcloud_name,
-                                             new_subcloud_name=new_subcloud_name))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "rename_subcloud",
+                subcloud_id=subcloud_id,
+                curr_subcloud_name=curr_subcloud_name,
+                new_subcloud_name=new_subcloud_name,
+            ),
+        )

     def update_subcloud(
-        self, ctxt, subcloud_id, management_state=None, description=None,
-        location=None, group_id=None, data_install=None, force=None,
-        deploy_status=None, peer_group_id=None, bootstrap_values=None,
-        bootstrap_address=None
+        self,
+        ctxt,
+        subcloud_id,
+        management_state=None,
+        description=None,
+        location=None,
+        group_id=None,
+        data_install=None,
+        force=None,
+        deploy_status=None,
+        peer_group_id=None,
+        bootstrap_values=None,
+        bootstrap_address=None,
     ):
-        return self.call(ctxt, self.make_msg('update_subcloud',
-                                             subcloud_id=subcloud_id,
-                                             management_state=management_state,
-                                             description=description,
-                                             location=location,
-                                             group_id=group_id,
-                                             data_install=data_install,
-                                             force=force,
-                                             deploy_status=deploy_status,
-                                             peer_group_id=peer_group_id,
-                                             bootstrap_values=bootstrap_values,
-                                             bootstrap_address=bootstrap_address))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "update_subcloud",
+                subcloud_id=subcloud_id,
+                management_state=management_state,
+                description=description,
+                location=location,
+                group_id=group_id,
+                data_install=data_install,
+                force=force,
+                deploy_status=deploy_status,
+                peer_group_id=peer_group_id,
+                bootstrap_values=bootstrap_values,
+                bootstrap_address=bootstrap_address,
+            ),
+        )

     def update_subcloud_with_network_reconfig(self, ctxt, subcloud_id, payload):
-        return self.cast(ctxt, self.make_msg('update_subcloud_with_network_reconfig',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload))
-
-    def redeploy_subcloud(self, ctxt, subcloud_id, payload):
-        return self.cast(ctxt, self.make_msg('redeploy_subcloud',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload))
-
-    def backup_subclouds(self, ctxt, payload):
-        return self.cast(ctxt, self.make_msg('backup_subclouds',
-                                             payload=payload))
-
-    def delete_subcloud_backups(self, ctxt, release_version, payload):
-        return self.call(ctxt, self.make_msg('delete_subcloud_backups',
-                                             release_version=release_version,
-                                             payload=payload))
-
-    def restore_subcloud_backups(self, ctxt, payload):
-        return self.cast(ctxt, self.make_msg('restore_subcloud_backups',
-                                             payload=payload))
-
-    def update_subcloud_sync_endpoint_type(self, ctxt,
-                                           subcloud_region,
-                                           endpoint_type_list,
-                                           openstack_installed):
         return self.cast(
             ctxt,
-            self.make_msg('update_subcloud_sync_endpoint_type',
-                          subcloud_region=subcloud_region,
-                          endpoint_type_list=endpoint_type_list,
-                          openstack_installed=openstack_installed))
+            self.make_msg(
+                "update_subcloud_with_network_reconfig",
+                subcloud_id=subcloud_id,
+                payload=payload,
+            ),
+        )
+
+    def redeploy_subcloud(self, ctxt, subcloud_id, payload):
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "redeploy_subcloud", subcloud_id=subcloud_id, payload=payload
+            ),
+        )
+
+    def backup_subclouds(self, ctxt, payload):
+        return self.cast(ctxt, self.make_msg("backup_subclouds", payload=payload))
+
+    def delete_subcloud_backups(self, ctxt, release_version, payload):
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "delete_subcloud_backups",
+                release_version=release_version,
+                payload=payload,
+            ),
+        )
+
+    def restore_subcloud_backups(self, ctxt, payload):
+        return self.cast(
+            ctxt, self.make_msg("restore_subcloud_backups", payload=payload)
+        )
+
+    def update_subcloud_sync_endpoint_type(
+        self, ctxt, subcloud_region, endpoint_type_list, openstack_installed
+    ):
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "update_subcloud_sync_endpoint_type",
+                subcloud_region=subcloud_region,
+                endpoint_type_list=endpoint_type_list,
+                openstack_installed=openstack_installed,
+            ),
+        )

     def prestage_subcloud(self, ctxt, payload):
-        return self.call(ctxt, self.make_msg('prestage_subcloud',
-                                             payload=payload))
+        return self.call(ctxt, self.make_msg("prestage_subcloud", payload=payload))

     def subcloud_deploy_create(self, ctxt, subcloud_id, payload):
-        return self.call(ctxt, self.make_msg('subcloud_deploy_create',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_create", subcloud_id=subcloud_id, payload=payload
+            ),
+        )

-    def subcloud_deploy_install(self, ctxt, subcloud_id, payload,
-                                initial_deployment):
-        return self.cast(ctxt, self.make_msg('subcloud_deploy_install',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload,
-                                             initial_deployment=initial_deployment))
+    def subcloud_deploy_install(self, ctxt, subcloud_id, payload, initial_deployment):
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_install",
+                subcloud_id=subcloud_id,
+                payload=payload,
+                initial_deployment=initial_deployment,
+            ),
+        )

     def subcloud_deploy_enroll(self, ctxt, subcloud_id, payload):
-        return self.cast(ctxt, self.make_msg('subcloud_deploy_enroll',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload))
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_enroll", subcloud_id=subcloud_id, payload=payload
+            ),
+        )

-    def subcloud_deploy_bootstrap(self, ctxt, subcloud_id, payload,
-                                  initial_deployment):
-        return self.cast(ctxt, self.make_msg('subcloud_deploy_bootstrap',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload,
-                                             initial_deployment=initial_deployment))
+    def subcloud_deploy_bootstrap(self, ctxt, subcloud_id, payload, initial_deployment):
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_bootstrap",
+                subcloud_id=subcloud_id,
+                payload=payload,
+                initial_deployment=initial_deployment,
+            ),
+        )

-    def subcloud_deploy_config(self, ctxt, subcloud_id, payload,
-                               initial_deployment):
-        return self.cast(ctxt, self.make_msg('subcloud_deploy_config',
-                                             subcloud_id=subcloud_id,
-                                             payload=payload,
-                                             initial_deployment=initial_deployment))
+    def subcloud_deploy_config(self, ctxt, subcloud_id, payload, initial_deployment):
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_config",
+                subcloud_id=subcloud_id,
+                payload=payload,
+                initial_deployment=initial_deployment,
+            ),
+        )

     def subcloud_deploy_complete(self, ctxt, subcloud_id):
-        return self.call(ctxt, self.make_msg('subcloud_deploy_complete',
-                                             subcloud_id=subcloud_id))
+        return self.call(
+            ctxt, self.make_msg("subcloud_deploy_complete", subcloud_id=subcloud_id)
+        )

     def subcloud_deploy_abort(self, ctxt, subcloud_id, deploy_status):
-        return self.cast(ctxt, self.make_msg('subcloud_deploy_abort',
-                                             subcloud_id=subcloud_id,
-                                             deploy_status=deploy_status))
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_abort",
+                subcloud_id=subcloud_id,
+                deploy_status=deploy_status,
+            ),
+        )

-    def subcloud_deploy_resume(self, ctxt, subcloud_id, subcloud_name,
-                               payload, deploy_states_to_run):
-        return self.cast(ctxt, self.make_msg(
-            'subcloud_deploy_resume',
-            subcloud_id=subcloud_id,
-            subcloud_name=subcloud_name,
-            payload=payload,
-            deploy_states_to_run=deploy_states_to_run))
+    def subcloud_deploy_resume(
+        self, ctxt, subcloud_id, subcloud_name, payload, deploy_states_to_run
+    ):
+        return self.cast(
+            ctxt,
+            self.make_msg(
+                "subcloud_deploy_resume",
+                subcloud_id=subcloud_id,
+                subcloud_name=subcloud_name,
+                payload=payload,
+                deploy_states_to_run=deploy_states_to_run,
+            ),
+        )

     def get_subcloud_name_by_region_name(self, ctxt, subcloud_region):
-        return self.call(ctxt, self.make_msg('get_subcloud_name_by_region_name',
-                                             subcloud_region=subcloud_region))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "get_subcloud_name_by_region_name", subcloud_region=subcloud_region
+            ),
+        )

     def batch_migrate_subcloud(self, ctxt, payload):
-        return self.cast(ctxt, self.make_msg('batch_migrate_subcloud',
-                                             payload=payload))
+        return self.cast(ctxt, self.make_msg("batch_migrate_subcloud", payload=payload))

     def sync_subcloud_peer_group(self, ctxt, association_id):
-        return self.cast(ctxt, self.make_msg(
-            'sync_subcloud_peer_group', association_id=association_id))
+        return self.cast(
+            ctxt,
+            self.make_msg("sync_subcloud_peer_group", association_id=association_id),
+        )

     def sync_subcloud_peer_group_only(self, ctxt, association_id):
         # Without synchronizing subclouds
-        return self.call(ctxt, self.make_msg(
-            'sync_subcloud_peer_group', association_id=association_id,
-            sync_subclouds=False))
+        return self.call(
+            ctxt,
+            self.make_msg(
+                "sync_subcloud_peer_group",
|
association_id=association_id,
|
||||||
|
sync_subclouds=False,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
def update_subcloud_peer_group(self, ctxt, peer_group_id,
|
def update_subcloud_peer_group(
|
||||||
group_state, max_subcloud_rehoming,
|
self,
|
||||||
group_name, new_group_name=None):
|
ctxt,
|
||||||
return self.call(ctxt, self.make_msg(
|
peer_group_id,
|
||||||
'update_subcloud_peer_group',
|
group_state,
|
||||||
peer_group_id=peer_group_id,
|
max_subcloud_rehoming,
|
||||||
group_state=group_state,
|
group_name,
|
||||||
max_subcloud_rehoming=max_subcloud_rehoming,
|
new_group_name=None,
|
||||||
group_name=group_name, new_group_name=new_group_name))
|
):
|
||||||
|
return self.call(
|
||||||
|
ctxt,
|
||||||
|
self.make_msg(
|
||||||
|
"update_subcloud_peer_group",
|
||||||
|
peer_group_id=peer_group_id,
|
||||||
|
group_state=group_state,
|
||||||
|
max_subcloud_rehoming=max_subcloud_rehoming,
|
||||||
|
group_name=group_name,
|
||||||
|
new_group_name=new_group_name,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
def delete_peer_group_association(self, ctxt, association_id):
|
def delete_peer_group_association(self, ctxt, association_id):
|
||||||
return self.call(ctxt, self.make_msg('delete_peer_group_association',
|
return self.call(
|
||||||
association_id=association_id))
|
ctxt,
|
||||||
|
self.make_msg(
|
||||||
|
"delete_peer_group_association", association_id=association_id
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
def update_association_sync_status(self, ctxt, peer_group_id,
|
def update_association_sync_status(
|
||||||
sync_status, sync_message=None):
|
self, ctxt, peer_group_id, sync_status, sync_message=None
|
||||||
return self.call(ctxt, self.make_msg('update_association_sync_status',
|
):
|
||||||
peer_group_id=peer_group_id,
|
return self.call(
|
||||||
sync_status=sync_status,
|
ctxt,
|
||||||
sync_message=sync_message))
|
self.make_msg(
|
||||||
|
"update_association_sync_status",
|
||||||
|
peer_group_id=peer_group_id,
|
||||||
|
sync_status=sync_status,
|
||||||
|
sync_message=sync_message,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
def peer_monitor_notify(self, ctxt):
|
def peer_monitor_notify(self, ctxt):
|
||||||
return self.call(ctxt, self.make_msg('peer_monitor_notify'))
|
return self.call(ctxt, self.make_msg("peer_monitor_notify"))
|
||||||
|
|
||||||
def peer_group_audit_notify(self, ctxt, peer_group_name, payload):
|
def peer_group_audit_notify(self, ctxt, peer_group_name, payload):
|
||||||
return self.call(ctxt, self.make_msg('peer_group_audit_notify',
|
return self.call(
|
||||||
peer_group_name=peer_group_name,
|
ctxt,
|
||||||
payload=payload))
|
self.make_msg(
|
||||||
|
"peer_group_audit_notify",
|
||||||
|
peer_group_name=peer_group_name,
|
||||||
|
payload=payload,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class DCManagerNotifications(RPCClient):
|
class DCManagerNotifications(RPCClient):
|
||||||
@ -325,24 +455,33 @@ class DCManagerNotifications(RPCClient):
|
|||||||
Version History:
|
Version History:
|
||||||
1.0 - Initial version
|
1.0 - Initial version
|
||||||
"""
|
"""
|
||||||
DCMANAGER_RPC_API_VERSION = '1.0'
|
|
||||||
TOPIC_DC_NOTIFICIATION = 'DCMANAGER-NOTIFICATION'
|
DCMANAGER_RPC_API_VERSION = "1.0"
|
||||||
|
TOPIC_DC_NOTIFICIATION = "DCMANAGER-NOTIFICATION"
|
||||||
|
|
||||||
def __init__(self, timeout=None):
|
def __init__(self, timeout=None):
|
||||||
super(DCManagerNotifications, self).__init__(
|
super(DCManagerNotifications, self).__init__(
|
||||||
timeout,
|
timeout, self.TOPIC_DC_NOTIFICIATION, self.DCMANAGER_RPC_API_VERSION
|
||||||
self.TOPIC_DC_NOTIFICIATION,
|
)
|
||||||
self.DCMANAGER_RPC_API_VERSION)
|
|
||||||
|
|
||||||
def subcloud_online(self, ctxt, subcloud_name):
|
def subcloud_online(self, ctxt, subcloud_name):
|
||||||
return self.cast(ctxt, self.make_msg('subcloud_online',
|
return self.cast(
|
||||||
subcloud_name=subcloud_name))
|
ctxt, self.make_msg("subcloud_online", subcloud_name=subcloud_name)
|
||||||
|
)
|
||||||
|
|
||||||
def subcloud_managed(self, ctxt, subcloud_name):
|
def subcloud_managed(self, ctxt, subcloud_name):
|
||||||
return self.cast(ctxt, self.make_msg('subcloud_managed',
|
return self.cast(
|
||||||
subcloud_name=subcloud_name))
|
ctxt, self.make_msg("subcloud_managed", subcloud_name=subcloud_name)
|
||||||
|
)
|
||||||
|
|
||||||
def subcloud_sysinv_endpoint_update(self, ctxt, subcloud_name, endpoint):
|
def subcloud_sysinv_endpoint_update(self, ctxt, subcloud_name, endpoint):
|
||||||
return self.cast(ctxt, self.make_msg(
|
return self.cast(
|
||||||
'subcloud_sysinv_endpoint_update', subcloud_name=subcloud_name,
|
ctxt,
|
||||||
endpoint=endpoint), fanout=True, version=self.DCMANAGER_RPC_API_VERSION)
|
self.make_msg(
|
||||||
|
"subcloud_sysinv_endpoint_update",
|
||||||
|
subcloud_name=subcloud_name,
|
||||||
|
endpoint=endpoint,
|
||||||
|
),
|
||||||
|
fanout=True,
|
||||||
|
version=self.DCMANAGER_RPC_API_VERSION,
|
||||||
|
)
|
||||||
|
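Taken together, the wrappers above are thin shims over oslo.messaging: call() blocks for a reply while cast() returns immediately. A minimal usage sketch, assuming the enclosing class is the dcmanager manager RPC client (here called ManagerClient) and that an admin request context helper exists (both are assumptions; neither is defined in this diff):

# Hypothetical usage sketch; ManagerClient and get_admin_context() are
# assumed from the surrounding dcmanager code, not shown in this diff.
from dcmanager.common import context as dcmanager_context
from dcmanager.rpc import client as rpc_client

ctxt = dcmanager_context.get_admin_context()
manager = rpc_client.ManagerClient()

# call(): blocks until the dcmanager service replies.
manager.subcloud_deploy_create(ctxt, subcloud_id=1, payload={})

# cast(): fire-and-forget; the install proceeds asynchronously.
manager.subcloud_deploy_install(
    ctxt, subcloud_id=1, payload={}, initial_deployment=True
)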
@@ -192,16 +192,14 @@ class DCManagerStateService(service.Service):
        )

    def bulk_update_subcloud_availability_and_endpoint_status(
        self, context, subcloud_name, subcloud_region, availability_data, endpoint_data
    ):
        LOG.info(
            "Handling bulk_update_subcloud_availability_and_endpoint_status request "
            f"for subcloud: {subcloud_name}"
        )

        manager = self.subcloud_state_manager
        manager.bulk_update_subcloud_availability_and_endpoint_status(
            context, subcloud_name, subcloud_region, availability_data, endpoint_data
        )
@@ -59,18 +59,25 @@ class SubcloudStateManager(manager.Manager):
    """Manages tasks related to subclouds."""

    def __init__(self, *args, **kwargs):
        LOG.debug("SubcloudStateManager initialization...")

        super(SubcloudStateManager, self).__init__(
            service_name="subcloud_manager", *args, **kwargs
        )
        self.context = context.get_admin_context()
        self.dcorch_rpc_client = dcorch_rpc_client.EngineWorkerClient()
        self.fm_api = fm_api.FaultAPIs()
        self.audit_rpc_client = dcmanager_audit_rpc_client.ManagerAuditClient()

    def _do_update_subcloud_endpoint_status(
        self,
        context,
        subcloud_id,
        endpoint_type,
        sync_status,
        alarmable,
        ignore_endpoints=None,
    ):
        """Update online/managed subcloud endpoint status

        :param context: request context object

@@ -91,14 +98,19 @@ class SubcloudStateManager(manager.Manager):
        # retrieve the info from the db for this subcloud.
        # subcloud_id should not be None
        try:
            for subcloud, subcloud_status in db_api.subcloud_get_with_status(
                context, subcloud_id
            ):
                if subcloud_status:
                    subcloud_status_list.append(
                        db_api.subcloud_endpoint_status_db_model_to_dict(
                            subcloud_status
                        )
                    )
                    if (
                        subcloud_status.endpoint_type
                        == dccommon_consts.ENDPOINT_TYPE_IDENTITY
                    ):
                        original_identity_status = subcloud_status.sync_status
        except Exception as e:
            LOG.exception(e)

@@ -108,28 +120,30 @@ class SubcloudStateManager(manager.Manager):
        if endpoint_type:
            # updating a single endpoint on a single subcloud
            for subcloud_status in subcloud_status_list:
                if subcloud_status["endpoint_type"] == endpoint_type:
                    if subcloud_status["sync_status"] == sync_status:
                        # No change in the sync_status
                        LOG.debug(
                            "Sync status (%s) for subcloud %s did not change "
                            "- ignore update" % (sync_status, subcloud.name)
                        )
                        return
                    # We found the endpoint
                    break
            else:
                # We did not find the endpoint
                raise exceptions.BadRequest(
                    resource="subcloud",
                    msg="Endpoint %s not found for subcloud" % endpoint_type,
                )

            LOG.info(
                "Updating subcloud:%s endpoint:%s sync:%s"
                % (subcloud.name, endpoint_type, sync_status)
            )
            db_api.subcloud_status_update(
                context, subcloud_id, endpoint_type, sync_status
            )

            # Trigger subcloud audits for the subcloud after
            # its identity endpoint turns to other status from unknown
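The endpoint lookup in the hunk above leans on Python's for/else: the else arm runs only when the loop completes without hitting break, which is what makes the "endpoint not found" branch correct. A standalone sketch of the same control flow, with illustrative names only:

def find_endpoint_status(status_list, endpoint_type):
    # Mirrors the for/else shape used in _do_update_subcloud_endpoint_status.
    for status in status_list:
        if status["endpoint_type"] == endpoint_type:
            break  # found it; the else clause below is skipped
    else:
        # reached only when the loop never breaks, i.e. no match
        raise LookupError("Endpoint %s not found" % endpoint_type)
    return status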
@@ -137,33 +151,38 @@ class SubcloudStateManager(manager.Manager):
            is_identity_unknown = (
                original_identity_status == dccommon_consts.SYNC_STATUS_UNKNOWN
            )
            if (
                endpoint_type == dccommon_consts.ENDPOINT_TYPE_IDENTITY
                and is_sync_unknown
                and is_identity_unknown
            ):
                if not subcloud.first_identity_sync_complete:
                    db_api.subcloud_update(
                        context, subcloud_id, first_identity_sync_complete=True
                    )
                LOG.debug(
                    "Request for audits for %s after updating "
                    "identity out of unknown" % subcloud.name
                )
                self.audit_rpc_client.trigger_subcloud_audits(context, subcloud_id)

            entity_instance_id = "subcloud=%s.resource=%s" % (
                subcloud.name,
                endpoint_type,
            )
            fault = self.fm_api.get_fault(ALARM_OUT_OF_SYNC, entity_instance_id)

            if (sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC) and fault:
                try:
                    self.fm_api.clear_fault(ALARM_OUT_OF_SYNC, entity_instance_id)
                except Exception as e:
                    LOG.exception(e)

            elif (
                not fault
                and alarmable
                and (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
            ):
                entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
                try:

@@ -173,15 +192,17 @@ class SubcloudStateManager(manager.Manager):
                        entity_type_id=entity_type_id,
                        entity_instance_id=entity_instance_id,
                        severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
                        reason_text=(
                            "%s %s sync_status is out-of-sync"
                            % (subcloud.name, endpoint_type)
                        ),
                        alarm_type=fm_const.FM_ALARM_TYPE_0,
                        probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
                        proposed_repair_action=(
                            "If problem persists contact next level of support"
                        ),
                        service_affecting=False,
                    )

                    self.fm_api.set_fault(fault)

@@ -190,9 +211,11 @@ class SubcloudStateManager(manager.Manager):
        else:
            # update all endpoints on this subcloud
            LOG.info(
                "Updating all endpoints on subcloud: %s sync: %s "
                "ignore_endpoints: %s"
                % (subcloud.name, sync_status, ignore_endpoints)
            )

            # TODO(yuxing): The following code can be further optimized when
            # batch alarm clearance APIs are available, so we don't need to

@@ -209,28 +232,32 @@ class SubcloudStateManager(manager.Manager):
                    continue
                endpoint_to_update_list.append(endpoint)

                entity_instance_id = "subcloud=%s.resource=%s" % (
                    subcloud.name,
                    endpoint,
                )

                fault = self.fm_api.get_fault(ALARM_OUT_OF_SYNC, entity_instance_id)

                # TODO(yuxing): batch clear all the out-of-sync alarms of a
                # given subcloud if fm_api support it. Be careful with the
                # dc-cert endpoint when adding the above; the endpoint
                # alarm must remain for offline subclouds.
                if (
                    sync_status != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
                ) and fault:
                    try:
                        self.fm_api.clear_fault(
                            ALARM_OUT_OF_SYNC, entity_instance_id
                        )
                    except Exception as e:
                        LOG.exception(e)

                elif (
                    not fault
                    and alarmable
                    and (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
                ):
                    entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
                    try:
                        fault = fm_api.Fault(

@@ -239,15 +266,17 @@ class SubcloudStateManager(manager.Manager):
                            entity_type_id=entity_type_id,
                            entity_instance_id=entity_instance_id,
                            severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
                            reason_text=(
                                "%s %s sync_status is out-of-sync"
                                % (subcloud.name, endpoint)
                            ),
                            alarm_type=fm_const.FM_ALARM_TYPE_0,
                            probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
                            proposed_repair_action=(
                                "If problem persists contact next level of support"
                            ),
                            service_affecting=False,
                        )

                        self.fm_api.set_fault(fault)
                    except Exception as e:

@@ -256,10 +285,8 @@ class SubcloudStateManager(manager.Manager):
            if endpoint_to_update_list:
                try:
                    db_api.subcloud_status_update_endpoints(
                        context, subcloud_id, endpoint_to_update_list, sync_status
                    )
                except Exception as e:
                    LOG.exception(e)

@@ -287,30 +314,30 @@ class SubcloudStateManager(manager.Manager):
        # the sync status update must be done first.
        #
        is_in_sync = sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC
        is_online = subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE
        is_managed = subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED
        is_endpoint_type_dc_cert = (
            endpoint_type == dccommon_consts.ENDPOINT_TYPE_DC_CERT
        )
        is_secondary = subcloud.deploy_status == consts.DEPLOY_STATE_SECONDARY
        is_sync_unknown = sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
        is_secondary_and_sync_unknown = is_secondary and is_sync_unknown

        return (
            (not is_in_sync or (is_online and (is_managed or is_endpoint_type_dc_cert)))
            and not is_secondary
        ) or is_secondary_and_sync_unknown

    @sync_update_subcloud_endpoint_status
    def _update_subcloud_endpoint_status(
        self,
        context,
        subcloud_region,
        endpoint_type=None,
        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
        alarmable=True,
        ignore_endpoints=None,
    ):
        """Update subcloud endpoint status

        :param context: request context object

@@ -327,8 +354,8 @@ class SubcloudStateManager(manager.Manager):
        if not subcloud_region:
            raise exceptions.BadRequest(
                resource="subcloud", msg="Subcloud region not provided"
            )

        try:
            subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)

@@ -340,21 +367,31 @@ class SubcloudStateManager(manager.Manager):
            # update a single subcloud
            try:
                self._do_update_subcloud_endpoint_status(
                    context,
                    subcloud.id,
                    endpoint_type,
                    sync_status,
                    alarmable,
                    ignore_endpoints,
                )
            except Exception as e:
                LOG.exception(e)
                raise e
        else:
            LOG.info(
                "Ignoring subcloud sync_status update for subcloud:%s "
                "availability:%s management:%s endpoint:%s sync:%s"
                % (
                    subcloud.name,
                    subcloud.availability_status,
                    subcloud.management_state,
                    endpoint_type,
                    sync_status,
                )
            )

    def bulk_update_subcloud_availability_and_endpoint_status(
        self, context, subcloud_name, subcloud_region, availability_data, endpoint_data
    ):
        # This bulk update is executed as part of the audit process in dcmanager and
        # its related endpoints. This method is not used by dcorch and cert-mon.

@@ -362,21 +399,20 @@ class SubcloudStateManager(manager.Manager):
        try:
            subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
        except Exception:
            LOG.exception(f"Failed to get subcloud by region name {subcloud_region}")
            raise

        if availability_data:
            self.update_subcloud_availability(
                context,
                subcloud_region,
                availability_data["availability_status"],
                availability_data["update_state_only"],
                availability_data["audit_fail_count"],
                subcloud,
            )
        if endpoint_data:
            self._bulk_update_subcloud_endpoint_status(context, subcloud, endpoint_data)

    @lockutils.synchronized(LOCK_NAME)
    def _do_bulk_update_subcloud_endpoint_status(

@@ -413,8 +449,7 @@ class SubcloudStateManager(manager.Manager):
            except Exception as e:
                LOG.exception(e)

        elif not fault and (sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
            entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
            try:
                fault = fm_api.Fault(

@@ -423,15 +458,17 @@ class SubcloudStateManager(manager.Manager):
                    entity_type_id=entity_type_id,
                    entity_instance_id=entity_instance_id,
                    severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
                    reason_text=(
                        "%s %s sync_status is "
                        "out-of-sync" % (subcloud.name, endpoint)
                    ),
                    alarm_type=fm_const.FM_ALARM_TYPE_0,
                    probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
                    proposed_repair_action="If problem persists "
                    "contact next level "
                    "of support",
                    service_affecting=False,
                )

                self.fm_api.set_fault(fault)
            except Exception as e:

@@ -439,7 +476,9 @@ class SubcloudStateManager(manager.Manager):
        try:
            db_api.subcloud_status_bulk_update_endpoints(
                context,
                subcloud.id,
                endpoint_list,
            )
        except Exception as e:
            LOG.exception(

@@ -447,9 +486,7 @@ class SubcloudStateManager(manager.Manager):
                f"endpoint status: {e}"
            )

    def _bulk_update_subcloud_endpoint_status(self, context, subcloud, endpoint_list):
        """Update the sync status of a list of subcloud endpoints

        :param context: current context object

@@ -483,12 +520,14 @@ class SubcloudStateManager(manager.Manager):
        )

    def update_subcloud_endpoint_status(
        self,
        context,
        subcloud_region=None,
        endpoint_type=None,
        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
        alarmable=True,
        ignore_endpoints=None,
    ):
        """Update subcloud endpoint status

        :param context: request context object

@@ -505,83 +544,108 @@ class SubcloudStateManager(manager.Manager):
        if subcloud_region:
            self._update_subcloud_endpoint_status(
                context,
                subcloud_region,
                endpoint_type,
                sync_status,
                alarmable,
                ignore_endpoints,
            )
        else:
            # update all subclouds
            for subcloud in db_api.subcloud_get_all(context):
                self._update_subcloud_endpoint_status(
                    context,
                    subcloud.region_name,
                    endpoint_type,
                    sync_status,
                    alarmable,
                    ignore_endpoints,
                )

    def _update_subcloud_state(
        self,
        context,
        subcloud_name,
        subcloud_region,
        management_state,
        availability_status,
    ):
        try:
            LOG.info(
                "Notifying dcorch, subcloud:%s management: %s, availability:%s"
                % (subcloud_name, management_state, availability_status)
            )

            self.dcorch_rpc_client.update_subcloud_states(
                context, subcloud_region, management_state, availability_status
            )

        except Exception:
            LOG.exception(
                "Problem informing dcorch of subcloud state change, subcloud: %s"
                % subcloud_name
            )

    def _raise_or_clear_subcloud_status_alarm(
        self, subcloud_name, availability_status, deploy_status=None
    ):
        entity_instance_id = "subcloud=%s" % subcloud_name
        fault = self.fm_api.get_fault(
            fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE, entity_instance_id
        )

        if fault and (availability_status == dccommon_consts.AVAILABILITY_ONLINE):
            try:
                self.fm_api.clear_fault(
                    fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE, entity_instance_id
                )
            except Exception:
                LOG.exception(
                    "Failed to clear offline alarm for subcloud: %s", subcloud_name
                )

        # Raise the alarm if the subcloud became offline and it's not a
        # secondary subcloud
        elif not fault and (
            availability_status == dccommon_consts.AVAILABILITY_OFFLINE
            and deploy_status != consts.DEPLOY_STATE_SECONDARY
        ):
            try:
                fault = fm_api.Fault(
                    alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
                    alarm_state=fm_const.FM_ALARM_STATE_SET,
                    entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD,
                    entity_instance_id=entity_instance_id,
                    severity=fm_const.FM_ALARM_SEVERITY_CRITICAL,
                    reason_text=("%s is offline" % subcloud_name),
                    alarm_type=fm_const.FM_ALARM_TYPE_0,
                    probable_cause=fm_const.ALARM_PROBABLE_CAUSE_29,
                    proposed_repair_action=(
                        "Wait for subcloud to become online; if problem persists "
                        "contact next level of support."
                    ),
                    service_affecting=True,
                )

                self.fm_api.set_fault(fault)
            except Exception:
                LOG.exception(
                    "Failed to raise offline alarm for subcloud: %s", subcloud_name
                )

    def update_subcloud_availability(
        self,
        context,
        subcloud_region,
        availability_status,
        update_state_only=False,
        audit_fail_count=None,
        subcloud=None,
    ):
        if subcloud is None:
            try:
                subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
            except Exception:
                LOG.exception(
                    "Failed to get subcloud by region name %s" % subcloud_region
                )

@@ -593,29 +657,37 @@ class SubcloudStateManager(manager.Manager):
            # subcloud's availability. This is required to compensate
            # for rare alarm update failures, which may occur during
            # availability updates.
            self._raise_or_clear_subcloud_status_alarm(
                subcloud.name, availability_status
            )

            # Nothing has changed, but we want to send a state update for this
            # subcloud as an audit. Get the most up-to-date data.
            self._update_subcloud_state(
                context,
                subcloud.name,
                subcloud.region_name,
                subcloud.management_state,
                availability_status,
            )
        elif availability_status is None:
            # only update the audit fail count
            try:
                db_api.subcloud_update(
                    self.context, subcloud.id, audit_fail_count=audit_fail_count
                )
            except exceptions.SubcloudNotFound:
                # slim possibility subcloud could have been deleted since
                # we found it in db, ignore this benign error.
                LOG.info(
                    "Ignoring SubcloudNotFound when attempting "
                    "audit_fail_count update: %s" % subcloud.name
                )
                return
        else:
            self._raise_or_clear_subcloud_status_alarm(
                subcloud.name, availability_status
            )

            if availability_status == dccommon_consts.AVAILABILITY_OFFLINE:
                # Subcloud is going offline, set all endpoint statuses to

@@ -634,70 +706,77 @@ class SubcloudStateManager(manager.Manager):
                    context,
                    subcloud.id,
                    availability_status=availability_status,
                    audit_fail_count=audit_fail_count,
                )
            except exceptions.SubcloudNotFound:
                # slim possibility subcloud could have been deleted since
                # we found it in db, ignore this benign error.
                LOG.info(
                    "Ignoring SubcloudNotFound when attempting state update: %s"
                    % subcloud.name
                )
                return

            if availability_status == dccommon_consts.AVAILABILITY_ONLINE:
                # Subcloud is going online
                # Tell cert-mon to audit endpoint certificate.
                LOG.info("Request for online audit for %s" % subcloud.name)
                dc_notification = rpc_client.DCManagerNotifications()
                dc_notification.subcloud_online(context, subcloud.region_name)
                # Trigger all the audits for the subcloud so it can update the
                # sync status ASAP.
                self.audit_rpc_client.trigger_subcloud_audits(context, subcloud.id)

            # Send dcorch a state update
            self._update_subcloud_state(
                context,
                subcloud.name,
                subcloud.region_name,
                updated_subcloud.management_state,
                availability_status,
            )

    def update_subcloud_sync_endpoint_type(
        self, context, subcloud_region, endpoint_type_list, openstack_installed
    ):
        operation = "add" if openstack_installed else "remove"
        func_switcher = {
            "add": (
                self.dcorch_rpc_client.add_subcloud_sync_endpoint_type,
                db_api.subcloud_status_create,
            ),
            "remove": (
                self.dcorch_rpc_client.remove_subcloud_sync_endpoint_type,
                db_api.subcloud_status_delete,
            ),
        }

        try:
            subcloud = db_api.subcloud_get_by_region_name(context, subcloud_region)
        except Exception:
            LOG.exception("Failed to get subcloud by region name: %s" % subcloud_region)
            raise

        try:
            # Notify dcorch to add/remove sync endpoint type list
            func_switcher[operation][0](
                self.context, subcloud_region, endpoint_type_list
            )
            LOG.info(
                "Notifying dcorch, subcloud: %s new sync endpoint: %s"
                % (subcloud.name, endpoint_type_list)
            )

            # Update subcloud status table by adding/removing openstack sync
            # endpoint types
            for endpoint_type in endpoint_type_list:
                func_switcher[operation][1](self.context, subcloud.id, endpoint_type)

            # Update openstack_installed of subcloud table
            db_api.subcloud_update(
                self.context, subcloud.id, openstack_installed=openstack_installed
            )
        except Exception:
            LOG.exception(
                "Problem informing dcorch of subcloud sync endpoint "
                "type change, subcloud: %s" % subcloud.name
            )
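The func_switcher table in the last hunk is a dictionary dispatch: each operation name maps to a pair of callables (the dcorch notifier and the matching db_api mutator), so the add and remove paths share one code body. A stripped-down sketch of the pattern with placeholder functions:

def notify_add(endpoint):
    print("notify dcorch: add", endpoint)

def notify_remove(endpoint):
    print("notify dcorch: remove", endpoint)

# operation -> handler, mirroring the shape of func_switcher
dispatch = {"add": notify_add, "remove": notify_remove}

openstack_installed = True
operation = "add" if openstack_installed else "remove"
for endpoint_type in ["identity", "sysinv"]:
    dispatch[operation](endpoint_type)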
@@ -1792,8 +1792,10 @@ class TestSubcloudUpdate(BaseTestSubcloudManager):
        fake_bootstrap_address = "10.10.20.12"
        self.assertRaisesRegex(
            exceptions.BadRequest,
            (
                "Cannot update bootstrap_address into rehome data, need to "
                "import bootstrap_values first"
            ),
            self.sm.update_subcloud,
            self.ctx,
            self.subcloud.id,

@@ -1933,10 +1935,12 @@ class TestSubcloudUpdate(BaseTestSubcloudManager):
            '"admin_password": "dGVzdHBhc3M=", '
            '"bootstrap-address": "123.123.123.123"}}'
        )
        fake_bootstrap_values = (
            '{"name": "TestSubcloud",'
            '"system_mode": "simplex", "sysadmin_password": "testpass",'
            '"ansible_ssh_pass": "fakepass", "ansible_become_pass": "fakepass",'
            '"admin_password": "testpass"}'
        )
        fake_bootstrap_address = "123.123.123.123"

        self.sm.update_subcloud(

@@ -2030,10 +2034,11 @@ class TestSubcloudUpdate(BaseTestSubcloudManager):
        self.subcloud["deploy_status"] = consts.DEPLOY_STATE_DEPLOY_FAILED
        self.assertRaisesRegex(
            exceptions.BadRequest,
            (
                f"Unable to manage {self.subcloud.name}: its deploy_status "
                f"must be either '{consts.DEPLOY_STATE_DONE}' or "
                f"'{consts.DEPLOY_STATE_REHOME_PENDING}'"
            ),
            self.sm.update_subcloud,
            self.ctx,
            self.subcloud.id,

@@ -4042,8 +4047,8 @@ class TestSubcloudBackupRestore(BaseTestSubcloudManager):
        self.assertIn(expected_log, return_log)
        self.mock_log.info.assert_called_with(
            "Subcloud restore backup operation finished.\nRestored subclouds: 0. "
            "Invalid subclouds: 1. Failed subclouds: 0."
        )

    @mock.patch.object(
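Several of the rewrapped expectations above rely on implicit concatenation of adjacent string literals: the parenthesized block passed to assertRaisesRegex is one string, not a tuple, because there is no comma between the pieces. A small illustration:

expected = (
    "Cannot update bootstrap_address into rehome data, need to "
    "import bootstrap_values first"
)
# Adjacent literals fuse at compile time; a comma between them would
# instead build a tuple and break the assertion message.
assert expected == (
    "Cannot update bootstrap_address into rehome data, "
    "need to import bootstrap_values first"
)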
@@ -1,4 +1,4 @@
-# Copyright (c) 2017-2021 Wind River Systems, Inc.
+# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -15,4 +15,4 @@

 import pbr.version

-version_info = pbr.version.VersionInfo('distributedcloud')
+version_info = pbr.version.VersionInfo("distributedcloud")
@@ -126,9 +126,7 @@ class PatchAPIController(Middleware):
                    os.remove(fn)
                    return
                except OSError:
                    msg = f"Unable to remove patch file {fn} from the central storage."
                    raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
        LOG.info(f"Patch {patch} was not found in {vault}")
@@ -294,7 +294,7 @@ endpoint_cache_opt_group = cfg.OptGroup(
)

openstack_cache_opt_group = cfg.OptGroup(
    name="openstack_cache", title="Containerized OpenStack Credentials"
)

fernet_opt_group = cfg.OptGroup(name="fernet", title="Fernet Options")
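For reference, cfg.OptGroup only declares a named configuration section; options are still registered against the group separately. A minimal oslo.config sketch (the option name and default below are illustrative, not taken from this repo):

from oslo_config import cfg

CONF = cfg.CONF

cache_group = cfg.OptGroup(
    name="openstack_cache", title="Containerized OpenStack Credentials"
)
cache_opts = [
    # Illustrative option; the real option set lives elsewhere in the config.
    cfg.StrOpt("admin_username", default="admin", help="Cache admin account"),
]

CONF.register_group(cache_group)
CONF.register_opts(cache_opts, group=cache_group)

# Values are then read as CONF.openstack_cache.admin_username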
@@ -56,8 +56,7 @@ class GenericSyncWorkerManager(object):
        for endpoint_type in endpoint_type_list:
            LOG.debug(
                f"Engine id:({self.engine_id}) create "
                f"{subcloud_name}/{endpoint_type}/{management_ip} sync obj"
            )
            sync_obj = sync_object_class_map[endpoint_type](
                subcloud_name, endpoint_type, management_ip

@@ -70,9 +69,7 @@ class GenericSyncWorkerManager(object):
            f"Engine id:({self.engine_id}) Start to sync "
            f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs."
        )
        LOG.debug(f"Engine id:({self.engine_id}) Start to sync {subcloud_sync_list}.")

        for sc_region_name, ept, ip in subcloud_sync_list:
            try:

@@ -373,13 +370,11 @@ class GenericSyncWorkerManager(object):
            f"Engine id:({self.engine_id}) Start to audit "
            f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs."
        )
        LOG.debug(f"Engine id:({self.engine_id}) Start to audit {subcloud_sync_list}.")

        for sc_region_name, ept, ip in subcloud_sync_list:
            LOG.debug(
                f"Attempt audit_subcloud: {self.engine_id}/{sc_region_name}/{ept}"
            )
            try:
                sync_obj = sync_object_class_map[ept](sc_region_name, ept, ip)

@@ -391,7 +386,7 @@ class GenericSyncWorkerManager(object):
                LOG.debug(
                    f"Engine id:({self.engine_id}/{sc_region_name}/{ept}) "
                    f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
                    "has been removed"
                )
            except Exception as e:
                LOG.error(
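The log rewraps above also depend on adjacent-literal concatenation, this time mixing f-strings with plain strings: only the fragments containing placeholders need the f prefix, and the pieces still fuse into a single message. Illustrative sketch with made-up values:

engine_id = "engine-1"  # made-up values for illustration
sc_region_name, ept = "subcloud1", "platform"

message = (
    f"Engine id:({engine_id}/{sc_region_name}/{ept}) "
    f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
    "has been removed"
)
assert "subcloud1/platform" in message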
@ -59,7 +59,7 @@ class InitialSyncWorkerManager(object):
|
|||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
f"Exception occurred when running initial_sync for "
|
"Exception occurred when running initial_sync for "
|
||||||
f"subcloud {sc_region_name}: {e}"
|
f"subcloud {sc_region_name}: {e}"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -82,9 +82,7 @@ class InitialSyncWorkerManager(object):
|
|||||||
)
|
)
|
||||||
if result == 0:
|
if result == 0:
|
||||||
# Sync is no longer required
|
# Sync is no longer required
|
||||||
LOG.debug(
|
LOG.debug(f"Initial sync for subcloud {subcloud_name} no longer required")
|
||||||
f"Initial sync for subcloud {subcloud_name} " f"no longer required"
|
|
||||||
)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
# sync_objs stores the sync object per endpoint
|
# sync_objs stores the sync object per endpoint
|
||||||
@ -131,12 +129,12 @@ class InitialSyncWorkerManager(object):
|
|||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
f"Unexpected new_state {new_state} for " f"subcloud {subcloud_name}"
|
f"Unexpected new_state {new_state} for subcloud {subcloud_name}"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
f"Initial sync was cancelled for subcloud "
|
f"Initial sync was cancelled for subcloud {subcloud_name} "
|
||||||
f"{subcloud_name} while in progress"
|
"while in progress"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _reattempt_sync(self, subcloud_name):
|
def _reattempt_sync(self, subcloud_name):
|
||||||
@@ -159,9 +157,8 @@ class InitialSyncWorkerManager(object):
         LOG.debug(f"enabling subcloud {subcloud_name}")
         for endpoint_type, sync_obj in sync_objs.items():
             LOG.debug(
-                f"Engine id: {self.engine_id} enabling sync thread "
-                f"for subcloud {subcloud_name} and "
-                f"endpoint type {endpoint_type}."
+                f"Engine id: {self.engine_id} enabling sync thread for subcloud "
+                f"{subcloud_name} and endpoint type {endpoint_type}."
             )
             sync_obj.enable()


@@ -1,4 +1,4 @@
-# Copyright (c) 2017-2024, 2024 Wind River Systems, Inc.
+# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -95,7 +95,7 @@ class SysinvSyncThread(SyncThread):

         sc_sysinv_url = build_subcloud_endpoint(self.management_ip, "sysinv")
         LOG.debug(
-            f"Built sc_sysinv_url {sc_sysinv_url} for subcloud " f"{self.subcloud_name}"
+            f"Built sc_sysinv_url {sc_sysinv_url} for subcloud {self.subcloud_name}"
         )

         self.sc_sysinv_client = SysinvClient(
@@ -266,8 +266,9 @@ class SysinvSyncThread(SyncThread):
         ]

         LOG.info(
-            "certificate {} {} [{}] updated with subcloud certificates:"
-            " {}".format(rsrc.id, subcloud_rsrc_id, signature, sub_certs_updated),
+            "certificate {} {} [{}] updated with subcloud certificates: {}".format(
+                rsrc.id, subcloud_rsrc_id, signature, sub_certs_updated
+            ),
             extra=self.log_extra,
         )

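Pulling the ".format(" call up onto the first literal, as in the hunk above, only moves the line breaks; the rendered log message is identical. A standalone sketch with made-up values (rsrc_id stands in for rsrc.id):

    rsrc_id, subcloud_rsrc_id, signature, certs = 1, 2, "sha256:abcd", ["cert1"]

    old_msg = (
        "certificate {} {} [{}] updated with subcloud certificates:"
        " {}".format(rsrc_id, subcloud_rsrc_id, signature, certs)
    )
    new_msg = "certificate {} {} [{}] updated with subcloud certificates: {}".format(
        rsrc_id, subcloud_rsrc_id, signature, certs
    )
    assert old_msg == new_msg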
@@ -381,8 +382,9 @@ class SysinvSyncThread(SyncThread):

         if not passwd_hash:
             LOG.info(
-                "sync_user no user update found in resource_info"
-                "{}".format(request.orch_job.resource_info),
+                "sync_user no user update found in resource_info {}".format(
+                    request.orch_job.resource_info
+                ),
                 extra=self.log_extra,
             )
             return
@@ -531,16 +533,18 @@ class SysinvSyncThread(SyncThread):
             keystone_exceptions.ConnectFailure,
         ) as e:
             LOG.info(
-                "get subcloud_resources {}: subcloud {} is not reachable"
-                "[{}]".format(resource_type, self.region_name, str(e)),
+                "get subcloud_resources {}: subcloud {} is not reachable [{}]".format(
+                    resource_type, self.region_name, str(e)
+                ),
                 extra=self.log_extra,
             )
             # None will force skip of audit
             return None
         except exceptions.NotAuthorized as e:
             LOG.info(
-                "get subcloud_resources {}: subcloud {} not authorized"
-                "[{}]".format(resource_type, self.region_name, str(e)),
+                "get subcloud_resources {}: subcloud {} not authorized [{}]".format(
+                    resource_type, self.region_name, str(e)
+                ),
                 extra=self.log_extra,
             )
             OpenStackDriver.delete_region_clients(self.region_name)
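Worth flagging: unlike the purely mechanical merges elsewhere in this change, the two hunks above slightly alter the emitted log text. The old implicit concatenations had no space before "[{}]" (and none before "{}" in the sync_user message), while the merged literals add one. A standalone sketch with made-up values:

    resource_type, region_name, err = "certificates", "subcloud1", "timeout"

    old_msg = (
        "get subcloud_resources {}: subcloud {} is not reachable"
        "[{}]".format(resource_type, region_name, err)
    )
    new_msg = "get subcloud_resources {}: subcloud {} is not reachable [{}]".format(
        resource_type, region_name, err
    )
    assert old_msg != new_msg  # the new message gains a space before "[...]"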

@@ -392,7 +392,7 @@ class SyncThread(object):
         # Early exit in case there are no pending sync requests
         if not sync_requests:
             LOG.debug(
-                "Sync resources done for subcloud - " "no sync requests",
+                "Sync resources done for subcloud - no sync requests",
                 extra=self.log_extra,
             )
             self.set_sync_status(dccommon_consts.SYNC_STATUS_IN_SYNC)
@@ -432,13 +432,13 @@ class SyncThread(object):

         if not actual_sync_requests:
             LOG.info(
-                "Sync resources done for subcloud - " "no valid sync requests",
+                "Sync resources done for subcloud - no valid sync requests",
                 extra=self.log_extra,
             )
             return
         elif not self.is_subcloud_enabled():
             LOG.info(
-                "Sync resources done for subcloud - " "subcloud is disabled",
+                "Sync resources done for subcloud - subcloud is disabled",
                 extra=self.log_extra,
             )
             return

@@ -50,7 +50,7 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Subcloud object without a " "resource_id set",
+                reason="cannot create a Subcloud object without a resource_id set",
             )

         updates = self.obj_get_changes()
@@ -59,7 +59,7 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Subcloud object without a " "endpoint_type set",
+                reason="cannot create a Subcloud object without a endpoint_type set",
             )

         updates = self.obj_get_changes()
@@ -68,8 +68,7 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Subcloud object without a "
-                "operation_type set",
+                reason="cannot create a Subcloud object without a operation_type set",
             )

         db_orch_job = db_api.orch_job_create(
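The reason= rewrites in these object-create hunks are byte-for-byte no-ops: Python concatenates the two adjacent literals into exactly the string the single literal now spells out. (The pre-existing "a operation_type" / "a endpoint_type" article slips live inside the string values, which a formatter never edits.) A minimal check:

    old_reason = "cannot create a Subcloud object without a " "operation_type set"
    new_reason = "cannot create a Subcloud object without a operation_type set"
    assert old_reason == new_reason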

@@ -54,7 +54,7 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Subcloud object without a " "orch_job_id set",
+                reason="cannot create a Subcloud object without a orch_job_id set",
             )

         updates = self.obj_get_changes()
@@ -63,8 +63,9 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Subcloud object without a "
-                "target_region_name set",
+                reason=(
+                    "cannot create a Subcloud object without a target_region_name set"
+                ),
             )

         db_orch_request = db_api.orch_request_create(
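In the hunk above the merged literal would still overflow the line, so it is wrapped in grouping parentheses instead. Without a trailing comma the parentheses do not build a tuple; the value stays a plain string, as this quick sketch confirms:

    reason = (
        "cannot create a Subcloud object without a target_region_name set"
    )
    assert isinstance(reason, str)  # grouping parentheses, not a tuple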

@@ -46,7 +46,7 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Resource object without a " "resource_type set",
+                reason="cannot create a Resource object without a resource_type set",
             )

         db_resource = db_api.resource_create(self._context, resource_type, updates)

@@ -56,7 +56,7 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
         except KeyError:
             raise exceptions.ObjectActionError(
                 action="create",
-                reason="cannot create a Subcloud object without a " "region_name set",
+                reason="cannot create a Subcloud object without a region_name set",
             )
         try:
             db_subcloud = db_api.subcloud_create(self._context, region_name, updates)

@@ -9,12 +9,6 @@ modules = [
     "dcdbsync",
     "dcagent",
     "dcorch",
-    "dcmanager/api",
-    "dcmanager/audit",
-    "dcmanager/common",
-    "dcmanager/db",
-    "dcmanager/orchestrator",
-    "dcmanager/tests",
     "dcmanager",
 ]

@@ -22,14 +16,9 @@ modules = [
 formatted_modules = [
     "dccommon",
     "dcdbsync",
-    "dcorch",
     "dcagent",
-    "dcmanager/api",
-    "dcmanager/audit",
-    "dcmanager/common",
-    "dcmanager/db",
-    "dcmanager/orchestrator",
-    "dcmanager/tests",
+    "dcorch",
+    "dcmanager",
 ]

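The consolidation above suggests the dcmanager tree is now checked as a single unit rather than per subpackage. The driver script that consumes these lists is not part of this excerpt; a minimal sketch of how such lists are commonly fed to "black --check" (list names reused from the diff; the first entries and the enforcement logic are assumptions):

    import subprocess

    modules = ["dccommon", "dcdbsync", "dcagent", "dcorch", "dcmanager"]
    formatted_modules = ["dccommon", "dcdbsync", "dcagent", "dcorch", "dcmanager"]

    for module in modules:
        # --check reports formatting drift without rewriting any files
        result = subprocess.run(["black", "--check", module])
        if result.returncode != 0 and module in formatted_modules:
            raise SystemExit(f"{module} is expected to stay Black-formatted")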

@@ -1,4 +1,5 @@
 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2024 Wind River Systems, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,6 +26,4 @@ except ImportError:
     pass

 # Danger - pbr requirement >= 2.0.0 not satisfied...
-setuptools.setup(
-    setup_requires=['pbr>=1.8.0'],
-    pbr=True)
+setuptools.setup(setup_requires=["pbr>=1.8.0"], pbr=True)
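The quote flip from 'pbr>=1.8.0' to "pbr>=1.8.0" is Black's standard quote normalization and cannot change the requirement, since both spellings denote the same string value:

    assert 'pbr>=1.8.0' == "pbr>=1.8.0"  # identical values, different quoting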