Apply black formatter to dcmanager/api

This commit applies the Black formatter to the `dcmanager/api`
files to ensure that they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50444

Change-Id: Ib1af98da7b1fdd6a478b8c093dc7dd474d3bc5e6
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
Author: Hugo Brito 2024-06-28 17:03:01 -03:00
parent 5558d9a4ee
commit 9aa90f102e
36 changed files with 2465 additions and 2101 deletions
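
The formatting is verified by the stx-distcloud-tox-black job listed in the
test plan, so the check can presumably be reproduced locally by running the
corresponding tox environment (likely `tox -e black`, as the job name suggests)
or by invoking `black` directly against `dcmanager/api`; either should report
no files needing reformatting after this change.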


@@ -18,5 +18,18 @@
# - Because you must use a hash, you need to append to this list in a follow-up
# commit to the actual reformatting commit that you are trying to ignore.
# Enable pylint/pep8 violations for all Python files
3685223b28194754fef89229724d795c6862014f
a2b9bb7ab28a0da08dd4ba20dc97671eb10b6b50
766f0522956d9e3a5272cff4dec06008d791c129
4438b8fd5579f20ece09e670b712e3afd691bd79
# Format all Python files with Black formatter
69970aac60f91f1bc7cbc1323364a5a782756cfe
83dbf64acabff56f9dfc9d496a5eed66fcd41481
686abd1cbc75b5a27798f522c1e686f3c8bd7376
56e359bf8adfa871838194145b04c25bd43f8321
fb82532bbf0c91fbd6ba385b0cbab880ec1159d6
51bee5a605ebb016b7905b243ac9d3da86551df6
51b6e19a2cf572399dc394ff3336c3c384de010a
70fd84b263fef57d9589683ce22496b4cb55d65c
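
For reference, this ignore list only takes effect when `git blame` is told to
consult it, e.g. via `git config blame.ignoreRevsFile .git-blame-ignore-revs`
or the `--ignore-revs-file` command-line option; both are standard git
features and are not part of this change.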


@@ -341,7 +341,7 @@ class SubcloudInstall(object):
)
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
("vlan" + "=" + vlan_inteface),
("vlan=" + vlan_inteface),
]
elif key == "bootstrap_interface" and "bootstrap_vlan" in values:
boot_interface = "%s.%s" % (


@@ -1,5 +1,5 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -32,29 +32,35 @@ from dcmanager.common import version
LOG = logging.getLogger(__name__)
common_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=8119,
help=_("The port to bind to")),
cfg.IntOpt('api_workers', default=2,
help=_("number of api workers")),
cfg.StrOpt('state_path',
default=os.path.join(os.path.dirname(__file__), '../'),
help='Top-level directory for maintaining dcmanager state'),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.StrOpt("bind_host", default="0.0.0.0", help=_("The host IP to bind to")),
cfg.IntOpt("bind_port", default=8119, help=_("The port to bind to")),
cfg.IntOpt("api_workers", default=2, help=_("number of api workers")),
cfg.StrOpt(
"state_path",
default=os.path.join(os.path.dirname(__file__), "../"),
help="Top-level directory for maintaining dcmanager state",
),
cfg.StrOpt(
"api_extensions_path", default="", help=_("The path for API extensions")
),
cfg.StrOpt(
"auth_strategy", default="keystone", help=_("The type of authentication to use")
),
cfg.BoolOpt("allow_bulk", default=True, help=_("Allow the usage of the bulk API")),
cfg.BoolOpt(
"allow_pagination", default=False, help=_("Allow the usage of the pagination")
),
cfg.BoolOpt(
"allow_sorting", default=False, help=_("Allow the usage of the sorting")
),
cfg.StrOpt(
"pagination_max_limit",
default="-1",
help=_(
"The maximum number of items returned in a single response, "
"value was 'infinite' or negative integer means no limit"
),
),
]
@@ -66,9 +72,12 @@ def init(args, **kwargs):
# auth.register_conf_options(cfg.CONF)
logging.register_options(cfg.CONF)
cfg.CONF(args=args, project='dcmanager',
version='%%(prog)s %s' % version.version_info.release_string(),
**kwargs)
cfg.CONF(
args=args,
project="dcmanager",
version="%%(prog)s %s" % version.version_info.release_string(),
**kwargs
)
def setup_logging():
@@ -76,9 +85,10 @@ def setup_logging():
product_name = "dcmanager"
logging.setup(cfg.CONF, product_name)
LOG.info("Logging enabled!")
LOG.info("%(prog)s version %(version)s",
{'prog': sys.argv[0],
'version': version.version_info.release_string()})
LOG.info(
"%(prog)s version %(version)s",
{"prog": sys.argv[0], "version": version.version_info.release_string()},
)
LOG.debug("command line: %s", " ".join(sys.argv))


@@ -30,20 +30,14 @@ def setup_app(*args, **kwargs):
opts = cfg.CONF.pecan
config = {
'server': {
'port': cfg.CONF.bind_port,
'host': cfg.CONF.bind_host
},
'app': {
'root': 'dcmanager.api.controllers.root.RootController',
'modules': ['dcmanager.api'],
"server": {"port": cfg.CONF.bind_port, "host": cfg.CONF.bind_host},
"app": {
"root": "dcmanager.api.controllers.root.RootController",
"modules": ["dcmanager.api"],
"debug": opts.debug,
"auth_enable": opts.auth_enable,
'errors': {
400: '/error',
'__force_dict__': True
}
}
"errors": {400: "/error", "__force_dict__": True},
},
}
pecan_config = pecan.configuration.conf_from_dict(config)
@@ -55,11 +49,8 @@ def setup_app(*args, **kwargs):
debug=False,
wrap_app=_wrap_app,
force_canonical=False,
hooks=lambda: [
ctx.AuthHook(),
ctx.AuditLoggingHook()
],
guess_content_type_from_ext=True
hooks=lambda: [ctx.AuthHook(), ctx.AuditLoggingHook()],
guess_content_type_from_ext=True,
)
return app
@@ -67,10 +58,10 @@ def setup_app(*args, **kwargs):
def _wrap_app(app):
app = request_id.RequestId(app)
if cfg.CONF.pecan.auth_enable and cfg.CONF.auth_strategy == 'keystone':
if cfg.CONF.pecan.auth_enable and cfg.CONF.auth_strategy == "keystone":
conf = dict(cfg.CONF.keystone_authtoken)
# Change auth decisions of requests to the app itself.
conf.update({'delay_auth_decision': True})
conf.update({"delay_auth_decision": True})
# NOTE: Policy enforcement works only if Keystone
# authentication is enabled. No support for other authentication
@@ -86,7 +77,7 @@ _launcher = None
def serve(api_service, conf, workers=1):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
raise RuntimeError(_("serve() can only be called once"))
_launcher = service.launch(conf, api_service, workers=workers)


@@ -24,34 +24,35 @@ import dcmanager.common.context as k_context
def extract_context_from_environ():
context_paras = {'auth_token': 'HTTP_X_AUTH_TOKEN',
'user': 'HTTP_X_USER_ID',
'project': 'HTTP_X_TENANT_ID',
'user_name': 'HTTP_X_USER_NAME',
'tenant_name': 'HTTP_X_PROJECT_NAME',
'domain': 'HTTP_X_DOMAIN_ID',
'roles': 'HTTP_X_ROLE',
'user_domain': 'HTTP_X_USER_DOMAIN_ID',
'project_domain': 'HTTP_X_PROJECT_DOMAIN_ID',
'request_id': 'openstack.request_id'}
context_paras = {
"auth_token": "HTTP_X_AUTH_TOKEN",
"user": "HTTP_X_USER_ID",
"project": "HTTP_X_TENANT_ID",
"user_name": "HTTP_X_USER_NAME",
"tenant_name": "HTTP_X_PROJECT_NAME",
"domain": "HTTP_X_DOMAIN_ID",
"roles": "HTTP_X_ROLE",
"user_domain": "HTTP_X_USER_DOMAIN_ID",
"project_domain": "HTTP_X_PROJECT_DOMAIN_ID",
"request_id": "openstack.request_id",
}
environ = request.environ
for key, val in context_paras.items():
context_paras[key] = environ.get(val)
role = environ.get('HTTP_X_ROLE')
role = environ.get("HTTP_X_ROLE")
context_paras['is_admin'] = 'admin' in role.split(',')
context_paras["is_admin"] = "admin" in role.split(",")
return k_context.RequestContext(**context_paras)
def extract_credentials_for_policy():
context_paras = {'project_name': 'HTTP_X_PROJECT_NAME',
'roles': 'HTTP_X_ROLE'}
context_paras = {"project_name": "HTTP_X_PROJECT_NAME", "roles": "HTTP_X_ROLE"}
environ = request.environ
for key, val in context_paras.items():
context_paras[key] = environ.get(val)
context_paras['roles'] = context_paras['roles'].split(',')
context_paras["roles"] = context_paras["roles"].split(",")
return context_paras


@@ -23,16 +23,16 @@ from dcmanager.api.controllers.v1 import root as v1_root
class RootController(object):
@pecan.expose('json')
@pecan.expose("json")
def _lookup(self, version, *remainder):
version = str(version)
minor_version = version[-1]
major_version = version[1]
remainder = remainder + (minor_version,)
if major_version == '1':
if major_version == "1":
return v1_root.Controller(), remainder
@pecan.expose(generic=True, template='json')
@pecan.expose(generic=True, template="json")
def index(self):
return {
"versions": [
@@ -41,19 +41,19 @@ class RootController(object):
"links": [
{
"rel": "self",
"href": pecan.request.application_url + "/v1.0/"
"href": pecan.request.application_url + "/v1.0/",
}
],
"id": "v1.0",
"updated": "2017-10-2"
"updated": "2017-10-2",
}
]
}
@index.when(method='POST')
@index.when(method='PUT')
@index.when(method='DELETE')
@index.when(method='HEAD')
@index.when(method='PATCH')
@index.when(method="POST")
@index.when(method="PUT")
@index.when(method="DELETE")
@index.when(method="HEAD")
@index.when(method="PATCH")
def not_supported(self):
pecan.abort(405)


@@ -28,13 +28,13 @@ LOG = logging.getLogger(__name__)
class SubcloudAlarmController(object):
VERSION_ALIASES = {
'Newton': '1.0',
"Newton": "1.0",
}
def __init__(self, *args, **kwargs):
super(SubcloudAlarmController, self).__init__(*args, **kwargs)
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@@ -44,21 +44,24 @@ class SubcloudAlarmController(object):
context = restcomm.extract_context_from_environ()
alarms = db_api.subcloud_alarms_get_all(context)
for alarm in alarms:
alarm_dict = {'region_name': alarm['name'],
'uuid': alarm['uuid'],
'critical_alarms': alarm['critical_alarms'],
'major_alarms': alarm['major_alarms'],
'minor_alarms': alarm['minor_alarms'],
'warnings': alarm['warnings'],
'cloud_status': alarm['cloud_status']}
alarm_dict = {
"region_name": alarm["name"],
"uuid": alarm["uuid"],
"critical_alarms": alarm["critical_alarms"],
"major_alarms": alarm["major_alarms"],
"minor_alarms": alarm["minor_alarms"],
"warnings": alarm["warnings"],
"cloud_status": alarm["cloud_status"],
}
summary.append(alarm_dict)
return {'alarm_summary': summary}
return {"alarm_summary": summary}
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self):
"""Get List of alarm summarys
"""
policy.authorize(alarm_manager_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
"""Get List of alarm summarys"""
policy.authorize(
alarm_manager_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
return self._get_alarm_aggregates()


@@ -34,31 +34,30 @@ class NotificationsController(object):
super(NotificationsController, self).__init__()
self.audit_rpc_client = audit_rpc_client.ManagerAuditClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
if 'events' not in request.json_body:
pecan.abort(httpclient.BAD_REQUEST,
"Missing required notification events")
if "events" not in request.json_body:
pecan.abort(httpclient.BAD_REQUEST, "Missing required notification events")
events = request.json_body['events']
if 'platform-upgrade-completed' in events:
events = request.json_body["events"]
if "platform-upgrade-completed" in events:
# We're being notified that a platform upgrade has completed,
# so we want to trigger a load audit of all subclouds on the
# next audit cycle.
context = restcomm.extract_context_from_environ()
self.audit_rpc_client.trigger_load_audit(context)
if 'k8s-upgrade-completed' in events:
if "k8s-upgrade-completed" in events:
# We're being notified that a kubernetes upgrade has completed,
# so we want to trigger a kubernetes audit of all subclouds on
# the next audit cycle.
context = restcomm.extract_context_from_environ()
self.audit_rpc_client.trigger_kubernetes_audit(context)
if 'kube-rootca-update-completed' in events:
if "kube-rootca-update-completed" in events:
# We're being notified that a kube rootca update has completed, so
# we want to trigger a kube rootca update audit of all subclouds on
# the next audit cycle.
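
For reference, the post() handler above only reads the "events" list from the
JSON body; a payload that would trigger both the load audit and the kubernetes
audit on the next cycle might look like the following (shape inferred from the
checks in the code above, not an exhaustive schema):

    # Hypothetical notification body; only the "events" key is inspected
    # by the handler shown above.
    body = {"events": ["platform-upgrade-completed", "k8s-upgrade-completed"]}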


@@ -17,8 +17,9 @@ from pecan import request
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcmanager.api.controllers import restcomm
from dcmanager.api.policies import peer_group_association as \
peer_group_association_policy
from dcmanager.api.policies import (
peer_group_association as peer_group_association_policy,
)
from dcmanager.api import policy
from dcmanager.common import consts
from dcmanager.common import exceptions as exception
@@ -33,12 +34,13 @@ LOG = logging.getLogger(__name__)
MIN_PEER_GROUP_ASSOCIATION_PRIORITY = 1
MAX_PEER_GROUP_ASSOCIATION_PRIORITY = 65536
ASSOCIATION_SYNC_STATUS_LIST = \
[consts.ASSOCIATION_SYNC_STATUS_SYNCING,
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
consts.ASSOCIATION_SYNC_STATUS_FAILED,
consts.ASSOCIATION_SYNC_STATUS_UNKNOWN]
ASSOCIATION_SYNC_STATUS_LIST = [
consts.ASSOCIATION_SYNC_STATUS_SYNCING,
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
consts.ASSOCIATION_SYNC_STATUS_FAILED,
consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
]
class PeerGroupAssociationsController(restcomm.GenericPathController):
@@ -47,7 +49,7 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
super(PeerGroupAssociationsController, self).__init__()
self.rpc_client = rpc_client.ManagerClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@@ -58,12 +60,13 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
for association in associations:
association_dict = db_api.peer_group_association_db_model_to_dict(
association)
association
)
# Remove the sync_message from the list response
association_dict.pop('sync-message', None)
association_dict.pop("sync-message", None)
association_list.append(association_dict)
result = {'peer_group_associations': association_list}
result = {"peer_group_associations": association_list}
return result
@staticmethod
@@ -71,12 +74,12 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
try:
payload = json.loads(request.body)
except Exception:
error_msg = 'Request body is malformed.'
error_msg = "Request body is malformed."
LOG.exception(error_msg)
pecan.abort(400, _(error_msg))
if not isinstance(payload, dict):
pecan.abort(400, _('Invalid request body format'))
pecan.abort(400, _("Invalid request body format"))
return payload
def _validate_peer_group_leader_id(self, system_leader_id):
@@ -84,33 +87,37 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
sysinv_client = SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
)
system = sysinv_client.get_system()
return True if system.uuid == system_leader_id else False
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, association_id=None):
"""Get details about peer group association.
:param association_id: ID of peer group association
"""
policy.authorize(peer_group_association_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
peer_group_association_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if association_id is None:
# List of peer group association requested
return self._get_peer_group_association_list(context)
elif not association_id.isdigit():
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association ID must be an integer'))
pecan.abort(
httpclient.BAD_REQUEST,
_("Peer Group Association ID must be an integer"),
)
try:
association = db_api.peer_group_association_get(context,
association_id)
association = db_api.peer_group_association_get(context, association_id)
except exception.PeerGroupAssociationNotFound:
pecan.abort(httpclient.NOT_FOUND,
_('Peer Group Association not found'))
pecan.abort(httpclient.NOT_FOUND, _("Peer Group Association not found"))
return db_api.peer_group_association_db_model_to_dict(association)
@@ -118,12 +125,15 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
try:
db_api.subcloud_peer_group_get(context, peer_group_id)
except exception.SubcloudPeerGroupNotFound:
LOG.debug("Subcloud Peer Group Not Found, peer group id: %s"
% peer_group_id)
LOG.debug(
"Subcloud Peer Group Not Found, peer group id: %s" % peer_group_id
)
return False
except Exception as e:
LOG.warning("Get Subcloud Peer Group failed: %s; peer_group_id: %s"
% (e, peer_group_id))
LOG.warning(
"Get Subcloud Peer Group failed: %s; peer_group_id: %s"
% (e, peer_group_id)
)
return False
return True
@@ -131,12 +141,12 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
try:
db_api.system_peer_get(context, system_peer_id)
except exception.SystemPeerNotFound:
LOG.debug("System Peer Not Found, system peer id: %s"
% system_peer_id)
LOG.debug("System Peer Not Found, system peer id: %s" % system_peer_id)
return False
except Exception as e:
LOG.warning("Get System Peer failed: %s; system_peer_id: %s"
% (e, system_peer_id))
LOG.warning(
"Get System Peer failed: %s; system_peer_id: %s" % (e, system_peer_id)
)
return False
return True
@@ -145,14 +155,17 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
# Check the value is an integer
val = int(peer_group_priority)
except ValueError:
LOG.debug("Peer Group Priority is not Integer: %s"
% peer_group_priority)
LOG.debug("Peer Group Priority is not Integer: %s" % peer_group_priority)
return False
# Less than min or greater than max priority is not supported.
if val < MIN_PEER_GROUP_ASSOCIATION_PRIORITY or \
val > MAX_PEER_GROUP_ASSOCIATION_PRIORITY:
LOG.debug("Invalid Peer Group Priority out of support range: %s"
% peer_group_priority)
if (
val < MIN_PEER_GROUP_ASSOCIATION_PRIORITY
or val > MAX_PEER_GROUP_ASSOCIATION_PRIORITY
):
LOG.debug(
"Invalid Peer Group Priority out of support range: %s"
% peer_group_priority
)
return False
return True
@@ -162,80 +175,108 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
return False
return True
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
"""Create a new peer group association."""
policy.authorize(peer_group_association_policy.POLICY_ROOT %
"create", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
peer_group_association_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
payload = self._get_payload(request)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
# Validate payload
peer_group_id = payload.get('peer_group_id')
peer_group_id = payload.get("peer_group_id")
if not self._validate_peer_group_id(context, peer_group_id):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer_group_id'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer_group_id"))
system_peer_id = payload.get('system_peer_id')
system_peer_id = payload.get("system_peer_id")
if not self._validate_system_peer_id(context, system_peer_id):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid system_peer_id'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid system_peer_id"))
peer_group_priority = payload.get('peer_group_priority')
peer_group_priority = payload.get("peer_group_priority")
peer_group = db_api.subcloud_peer_group_get(context, peer_group_id)
if peer_group_priority is not None and not \
self._validate_peer_group_priority(peer_group_priority):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer_group_priority'))
if peer_group_priority is not None and not self._validate_peer_group_priority(
peer_group_priority
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer_group_priority"))
if (peer_group.group_priority == consts.PEER_GROUP_PRIMARY_PRIORITY and
peer_group_priority is None) or (
peer_group.group_priority > consts.PEER_GROUP_PRIMARY_PRIORITY and
peer_group_priority is not None):
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association create is not allowed when '
'the subcloud peer group priority is greater than 0 '
'and it is required when the subcloud peer group '
'priority is 0.'))
if (
peer_group.group_priority == consts.PEER_GROUP_PRIMARY_PRIORITY
and peer_group_priority is None
) or (
peer_group.group_priority > consts.PEER_GROUP_PRIMARY_PRIORITY
and peer_group_priority is not None
):
pecan.abort(
httpclient.BAD_REQUEST,
_(
"Peer Group Association create is not allowed when the subcloud "
"peer group priority is greater than 0 and it is required when "
"the subcloud peer group priority is 0."
),
)
is_primary = peer_group.group_priority == consts.PEER_GROUP_PRIMARY_PRIORITY
# only one combination of peer_group_id + system_peer_id can exists
association = None
try:
association = db_api.\
peer_group_association_get_by_peer_group_and_system_peer_id(
context,
peer_group_id,
system_peer_id)
association = (
db_api.peer_group_association_get_by_peer_group_and_system_peer_id(
context, peer_group_id, system_peer_id
)
)
except exception.PeerGroupAssociationCombinationNotFound:
# This is a normal scenario, no need to log or raise an error
pass
except Exception as e:
LOG.warning("Peer Group Association get failed: %s;"
"peer_group_id: %s, system_peer_id: %s"
% (e, peer_group_id, system_peer_id))
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('peer_group_association_get_by_peer_group_and_'
'system_peer_id failed: %s' % e))
LOG.warning(
"Peer Group Association get failed: %s;"
"peer_group_id: %s, system_peer_id: %s"
% (e, peer_group_id, system_peer_id)
)
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_(
"peer_group_association_get_by_peer_group_and_"
"system_peer_id failed: %s" % e
),
)
if association:
LOG.warning("Failed to create Peer group association, association "
"with peer_group_id:[%s],system_peer_id:[%s] "
"already exists" % (peer_group_id, system_peer_id))
pecan.abort(httpclient.BAD_REQUEST,
_('A Peer group association with same peer_group_id, '
'system_peer_id already exists'))
LOG.warning(
"Failed to create Peer group association, association with "
"peer_group_id:[%s],system_peer_id:[%s] already exists"
% (peer_group_id, system_peer_id)
)
pecan.abort(
httpclient.BAD_REQUEST,
_(
"A Peer group association with same peer_group_id, "
"system_peer_id already exists"
),
)
# Create the peer group association
try:
association_type = consts.ASSOCIATION_TYPE_PRIMARY if is_primary \
association_type = (
consts.ASSOCIATION_TYPE_PRIMARY
if is_primary
else consts.ASSOCIATION_TYPE_NON_PRIMARY
)
association = db_api.peer_group_association_create(
context, peer_group_id, system_peer_id, peer_group_priority,
association_type, consts.ASSOCIATION_SYNC_STATUS_SYNCING)
context,
peer_group_id,
system_peer_id,
peer_group_priority,
association_type,
consts.ASSOCIATION_SYNC_STATUS_SYNCING,
)
if is_primary:
# Sync the subcloud peer group to peer site
@@ -247,107 +288,133 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to create peer group association'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to create peer group association"),
)
def _sync_association(self, context, association, is_non_primary):
if is_non_primary:
self.rpc_client.peer_monitor_notify(context)
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association sync is not allowed '
'when the association type is non-primary. But the '
'peer monitor notify was triggered.'))
pecan.abort(
httpclient.BAD_REQUEST,
_(
"Peer Group Association sync is not allowed when the association "
"type is non-primary. But the peer monitor notify was triggered."
),
)
else:
peer_group = db_api.subcloud_peer_group_get(
context, association.peer_group_id)
if not self._validate_peer_group_leader_id(peer_group.
system_leader_id):
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association sync is not allowed when '
'the subcloud peer group system_leader_id is not '
'the current system controller UUID.'))
context, association.peer_group_id
)
if not self._validate_peer_group_leader_id(peer_group.system_leader_id):
pecan.abort(
httpclient.BAD_REQUEST,
_(
"Peer Group Association sync is not allowed when "
"the subcloud peer group system_leader_id is not "
"the current system controller UUID."
),
)
try:
# Sync the subcloud peer group to peer site
self.rpc_client.sync_subcloud_peer_group(context,
association.id)
self.rpc_client.sync_subcloud_peer_group(context, association.id)
association = db_api.peer_group_association_update(
context, id=association.id,
context,
id=association.id,
sync_status=consts.ASSOCIATION_SYNC_STATUS_SYNCING,
sync_message='None')
return db_api.peer_group_association_db_model_to_dict(
association)
sync_message="None",
)
return db_api.peer_group_association_db_model_to_dict(association)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
# additional exceptions.
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to sync peer group association'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to sync peer group association"),
)
def _update_association(self, context, association, is_non_primary):
payload = self._get_payload(request)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
peer_group_priority = payload.get('peer_group_priority')
sync_status = payload.get('sync_status')
peer_group_priority = payload.get("peer_group_priority")
sync_status = payload.get("sync_status")
# Check value is not None or empty before calling validate
if not (peer_group_priority is not None or sync_status):
pecan.abort(httpclient.BAD_REQUEST, _('nothing to update'))
pecan.abort(httpclient.BAD_REQUEST, _("nothing to update"))
elif peer_group_priority is not None and sync_status:
pecan.abort(httpclient.BAD_REQUEST,
_('peer_group_priority and sync_status cannot be '
'updated at the same time.'))
pecan.abort(
httpclient.BAD_REQUEST,
_(
"peer_group_priority and sync_status cannot be "
"updated at the same time."
),
)
if peer_group_priority is not None:
if not self._validate_peer_group_priority(peer_group_priority):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer_group_priority'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer_group_priority"))
if is_non_primary:
self.rpc_client.peer_monitor_notify(context)
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association peer_group_priority is '
'not allowed to update when the association type '
'is non-primary.'))
pecan.abort(
httpclient.BAD_REQUEST,
_(
"Peer Group Association peer_group_priority is not allowed to "
"update when the association type is non-primary."
),
)
else:
db_api.peer_group_association_update(
context, id=association.id,
peer_group_priority=peer_group_priority)
context, id=association.id, peer_group_priority=peer_group_priority
)
if sync_status:
if not self._validate_sync_status(sync_status):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid sync_status'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid sync_status"))
if not is_non_primary:
self.rpc_client.peer_monitor_notify(context)
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association sync_status is not '
'allowed to update when the association type is '
'primary.'))
pecan.abort(
httpclient.BAD_REQUEST,
_(
"Peer Group Association sync_status is not allowed to update "
"when the association type is primary."
),
)
else:
sync_message = 'Primary association sync to current site ' + \
'failed.' if sync_status == \
consts.ASSOCIATION_SYNC_STATUS_FAILED else 'None'
sync_message = (
"Primary association sync to current site failed."
if sync_status == consts.ASSOCIATION_SYNC_STATUS_FAILED
else "None"
)
association = db_api.peer_group_association_update(
context, id=association.id, sync_status=sync_status,
sync_message=sync_message)
context,
id=association.id,
sync_status=sync_status,
sync_message=sync_message,
)
self.rpc_client.peer_monitor_notify(context)
return db_api.peer_group_association_db_model_to_dict(
association)
return db_api.peer_group_association_db_model_to_dict(association)
try:
# Ask dcmanager-manager to update the subcloud peer group priority
# to peer site. It will do the real work...
return self.rpc_client.sync_subcloud_peer_group_only(
context, association.id)
context, association.id
)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
# additional exceptions.
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to update peer group association'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to update peer group association"),
)
@index.when(method='PATCH', template='json')
@index.when(method="PATCH", template="json")
def patch(self, association_id, sync=False):
"""Update a peer group association.
@@ -355,70 +422,78 @@ class PeerGroupAssociationsController(restcomm.GenericPathController):
:param sync: sync action that sync the peer group
"""
policy.authorize(peer_group_association_policy.POLICY_ROOT % "modify",
{}, restcomm.extract_credentials_for_policy())
policy.authorize(
peer_group_association_policy.POLICY_ROOT % "modify",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if association_id is None:
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association ID required'))
pecan.abort(httpclient.BAD_REQUEST, _("Peer Group Association ID required"))
elif not association_id.isdigit():
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association ID must be an integer'))
pecan.abort(
httpclient.BAD_REQUEST,
_("Peer Group Association ID must be an integer"),
)
try:
association = db_api.peer_group_association_get(context,
association_id)
association = db_api.peer_group_association_get(context, association_id)
except exception.PeerGroupAssociationNotFound:
pecan.abort(httpclient.NOT_FOUND,
_('Peer Group Association not found'))
pecan.abort(httpclient.NOT_FOUND, _("Peer Group Association not found"))
is_non_primary = association.association_type == consts.\
ASSOCIATION_TYPE_NON_PRIMARY
is_non_primary = (
association.association_type == consts.ASSOCIATION_TYPE_NON_PRIMARY
)
if sync:
return self._sync_association(context, association, is_non_primary)
else:
return self._update_association(context, association, is_non_primary)
@index.when(method='delete', template='json')
@index.when(method="delete", template="json")
def delete(self, association_id):
"""Delete the peer group association.
:param association_id: ID of peer group association to delete
"""
policy.authorize(peer_group_association_policy.POLICY_ROOT % "delete",
{}, restcomm.extract_credentials_for_policy())
policy.authorize(
peer_group_association_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if association_id is None:
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association ID required'))
pecan.abort(httpclient.BAD_REQUEST, _("Peer Group Association ID required"))
# Validate the ID
if not association_id.isdigit():
pecan.abort(httpclient.BAD_REQUEST,
_('Peer Group Association ID must be an integer'))
pecan.abort(
httpclient.BAD_REQUEST,
_("Peer Group Association ID must be an integer"),
)
try:
association = db_api.peer_group_association_get(context,
association_id)
is_non_primary = association.association_type == consts.\
ASSOCIATION_TYPE_NON_PRIMARY
association = db_api.peer_group_association_get(context, association_id)
is_non_primary = (
association.association_type == consts.ASSOCIATION_TYPE_NON_PRIMARY
)
if is_non_primary:
result = db_api.peer_group_association_destroy(context,
association_id)
result = db_api.peer_group_association_destroy(context, association_id)
self.rpc_client.peer_monitor_notify(context)
return result
else:
# Ask system-peer-manager to delete the association.
# It will do all the real work...
return self.rpc_client.delete_peer_group_association(
context, association_id)
context, association_id
)
except exception.PeerGroupAssociationNotFound:
pecan.abort(httpclient.NOT_FOUND,
_('Peer Group Association not found'))
pecan.abort(httpclient.NOT_FOUND, _("Peer Group Association not found"))
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to delete peer group association'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to delete peer group association"),
)


@@ -13,8 +13,9 @@ import pecan
from dcmanager.api.controllers import restcomm
from dcmanager.api.controllers.v1.subclouds import SubcloudsController
from dcmanager.api.policies import phased_subcloud_deploy as \
phased_subcloud_deploy_policy
from dcmanager.api.policies import (
phased_subcloud_deploy as phased_subcloud_deploy_policy,
)
from dcmanager.api import policy
from dcmanager.common import consts
from dcmanager.common.context import RequestContext
@@ -27,7 +28,7 @@ from dcmanager.db.sqlalchemy import models
from dcmanager.rpc import client as rpc_client
LOG = logging.getLogger(__name__)
LOCK_NAME = 'PhasedSubcloudDeployController'
LOCK_NAME = "PhasedSubcloudDeployController"
INSTALL = consts.DEPLOY_PHASE_INSTALL
BOOTSTRAP = consts.DEPLOY_PHASE_BOOTSTRAP
@@ -39,41 +40,26 @@ ENROLL = consts.DEPLOY_PHASE_ENROLL
SUBCLOUD_CREATE_REQUIRED_PARAMETERS = (
consts.BOOTSTRAP_VALUES,
consts.BOOTSTRAP_ADDRESS
consts.BOOTSTRAP_ADDRESS,
)
# The consts.DEPLOY_CONFIG is missing here because it's handled differently
# by the upload_deploy_config_file() function
SUBCLOUD_CREATE_GET_FILE_CONTENTS = (
consts.BOOTSTRAP_VALUES,
consts.INSTALL_VALUES,
)
SUBCLOUD_INSTALL_GET_FILE_CONTENTS = (
consts.INSTALL_VALUES,
)
SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS = (
consts.BOOTSTRAP_VALUES,
)
SUBCLOUD_ENROLL_GET_FILE_CONTENTS = (
consts.BOOTSTRAP_VALUES,
consts.INSTALL_VALUES
)
SUBCLOUD_CONFIG_GET_FILE_CONTENTS = (
consts.DEPLOY_CONFIG,
)
SUBCLOUD_INSTALL_GET_FILE_CONTENTS = (consts.INSTALL_VALUES,)
SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS = (consts.BOOTSTRAP_VALUES,)
SUBCLOUD_ENROLL_GET_FILE_CONTENTS = (consts.BOOTSTRAP_VALUES, consts.INSTALL_VALUES)
SUBCLOUD_CONFIG_GET_FILE_CONTENTS = (consts.DEPLOY_CONFIG,)
VALID_STATES_FOR_DEPLOY_INSTALL = (
consts.DEPLOY_STATE_CREATED,
consts.DEPLOY_STATE_PRE_INSTALL_FAILED,
consts.DEPLOY_STATE_INSTALL_FAILED,
consts.DEPLOY_STATE_INSTALLED,
consts.DEPLOY_STATE_INSTALL_ABORTED
consts.DEPLOY_STATE_INSTALL_ABORTED,
)
VALID_STATES_FOR_DEPLOY_BOOTSTRAP = [
consts.DEPLOY_STATE_INSTALLED,
consts.DEPLOY_STATE_PRE_BOOTSTRAP_FAILED,
@@ -82,44 +68,37 @@ VALID_STATES_FOR_DEPLOY_BOOTSTRAP = [
consts.DEPLOY_STATE_BOOTSTRAPPED,
# The subcloud can be installed manually (without remote install) so we need
# to allow the bootstrap operation when the state == DEPLOY_STATE_CREATED
consts.DEPLOY_STATE_CREATED
consts.DEPLOY_STATE_CREATED,
]
VALID_STATES_FOR_DEPLOY_CONFIG = (
consts.DEPLOY_STATE_DONE,
consts.DEPLOY_STATE_PRE_CONFIG_FAILED,
consts.DEPLOY_STATE_CONFIG_FAILED,
consts.DEPLOY_STATE_BOOTSTRAPPED,
consts.DEPLOY_STATE_CONFIG_ABORTED
consts.DEPLOY_STATE_CONFIG_ABORTED,
)
VALID_STATES_FOR_DEPLOY_ABORT = (
consts.DEPLOY_STATE_INSTALLING,
consts.DEPLOY_STATE_BOOTSTRAPPING,
consts.DEPLOY_STATE_CONFIGURING
consts.DEPLOY_STATE_CONFIGURING,
)
VALID_STATES_FOR_DEPLOY_ENROLL = (
consts.DEPLOY_STATE_CREATED,
consts.DEPLOY_STATE_ENROLL_FAILED,
consts.DEPLOY_STATE_ENROLLED,
consts.DEPLOY_STATE_PRE_ENROLL,
consts.DEPLOY_STATE_INIT_ENROLL_FAILED
consts.DEPLOY_STATE_INIT_ENROLL_FAILED,
)
FILES_FOR_RESUME_INSTALL = \
SUBCLOUD_INSTALL_GET_FILE_CONTENTS + \
SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS + \
SUBCLOUD_CONFIG_GET_FILE_CONTENTS
FILES_FOR_RESUME_BOOTSTRAP = \
SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS + \
SUBCLOUD_CONFIG_GET_FILE_CONTENTS
FILES_FOR_RESUME_INSTALL = (
SUBCLOUD_INSTALL_GET_FILE_CONTENTS
+ SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS
+ SUBCLOUD_CONFIG_GET_FILE_CONTENTS
)
FILES_FOR_RESUME_BOOTSTRAP = (
SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS + SUBCLOUD_CONFIG_GET_FILE_CONTENTS
)
FILES_FOR_RESUME_CONFIG = SUBCLOUD_CONFIG_GET_FILE_CONTENTS
RESUMABLE_STATES = {
consts.DEPLOY_STATE_CREATED: [INSTALL, BOOTSTRAP, CONFIG],
consts.DEPLOY_STATE_INSTALLED: [BOOTSTRAP, CONFIG],
@@ -132,21 +111,18 @@ RESUMABLE_STATES = {
consts.DEPLOY_STATE_BOOTSTRAP_ABORTED: [BOOTSTRAP, CONFIG],
consts.DEPLOY_STATE_PRE_CONFIG_FAILED: [CONFIG],
consts.DEPLOY_STATE_CONFIG_FAILED: [CONFIG],
consts.DEPLOY_STATE_CONFIG_ABORTED: [CONFIG]
consts.DEPLOY_STATE_CONFIG_ABORTED: [CONFIG],
}
DEPLOY_PHASES = [INSTALL, BOOTSTRAP, CONFIG]
FILES_MAPPING = {
INSTALL: SUBCLOUD_INSTALL_GET_FILE_CONTENTS,
BOOTSTRAP: SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS,
CONFIG: SUBCLOUD_CONFIG_GET_FILE_CONTENTS
CONFIG: SUBCLOUD_CONFIG_GET_FILE_CONTENTS,
}
RESUME_PREP_UPDATE_STATUS = {
INSTALL: consts.DEPLOY_STATE_PRE_INSTALL,
BOOTSTRAP: consts.DEPLOY_STATE_PRE_BOOTSTRAP,
CONFIG: consts.DEPLOY_STATE_PRE_CONFIG
CONFIG: consts.DEPLOY_STATE_PRE_CONFIG,
}
@@ -157,7 +133,7 @@ def get_create_payload(request: pecan.Request) -> dict:
if f in request.POST:
file_item = request.POST[f]
file_item.file.seek(0, os.SEEK_SET)
data = utils.yaml_safe_load(file_item.file.read().decode('utf8'), f)
data = utils.yaml_safe_load(file_item.file.read().decode("utf8"), f)
if f == consts.BOOTSTRAP_VALUES:
payload.update(data)
else:
@@ -175,10 +151,14 @@ class PhasedSubcloudDeployController(object):
self.dcmanager_rpc_client = rpc_client.ManagerClient()
def _deploy_create(self, context: RequestContext, request: pecan.Request):
policy.authorize(phased_subcloud_deploy_policy.POLICY_ROOT % "create",
{}, restcomm.extract_credentials_for_policy())
policy.authorize(
phased_subcloud_deploy_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
psd_common.check_required_parameters(
request, SUBCLOUD_CREATE_REQUIRED_PARAMETERS)
request, SUBCLOUD_CREATE_REQUIRED_PARAMETERS
)
payload = get_create_payload(request)
@@ -193,43 +173,54 @@ class PhasedSubcloudDeployController(object):
# Ask dcmanager-manager to create the subcloud.
# It will do all the real work...
subcloud_dict = self.dcmanager_rpc_client.subcloud_deploy_create(
context, subcloud.id, payload)
context, subcloud.id, payload
)
return subcloud_dict
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception:
LOG.exception("Unable to create subcloud %s" % payload.get('name'))
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to create subcloud'))
LOG.exception("Unable to create subcloud %s" % payload.get("name"))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to create subcloud")
)
def _deploy_install(self, context: RequestContext,
request: pecan.Request, subcloud):
def _deploy_install(
self, context: RequestContext, request: pecan.Request, subcloud
):
payload = psd_common.get_request_data(
request, subcloud, SUBCLOUD_INSTALL_GET_FILE_CONTENTS)
request, subcloud, SUBCLOUD_INSTALL_GET_FILE_CONTENTS
)
if not payload:
pecan.abort(400, _('Body required'))
pecan.abort(400, _("Body required"))
SubcloudsController.validate_software_deploy_state()
if subcloud.deploy_status not in VALID_STATES_FOR_DEPLOY_INSTALL:
allowed_states_str = ', '.join(VALID_STATES_FOR_DEPLOY_INSTALL)
pecan.abort(400, _('Subcloud deploy status must be either: %s')
% allowed_states_str)
allowed_states_str = ", ".join(VALID_STATES_FOR_DEPLOY_INSTALL)
pecan.abort(
400, _("Subcloud deploy status must be either: %s") % allowed_states_str
)
initial_deployment = psd_common.is_initial_deployment(subcloud.name)
if not initial_deployment:
pecan.abort(400, _('The deploy install command can only be used '
'during initial deployment.'))
pecan.abort(
400,
_(
"The deploy install command can only be used "
"during initial deployment."
),
)
unvalidated_sw_version = payload.get('release', subcloud.software_version)
unvalidated_sw_version = payload.get("release", subcloud.software_version)
# get_sw_version will simply return back
# the passed unvalidated_sw_version after validating it.
payload['software_version'] = utils.get_sw_version(unvalidated_sw_version)
payload["software_version"] = utils.get_sw_version(unvalidated_sw_version)
psd_common.populate_payload_with_pre_existing_data(
payload, subcloud, SUBCLOUD_INSTALL_GET_FILE_CONTENTS)
payload, subcloud, SUBCLOUD_INSTALL_GET_FILE_CONTENTS
)
psd_common.pre_deploy_install(payload, subcloud)
@@ -238,123 +229,140 @@ class PhasedSubcloudDeployController(object):
# version. Update the deploy status as pre-install.
self.dcmanager_rpc_client.subcloud_deploy_install(
context, subcloud.id, payload, initial_deployment=True)
context, subcloud.id, payload, initial_deployment=True
)
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
subcloud_dict['deploy-status'] = consts.DEPLOY_STATE_PRE_INSTALL
subcloud_dict['software-version'] = payload['software_version']
subcloud_dict["deploy-status"] = consts.DEPLOY_STATE_PRE_INSTALL
subcloud_dict["software-version"] = payload["software_version"]
return subcloud_dict
except RemoteError as e:
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to install subcloud %s" % subcloud.name)
pecan.abort(500, _('Unable to install subcloud'))
pecan.abort(500, _("Unable to install subcloud"))
def _deploy_bootstrap(self, context: RequestContext,
request: pecan.Request,
subcloud: models.Subcloud):
def _deploy_bootstrap(
self, context: RequestContext, request: pecan.Request, subcloud: models.Subcloud
):
if subcloud.deploy_status not in VALID_STATES_FOR_DEPLOY_BOOTSTRAP:
valid_states_str = ', '.join(VALID_STATES_FOR_DEPLOY_BOOTSTRAP)
pecan.abort(400, _('Subcloud deploy status must be either: %s')
% valid_states_str)
valid_states_str = ", ".join(VALID_STATES_FOR_DEPLOY_BOOTSTRAP)
pecan.abort(
400, _("Subcloud deploy status must be either: %s") % valid_states_str
)
has_bootstrap_values = consts.BOOTSTRAP_VALUES in request.POST
payload = psd_common.get_request_data(
request, subcloud, SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS)
request, subcloud, SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS
)
# Try to load the existing override values
override_file = psd_common.get_config_file_path(subcloud.name)
if os.path.exists(override_file):
if not has_bootstrap_values:
psd_common.populate_payload_with_pre_existing_data(
payload, subcloud, SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS)
payload, subcloud, SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS
)
elif not has_bootstrap_values:
msg = _("Required bootstrap-values file was not provided and it was"
" not previously available at %s") % (override_file)
msg = _(
"Required bootstrap-values file was not provided and it was "
"not previously available at %s"
) % (override_file)
pecan.abort(400, msg)
payload['software_version'] = subcloud.software_version
payload["software_version"] = subcloud.software_version
psd_common.pre_deploy_bootstrap(context, payload, subcloud,
has_bootstrap_values)
psd_common.pre_deploy_bootstrap(
context, payload, subcloud, has_bootstrap_values
)
try:
# Ask dcmanager-manager to bootstrap the subcloud.
self.dcmanager_rpc_client.subcloud_deploy_bootstrap(
context, subcloud.id, payload, initial_deployment=True)
context, subcloud.id, payload, initial_deployment=True
)
# Change the response to correctly display the values
# that will be updated on the manager.
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
subcloud_dict['deploy-status'] = consts.DEPLOY_STATE_PRE_BOOTSTRAP
subcloud_dict['description'] = payload.get("description",
subcloud.description)
subcloud_dict['location'] = payload.get("location", subcloud.location)
subcloud_dict['management-subnet'] = utils.get_management_subnet(payload)
subcloud_dict['management-gateway-ip'] = \
subcloud_dict["deploy-status"] = consts.DEPLOY_STATE_PRE_BOOTSTRAP
subcloud_dict["description"] = payload.get(
"description", subcloud.description
)
subcloud_dict["location"] = payload.get("location", subcloud.location)
subcloud_dict["management-subnet"] = utils.get_management_subnet(payload)
subcloud_dict["management-gateway-ip"] = (
utils.get_management_gateway_address(payload)
subcloud_dict['management-start-ip'] = \
utils.get_management_start_address(payload)
subcloud_dict['management-end-ip'] = \
utils.get_management_end_address(payload)
subcloud_dict['systemcontroller-gateway-ip'] = payload.get(
"systemcontroller_gateway_address",
subcloud.systemcontroller_gateway_ip
)
subcloud_dict["management-start-ip"] = utils.get_management_start_address(
payload
)
subcloud_dict["management-end-ip"] = utils.get_management_end_address(
payload
)
subcloud_dict["systemcontroller-gateway-ip"] = payload.get(
"systemcontroller_gateway_address", subcloud.systemcontroller_gateway_ip
)
return subcloud_dict
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception:
LOG.exception("Unable to bootstrap subcloud %s" %
payload.get('name'))
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to bootstrap subcloud'))
LOG.exception("Unable to bootstrap subcloud %s" % payload.get("name"))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to bootstrap subcloud")
)
def _deploy_config(self, context: RequestContext,
request: pecan.Request, subcloud):
def _deploy_config(self, context: RequestContext, request: pecan.Request, subcloud):
payload = psd_common.get_request_data(
request, subcloud, SUBCLOUD_CONFIG_GET_FILE_CONTENTS)
request, subcloud, SUBCLOUD_CONFIG_GET_FILE_CONTENTS
)
if not payload:
pecan.abort(400, _('Body required'))
pecan.abort(400, _("Body required"))
if subcloud.deploy_status not in VALID_STATES_FOR_DEPLOY_CONFIG:
allowed_states_str = ', '.join(VALID_STATES_FOR_DEPLOY_CONFIG)
pecan.abort(400, _('Subcloud deploy status must be %s') %
allowed_states_str)
allowed_states_str = ", ".join(VALID_STATES_FOR_DEPLOY_CONFIG)
pecan.abort(
400, _("Subcloud deploy status must be %s") % allowed_states_str
)
if subcloud.prestage_status in consts.STATES_FOR_ONGOING_PRESTAGE:
pecan.abort(400, _('Subcloud prestage is ongoing %s') %
subcloud.prestage_status)
pecan.abort(
400, _("Subcloud prestage is ongoing %s") % subcloud.prestage_status
)
# If the subcloud belongs to a peer group, ensure that
# it's not being configured in a secondary site.
if subcloud.peer_group_id is not None:
peer_group = utils.subcloud_peer_group_get_by_ref(
context, str(subcloud.peer_group_id))
context, str(subcloud.peer_group_id)
)
if peer_group is not None:
if peer_group.group_priority != consts.PEER_GROUP_PRIMARY_PRIORITY:
pecan.abort(400,
_('Subcloud can only be configured in'
' its primary site.'))
pecan.abort(
400,
_("Subcloud can only be configured in its primary site."),
)
psd_common.populate_payload_with_pre_existing_data(
payload, subcloud, SUBCLOUD_CONFIG_GET_FILE_CONTENTS)
payload, subcloud, SUBCLOUD_CONFIG_GET_FILE_CONTENTS
)
psd_common.pre_deploy_config(payload, subcloud)
try:
self.dcmanager_rpc_client.subcloud_deploy_config(
context, subcloud.id, payload, initial_deployment=True)
context, subcloud.id, payload, initial_deployment=True
)
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
subcloud_dict['deploy-status'] = consts.DEPLOY_STATE_PRE_CONFIG
subcloud_dict["deploy-status"] = consts.DEPLOY_STATE_PRE_CONFIG
return subcloud_dict
except RemoteError as e:
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to configure subcloud %s" % subcloud.name)
pecan.abort(500, _('Unable to configure subcloud'))
pecan.abort(500, _("Unable to configure subcloud"))
def _deploy_complete(self, context: RequestContext, subcloud):
@@ -362,67 +370,81 @@ class PhasedSubcloudDeployController(object):
# is consts.DEPLOY_STATE_BOOTSTRAPPED because the user could have
# configured the subcloud manually
if subcloud.deploy_status != consts.DEPLOY_STATE_BOOTSTRAPPED:
pecan.abort(400, _('Subcloud deploy can only be completed when'
' its deploy status is: %s')
% consts.DEPLOY_STATE_BOOTSTRAPPED)
pecan.abort(
400,
_(
"Subcloud deploy can only be completed when "
"its deploy status is: %s"
)
% consts.DEPLOY_STATE_BOOTSTRAPPED,
)
try:
# Ask dcmanager-manager to complete the subcloud deployment
subcloud = self.dcmanager_rpc_client.subcloud_deploy_complete(
context, subcloud.id)
context, subcloud.id
)
return subcloud
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception:
LOG.exception("Unable to complete subcloud %s deployment" %
subcloud.name)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to complete subcloud deployment'))
LOG.exception("Unable to complete subcloud %s deployment" % subcloud.name)
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to complete subcloud deployment"),
)
def _deploy_abort(self, context, subcloud):
if subcloud.deploy_status not in VALID_STATES_FOR_DEPLOY_ABORT:
allowed_states_str = ', '.join(VALID_STATES_FOR_DEPLOY_ABORT)
pecan.abort(400, _('Subcloud deploy status must be in one '
'of the following states: %s')
% allowed_states_str)
allowed_states_str = ", ".join(VALID_STATES_FOR_DEPLOY_ABORT)
pecan.abort(
400,
_("Subcloud deploy status must be in one of the following states: %s")
% allowed_states_str,
)
initial_deployment = psd_common.is_initial_deployment(subcloud.name)
if not initial_deployment:
pecan.abort(400, _('The subcloud can only be aborted during '
'initial deployment.'))
pecan.abort(
400, _("The subcloud can only be aborted during initial deployment.")
)
try:
self.dcmanager_rpc_client.subcloud_deploy_abort(
context, subcloud.id, subcloud.deploy_status)
context, subcloud.id, subcloud.deploy_status
)
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
subcloud_dict['deploy-status'] = \
utils.ABORT_UPDATE_STATUS[subcloud.deploy_status]
subcloud_dict["deploy-status"] = utils.ABORT_UPDATE_STATUS[
subcloud.deploy_status
]
return subcloud_dict
except RemoteError as e:
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to abort subcloud %s deployment" % subcloud.name)
pecan.abort(500, _('Unable to abort subcloud deployment'))
pecan.abort(500, _("Unable to abort subcloud deployment"))
def _deploy_resume(self, context: RequestContext,
request: pecan.Request, subcloud):
def _deploy_resume(self, context: RequestContext, request: pecan.Request, subcloud):
if subcloud.deploy_status not in RESUMABLE_STATES:
allowed_states_str = ', '.join(RESUMABLE_STATES)
pecan.abort(400, _('Subcloud deploy status must be either: %s')
% allowed_states_str)
allowed_states_str = ", ".join(RESUMABLE_STATES)
pecan.abort(
400, _("Subcloud deploy status must be either: %s") % allowed_states_str
)
initial_deployment = psd_common.is_initial_deployment(subcloud.name)
if not initial_deployment:
pecan.abort(400, _('The subcloud can only be resumed during '
'initial deployment.'))
pecan.abort(
400, _("The subcloud can only be resumed during initial deployment.")
)
# Since both install and config are optional phases,
# it's necessary to check if they should be executed
config_file = psd_common.get_config_file_path(subcloud.name,
consts.DEPLOY_CONFIG)
config_file = psd_common.get_config_file_path(
subcloud.name, consts.DEPLOY_CONFIG
)
has_original_install_values = subcloud.data_install
has_original_config_values = os.path.exists(config_file)
has_new_install_values = consts.INSTALL_VALUES in request.POST
@@ -433,11 +455,15 @@ class PhasedSubcloudDeployController(object):
base_deploy_states = RESUMABLE_STATES[subcloud.deploy_status]
if base_deploy_states == [CONFIG] and not has_config_values:
msg = _("Only deploy phase left is deploy config. "
msg = (
_(
"Only deploy phase left is deploy config. "
"Required %s file was not provided and it was not "
"previously available. If manually configuring the "
"subcloud, please run 'dcmanager subcloud deploy "
"complete'") % consts.DEPLOY_CONFIG
"subcloud, please run 'dcmanager subcloud deploy complete'"
)
% consts.DEPLOY_CONFIG
)
pecan.abort(400, msg)
# Since the subcloud can be installed manually and the config is optional,
@@ -475,126 +501,138 @@ class PhasedSubcloudDeployController(object):
# of the pending deploy states
if INSTALL in deploy_states_to_run:
SubcloudsController.validate_software_deploy_state()
unvalidated_sw_version = \
payload.get('release', subcloud.software_version)
unvalidated_sw_version = payload.get("release", subcloud.software_version)
else:
LOG.debug(
'Disregarding release parameter for %s as installation is complete.'
"Disregarding release parameter for %s as installation is complete."
% subcloud.name
)
unvalidated_sw_version = subcloud.software_version
# get_sw_version will simply return back the passed
# unvalidated_sw_version after validating it.
payload['software_version'] = utils.get_sw_version(unvalidated_sw_version)
payload["software_version"] = utils.get_sw_version(unvalidated_sw_version)
# Need to remove bootstrap_values from the list of files to populate
# pre existing data so it does not overwrite newly loaded values
if has_bootstrap_values:
files_for_resume = [f for f in files_for_resume if f
not in FILES_MAPPING[BOOTSTRAP]]
files_for_resume = [
f for f in files_for_resume if f not in FILES_MAPPING[BOOTSTRAP]
]
psd_common.populate_payload_with_pre_existing_data(
payload, subcloud, files_for_resume)
payload, subcloud, files_for_resume
)
psd_common.validate_sysadmin_password(payload)
for state in deploy_states_to_run:
if state == INSTALL:
psd_common.pre_deploy_install(payload, validate_password=False)
elif state == BOOTSTRAP:
psd_common.pre_deploy_bootstrap(context, payload, subcloud,
has_bootstrap_values,
validate_password=False)
psd_common.pre_deploy_bootstrap(
context,
payload,
subcloud,
has_bootstrap_values,
validate_password=False,
)
elif state == CONFIG:
psd_common.pre_deploy_config(payload, subcloud,
validate_password=False)
psd_common.pre_deploy_config(payload, subcloud, validate_password=False)
try:
self.dcmanager_rpc_client.subcloud_deploy_resume(
context, subcloud.id, subcloud.name, payload, deploy_states_to_run)
context, subcloud.id, subcloud.name, payload, deploy_states_to_run
)
# Change the response to correctly display the values
# that will be updated on the manager.
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
next_deploy_phase = RESUMABLE_STATES[subcloud.deploy_status][0]
next_deploy_state = RESUME_PREP_UPDATE_STATUS[next_deploy_phase]
subcloud_dict['deploy-status'] = next_deploy_state
subcloud_dict['software-version'] = payload['software_version']
subcloud_dict['description'] = payload.get("description",
subcloud.description)
subcloud_dict['location'] = payload.get("location", subcloud.location)
subcloud_dict['management-subnet'] = utils.get_management_subnet(payload)
subcloud_dict['management-gateway-ip'] = \
subcloud_dict["deploy-status"] = next_deploy_state
subcloud_dict["software-version"] = payload["software_version"]
subcloud_dict["description"] = payload.get(
"description", subcloud.description
)
subcloud_dict["location"] = payload.get("location", subcloud.location)
subcloud_dict["management-subnet"] = utils.get_management_subnet(payload)
subcloud_dict["management-gateway-ip"] = (
utils.get_management_gateway_address(payload)
subcloud_dict['management-start-ip'] = \
utils.get_management_start_address(payload)
subcloud_dict['management-end-ip'] = \
utils.get_management_end_address(payload)
subcloud_dict['systemcontroller-gateway-ip'] = payload.get(
"systemcontroller_gateway_address",
subcloud.systemcontroller_gateway_ip
)
subcloud_dict["management-start-ip"] = utils.get_management_start_address(
payload
)
subcloud_dict["management-end-ip"] = utils.get_management_end_address(
payload
)
subcloud_dict["systemcontroller-gateway-ip"] = payload.get(
"systemcontroller_gateway_address", subcloud.systemcontroller_gateway_ip
)
return subcloud_dict
except RemoteError as e:
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to resume subcloud %s deployment" % subcloud.name)
pecan.abort(500, _('Unable to resume subcloud deployment'))
pecan.abort(500, _("Unable to resume subcloud deployment"))
def _deploy_enroll(self, context: RequestContext,
request: pecan.Request, subcloud: models.Subcloud):
def _deploy_enroll(
self, context: RequestContext, request: pecan.Request, subcloud: models.Subcloud
):
if subcloud.deploy_status not in VALID_STATES_FOR_DEPLOY_ENROLL:
valid_states_str = ', '.join(VALID_STATES_FOR_DEPLOY_ENROLL)
msg = f'Subcloud deploy status must be either: {valid_states_str}'
valid_states_str = ", ".join(VALID_STATES_FOR_DEPLOY_ENROLL)
msg = f"Subcloud deploy status must be either: {valid_states_str}"
pecan.abort(400, _(msg))
has_bootstrap_values = consts.BOOTSTRAP_VALUES in request.POST
payload = psd_common.get_request_data(
request, subcloud, SUBCLOUD_ENROLL_GET_FILE_CONTENTS)
request, subcloud, SUBCLOUD_ENROLL_GET_FILE_CONTENTS
)
# Try to load the existing override values
override_file = psd_common.get_config_file_path(subcloud.name)
if os.path.exists(override_file):
if not has_bootstrap_values:
psd_common.populate_payload_with_pre_existing_data(
payload, subcloud, SUBCLOUD_ENROLL_GET_FILE_CONTENTS)
payload, subcloud, SUBCLOUD_ENROLL_GET_FILE_CONTENTS
)
elif not has_bootstrap_values:
msg = ("Required bootstrap-values file was not provided and it was "
f"not previously available at {override_file}")
msg = (
"Required bootstrap-values file was not provided and it was "
f"not previously available at {override_file}"
)
pecan.abort(400, _(msg))
psd_common.validate_enroll_parameter(payload)
payload['software_version'] = subcloud.software_version
payload["software_version"] = subcloud.software_version
# Use bootstrap file verification
psd_common.pre_deploy_bootstrap(context, payload, subcloud,
has_bootstrap_values)
psd_common.pre_deploy_bootstrap(
context, payload, subcloud, has_bootstrap_values
)
self.dcmanager_rpc_client.subcloud_deploy_enroll(
context, subcloud.id, payload)
self.dcmanager_rpc_client.subcloud_deploy_enroll(context, subcloud.id, payload)
pecan.abort(400, "subcloud deploy enrollment is not "
"available yet")
pecan.abort(400, "subcloud deploy enrollment is not available yet")
return ""
# TODO(glyraper): The return is necessary to avoid
# the E1111 while the implementation is not complete
# TODO(glyraper): Enroll function in development
@pecan.expose(generic=True, template='json')
@pecan.expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@utils.synchronized(LOCK_NAME)
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
context = restcomm.extract_context_from_environ()
return self._deploy_create(context, pecan.request)
@utils.synchronized(LOCK_NAME)
@index.when(method='PATCH', template='json')
@index.when(method="PATCH", template="json")
def patch(self, subcloud_ref=None, verb=None):
"""Modify the subcloud deployment.
@ -604,12 +642,15 @@ class PhasedSubcloudDeployController(object):
or subcloud operation
"""
policy.authorize(phased_subcloud_deploy_policy.POLICY_ROOT % "modify", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
phased_subcloud_deploy_policy.POLICY_ROOT % "modify",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if not subcloud_ref:
pecan.abort(400, _('Subcloud ID required'))
pecan.abort(400, _("Subcloud ID required"))
try:
if subcloud_ref.isdigit():
@ -617,7 +658,7 @@ class PhasedSubcloudDeployController(object):
else:
subcloud = db_api.subcloud_get_by_name(context, subcloud_ref)
except (exceptions.SubcloudNotFound, exceptions.SubcloudNameNotFound):
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
if verb == ABORT:
subcloud = self._deploy_abort(context, subcloud)
@ -634,6 +675,6 @@ class PhasedSubcloudDeployController(object):
elif verb == ENROLL:
subcloud = self._deploy_enroll(context, pecan.request, subcloud)
else:
pecan.abort(400, _('Invalid request'))
pecan.abort(400, _("Invalid request"))
return subcloud
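
The hunks above all apply the same mechanical rewrite: backslash continuations and single-quoted strings give way to parenthesized argument lists, double quotes, and trailing commas. A minimal sketch of that transformation, using hypothetical names (do_work, context, payload) rather than the controller's own helpers:

def do_work(context, payload, validate_password=False):
    """Stand-in for the long keyword-argument calls reformatted above."""
    return {"context": context, "payload": payload, "checked": validate_password}


context = {"user": "admin"}              # hypothetical request context
payload = {"software_version": "24.09"}  # hypothetical deploy payload

# Pre-Black style, kept as a comment for contrast:
# result = do_work(context, payload,
#                  validate_password=False)

# Black's output: arguments wrapped in parentheses, one per line, with a
# trailing comma so future additions only touch a single line.
result = do_work(
    context,
    payload,
    validate_password=False,
)
print(result)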


@ -1,5 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -40,29 +40,33 @@ class Controller(object):
minor_version = remainder[-1]
remainder = remainder[:-1]
sub_controllers = dict()
if minor_version == '0':
if minor_version == "0":
sub_controllers["subclouds"] = subclouds.SubcloudsController
sub_controllers["subcloud-deploy"] = subcloud_deploy.\
SubcloudDeployController
sub_controllers["subcloud-deploy"] = (
subcloud_deploy.SubcloudDeployController
)
sub_controllers["alarms"] = alarm_manager.SubcloudAlarmController
sub_controllers["sw-update-strategy"] = \
sub_controllers["sw-update-strategy"] = (
sw_update_strategy.SwUpdateStrategyController
sub_controllers["sw-update-options"] = \
)
sub_controllers["sw-update-options"] = (
sw_update_options.SwUpdateOptionsController
sub_controllers["subcloud-groups"] = \
subcloud_group.SubcloudGroupsController
sub_controllers["notifications"] = \
notifications.NotificationsController
sub_controllers["subcloud-backup"] = subcloud_backup.\
SubcloudBackupController
sub_controllers["phased-subcloud-deploy"] = phased_subcloud_deploy.\
PhasedSubcloudDeployController
sub_controllers["subcloud-peer-groups"] = \
)
sub_controllers["subcloud-groups"] = subcloud_group.SubcloudGroupsController
sub_controllers["notifications"] = notifications.NotificationsController
sub_controllers["subcloud-backup"] = (
subcloud_backup.SubcloudBackupController
)
sub_controllers["phased-subcloud-deploy"] = (
phased_subcloud_deploy.PhasedSubcloudDeployController
)
sub_controllers["subcloud-peer-groups"] = (
subcloud_peer_group.SubcloudPeerGroupsController
sub_controllers["peer-group-associations"] = \
)
sub_controllers["peer-group-associations"] = (
peer_group_association.PeerGroupAssociationsController
sub_controllers["system-peers"] = system_peers.\
SystemPeersController
)
sub_controllers["system-peers"] = system_peers.SystemPeersController
for name, ctrl in sub_controllers.items():
setattr(self, name, ctrl)


@ -30,19 +30,20 @@ from dcmanager.rpc import client as rpc_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
LOCK_NAME = 'SubcloudBackupController'
LOCK_NAME = "SubcloudBackupController"
# Subcloud/group information to be retrieved from request params
RequestEntity = namedtuple('RequestEntity', ['type', 'id', 'name', 'subclouds'])
RequestEntity = namedtuple("RequestEntity", ["type", "id", "name", "subclouds"])
class SubcloudBackupController(object):
def __init__(self):
super(SubcloudBackupController, self).__init__()
self.dcmanager_rpc_client = rpc_client.ManagerClient(
timeout=consts.RPC_SUBCLOUD_BACKUP_TIMEOUT)
timeout=consts.RPC_SUBCLOUD_BACKUP_TIMEOUT
)
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@ -50,24 +51,24 @@ class SubcloudBackupController(object):
@staticmethod
def _get_payload(request, verb):
expected_params = dict()
if verb == 'create':
if verb == "create":
expected_params = {
"subcloud": "text",
"group": "text",
"local_only": "text",
"registry_images": "text",
"backup_values": "yaml",
"sysadmin_password": "text"
"sysadmin_password": "text",
}
elif verb == 'delete':
elif verb == "delete":
expected_params = {
"release": "text",
"subcloud": "text",
"group": "text",
"local_only": "text",
"sysadmin_password": "text"
"sysadmin_password": "text",
}
elif verb == 'restore':
elif verb == "restore":
expected_params = {
"with_install": "text",
"release": "text",
@ -76,30 +77,29 @@ class SubcloudBackupController(object):
"sysadmin_password": "text",
"restore_values": "text",
"subcloud": "text",
"group": "text"
"group": "text",
}
else:
pecan.abort(400, _("Unexpected verb received"))
content_type = request.headers.get('content-type')
LOG.info('Request content-type: %s' % content_type)
if 'multipart/form-data' in content_type.lower():
return SubcloudBackupController._get_multipart_payload(request,
expected_params)
content_type = request.headers.get("content-type")
LOG.info("Request content-type: %s" % content_type)
if "multipart/form-data" in content_type.lower():
return SubcloudBackupController._get_multipart_payload(
request, expected_params
)
else:
return SubcloudBackupController._get_json_payload(request,
expected_params)
return SubcloudBackupController._get_json_payload(request, expected_params)
@staticmethod
def _get_multipart_payload(request, expected_params):
payload = dict()
file_params = ['backup_values', 'restore_values']
file_params = ["backup_values", "restore_values"]
for param in file_params:
if param in request.POST:
file_item = request.POST[param]
file_item.file.seek(0, os.SEEK_SET)
data = \
utils.yaml_safe_load(file_item.file.read().decode('utf8'), param)
data = utils.yaml_safe_load(file_item.file.read().decode("utf8"), param)
payload.update({param: data})
del request.POST[param]
@ -116,12 +116,12 @@ class SubcloudBackupController(object):
try:
payload = json.loads(request.body)
except Exception:
error_msg = 'Request body is malformed.'
error_msg = "Request body is malformed."
LOG.exception(error_msg)
pecan.abort(400, _(error_msg))
return
if not isinstance(payload, dict):
pecan.abort(400, _('Invalid request body format'))
pecan.abort(400, _("Invalid request body format"))
if not set(payload.keys()).issubset(expected_params.keys()):
LOG.info("Got an unexpected parameter in: %s" % payload)
pecan.abort(400, _("Unexpected parameter received"))
@ -133,13 +133,16 @@ class SubcloudBackupController(object):
sysadmin_password = payload.get(param_name)
if not sysadmin_password:
pecan.abort(400, _('subcloud sysadmin_password required'))
pecan.abort(400, _("subcloud sysadmin_password required"))
try:
payload['sysadmin_password'] = \
utils.decode_and_normalize_passwd(sysadmin_password)
payload["sysadmin_password"] = utils.decode_and_normalize_passwd(
sysadmin_password
)
except Exception:
msg = _('Failed to decode subcloud sysadmin_password, '
'verify the password is base64 encoded')
msg = _(
"Failed to decode subcloud sysadmin_password, "
"verify the password is base64 encoded"
)
LOG.exception(msg)
pecan.abort(400, msg)
@ -148,13 +151,14 @@ class SubcloudBackupController(object):
for param_name in param_names:
param = payload.get(param_name)
if param:
if param.lower() == 'true':
if param.lower() == "true":
payload[param_name] = True
elif param.lower() == 'false':
elif param.lower() == "false":
payload[param_name] = False
else:
pecan.abort(400, _('Invalid %s value, should be boolean'
% param_name))
pecan.abort(
400, _("Invalid %s value, should be boolean" % param_name)
)
else:
payload[param_name] = default
@ -177,23 +181,25 @@ class SubcloudBackupController(object):
operation (string): Subcloud backup operation
"""
subclouds = request_entity.subclouds
error_msg = _(
'Subcloud(s) must be in a valid state for backup %s.' % operation)
error_msg = _("Subcloud(s) must be in a valid state for backup %s." % operation)
has_valid_subclouds = False
valid_subclouds = list()
for subcloud in subclouds:
try:
is_valid = utils.is_valid_for_backup_operation(
operation, subcloud, bootstrap_address_dict)
operation, subcloud, bootstrap_address_dict
)
if operation == 'create':
backup_in_progress = subcloud.backup_status in \
consts.STATES_FOR_ONGOING_BACKUP
if operation == "create":
backup_in_progress = (
subcloud.backup_status in consts.STATES_FOR_ONGOING_BACKUP
)
if is_valid and not backup_in_progress:
has_valid_subclouds = True
else:
error_msg = _('Subcloud(s) already have a backup '
'operation in progress.')
error_msg = _(
"Subcloud(s) already have a backup operation in progress."
)
else:
if is_valid:
valid_subclouds.append(subcloud)
@ -202,21 +208,27 @@ class SubcloudBackupController(object):
except exceptions.ValidateFail as e:
error_msg = e.message
if (operation == 'create' and has_valid_subclouds
and request_entity.type == 'subcloud'):
if (
operation == "create"
and has_valid_subclouds
and request_entity.type == "subcloud"
):
# Check the system health only if the command was issued
# to a single subcloud to avoid huge delays.
if not utils.is_subcloud_healthy(subcloud.region_name):
msg = _('Subcloud %s must be in good health for '
'subcloud-backup create.' % subcloud.name)
msg = _(
"Subcloud %s must be in good health for subcloud-backup create."
% subcloud.name
)
pecan.abort(400, msg)
if not has_valid_subclouds:
if request_entity.type == 'group':
msg = _('None of the subclouds in group %s are in a valid '
'state for subcloud-backup %s') % (request_entity.name,
operation)
elif request_entity.type == 'subcloud':
if request_entity.type == "group":
msg = _(
"None of the subclouds in group %s are in a valid "
"state for subcloud-backup %s"
) % (request_entity.name, operation)
elif request_entity.type == "subcloud":
msg = error_msg
pecan.abort(400, msg)
@ -225,52 +237,62 @@ class SubcloudBackupController(object):
@staticmethod
def _get_subclouds_from_group(group, context):
if not group:
pecan.abort(404, _('Group not found'))
pecan.abort(404, _("Group not found"))
return db_api.subcloud_get_for_group(context, group.id)
def _read_entity_from_request_params(self, context, payload):
subcloud_ref = payload.get('subcloud')
group_ref = payload.get('group')
subcloud_ref = payload.get("subcloud")
group_ref = payload.get("group")
if subcloud_ref:
if group_ref:
pecan.abort(400, _("'subcloud' and 'group' parameters "
"should not be given at the same time"))
pecan.abort(
400,
_(
"'subcloud' and 'group' parameters should not be given at "
"the same time"
),
)
subcloud = utils.subcloud_get_by_ref(context, subcloud_ref)
if not subcloud:
pecan.abort(400, _('Subcloud not found'))
return RequestEntity('subcloud', subcloud.id, subcloud_ref, [subcloud])
pecan.abort(400, _("Subcloud not found"))
return RequestEntity("subcloud", subcloud.id, subcloud_ref, [subcloud])
elif group_ref:
group = utils.subcloud_group_get_by_ref(context, group_ref)
group_subclouds = self._get_subclouds_from_group(group, context)
if not group_subclouds:
pecan.abort(400, _('No subclouds present in group'))
return RequestEntity('group', group.id, group_ref, group_subclouds)
pecan.abort(400, _("No subclouds present in group"))
return RequestEntity("group", group.id, group_ref, group_subclouds)
else:
pecan.abort(400, _("'subcloud' or 'group' parameter is required"))
@utils.synchronized(LOCK_NAME)
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
"""Create a new subcloud backup."""
context = restcomm.extract_context_from_environ()
payload = self._get_payload(pecan_request, 'create')
payload = self._get_payload(pecan_request, "create")
policy.authorize(subcloud_backup_policy.POLICY_ROOT % "create", {},
restcomm.extract_credentials_for_policy())
self._validate_and_decode_sysadmin_password(payload, 'sysadmin_password')
policy.authorize(
subcloud_backup_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
self._validate_and_decode_sysadmin_password(payload, "sysadmin_password")
if not payload.get('local_only') and payload.get('registry_images'):
pecan.abort(400, _('Option registry_images can not be used without '
'local_only option.'))
if not payload.get("local_only") and payload.get("registry_images"):
pecan.abort(
400,
_("Option registry_images can not be used without local_only option."),
)
request_entity = self._read_entity_from_request_params(context, payload)
self._validate_subclouds(request_entity, 'create')
self._validate_subclouds(request_entity, "create")
# Set subcloud/group ID as reference instead of name to ease processing
payload[request_entity.type] = request_entity.id
self._convert_param_to_bool(payload, ['local_only', 'registry_images'])
self._convert_param_to_bool(payload, ["local_only", "registry_images"])
try:
self.dcmanager_rpc_client.backup_subclouds(context, payload)
@ -279,10 +301,10 @@ class SubcloudBackupController(object):
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to backup subclouds")
pecan.abort(500, _('Unable to backup subcloud'))
pecan.abort(500, _("Unable to backup subcloud"))
@utils.synchronized(LOCK_NAME)
@index.when(method='PATCH', template='json')
@index.when(method="PATCH", template="json")
def patch(self, verb, release_version=None):
"""Delete or restore a subcloud backup.
@ -294,25 +316,29 @@ class SubcloudBackupController(object):
context = restcomm.extract_context_from_environ()
payload = self._get_payload(pecan_request, verb)
if verb == 'delete':
policy.authorize(subcloud_backup_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
if verb == "delete":
policy.authorize(
subcloud_backup_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
if not release_version:
pecan.abort(400, _('Release version required'))
pecan.abort(400, _("Release version required"))
self._convert_param_to_bool(payload, ['local_only'])
self._convert_param_to_bool(payload, ["local_only"])
# Backup delete in systemcontroller doesn't need sysadmin_password
if payload.get('local_only'):
if payload.get("local_only"):
self._validate_and_decode_sysadmin_password(
payload, 'sysadmin_password')
payload, "sysadmin_password"
)
request_entity = self._read_entity_from_request_params(context, payload)
# Validate subcloud state when deleting locally
# Not needed for centralized storage, since connection is not required
local_only = payload.get('local_only')
local_only = payload.get("local_only")
if local_only:
self._validate_subclouds(request_entity, verb)
@ -321,7 +347,8 @@ class SubcloudBackupController(object):
try:
message = self.dcmanager_rpc_client.delete_subcloud_backups(
context, release_version, payload)
context, release_version, payload
)
if message:
response.status_int = 207
@ -332,83 +359,109 @@ class SubcloudBackupController(object):
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to delete subcloud backups")
pecan.abort(500, _('Unable to delete subcloud backups'))
elif verb == 'restore':
policy.authorize(subcloud_backup_policy.POLICY_ROOT % "restore", {},
restcomm.extract_credentials_for_policy())
pecan.abort(500, _("Unable to delete subcloud backups"))
elif verb == "restore":
policy.authorize(
subcloud_backup_policy.POLICY_ROOT % "restore",
{},
restcomm.extract_credentials_for_policy(),
)
if not payload:
pecan.abort(400, _('Body required'))
pecan.abort(400, _("Body required"))
self._validate_and_decode_sysadmin_password(payload, 'sysadmin_password')
self._validate_and_decode_sysadmin_password(payload, "sysadmin_password")
self._convert_param_to_bool(payload, ['local_only', 'with_install',
'registry_images'])
self._convert_param_to_bool(
payload, ["local_only", "with_install", "registry_images"]
)
if not payload['local_only'] and payload['registry_images']:
pecan.abort(400, _('Option registry_images cannot be used '
'without local_only option.'))
if not payload["local_only"] and payload["registry_images"]:
pecan.abort(
400,
_(
"Option registry_images cannot be used "
"without local_only option."
),
)
if not payload['with_install'] and payload.get('release'):
pecan.abort(400, _('Option release cannot be used '
'without with_install option.'))
if not payload["with_install"] and payload.get("release"):
pecan.abort(
400,
_("Option release cannot be used without with_install option."),
)
request_entity = self._read_entity_from_request_params(context, payload)
if len(request_entity.subclouds) == 0:
msg = "No subclouds exist under %s %s" % (request_entity.type,
request_entity.id)
msg = "No subclouds exist under %s %s" % (
request_entity.type,
request_entity.id,
)
pecan.abort(400, _(msg))
bootstrap_address_dict = \
payload.get('restore_values', {}).get('bootstrap_address', {})
bootstrap_address_dict = payload.get("restore_values", {}).get(
"bootstrap_address", {}
)
if not isinstance(bootstrap_address_dict, dict):
pecan.abort(
400, _('The bootstrap_address provided in restore_values '
'is in invalid format.')
400,
_(
"The bootstrap_address provided in restore_values "
"is in invalid format."
),
)
restore_subclouds = self._validate_subclouds(
request_entity, verb, bootstrap_address_dict)
request_entity, verb, bootstrap_address_dict
)
payload[request_entity.type] = request_entity.id
if payload.get('with_install'):
if payload.get("with_install"):
subclouds_without_install_values = [
subcloud.name for subcloud in request_entity.subclouds if
not subcloud.data_install
subcloud.name
for subcloud in request_entity.subclouds
if not subcloud.data_install
]
if subclouds_without_install_values:
subclouds_str = ', '.join(subclouds_without_install_values)
subclouds_str = ", ".join(subclouds_without_install_values)
pecan.abort(
400, _('The restore operation was requested with_install, '
'but the following subcloud(s) does not contain '
'install values: %s' % subclouds_str)
400,
_(
"The restore operation was requested with_install, "
"but the following subcloud(s) does not contain "
"install values: %s" % subclouds_str
),
)
# Confirm the requested or active load is still in dc-vault
payload['software_version'] = utils.get_sw_version(
payload.get('release'))
matching_iso, err_msg = \
utils.get_matching_iso(payload['software_version'])
payload["software_version"] = utils.get_sw_version(
payload.get("release")
)
matching_iso, err_msg = utils.get_matching_iso(
payload["software_version"]
)
if err_msg:
LOG.exception(err_msg)
pecan.abort(400, _(err_msg))
LOG.info("Restore operation will use image %s in subcloud "
"installation" % matching_iso)
LOG.info(
"Restore operation will use image %s in subcloud installation"
% matching_iso
)
try:
# local update to deploy_status - this is just for CLI response
# pylint: disable-next=consider-using-enumerate
for i in range(len(restore_subclouds)):
restore_subclouds[i].deploy_status = (
consts.DEPLOY_STATE_PRE_RESTORE)
restore_subclouds[i].deploy_status = consts.DEPLOY_STATE_PRE_RESTORE
message = self.dcmanager_rpc_client.restore_subcloud_backups(
context, payload)
context, payload
)
return utils.subcloud_db_list_to_dict(restore_subclouds)
except RemoteError as e:
pecan.abort(422, e.value)
except Exception:
LOG.exception("Unable to restore subcloud")
pecan.abort(500, _('Unable to restore subcloud'))
pecan.abort(500, _("Unable to restore subcloud"))
else:
pecan.abort(400, _('Invalid request'))
pecan.abort(400, _("Invalid request"))
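
Several handlers above reject a request unless sysadmin_password decodes cleanly from base64 (see _validate_and_decode_sysadmin_password), and the boolean options arrive as text that _convert_param_to_bool later normalizes. A small client-side sketch, with hypothetical values, of how a backup create body could be assembled so it passes those checks:

import base64
import json


def build_backup_create_body(subcloud: str, password: str) -> str:
    """Return a JSON body shaped like the one the create handler expects."""
    encoded = base64.b64encode(password.encode("utf-8")).decode("utf-8")
    return json.dumps(
        {
            "subcloud": subcloud,          # name or ID of a single subcloud
            "local_only": "true",          # converted to bool by the controller
            "registry_images": "true",     # only valid together with local_only
            "sysadmin_password": encoded,  # must be base64 or the API aborts with 400
        }
    )


print(build_backup_create_body("subcloud1", "St8rlingX*"))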


@ -37,7 +37,7 @@ from dcmanager.common import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
LOCK_NAME = 'SubcloudDeployController'
LOCK_NAME = "SubcloudDeployController"
class SubcloudDeployController(object):
@ -48,7 +48,7 @@ class SubcloudDeployController(object):
@staticmethod
def _upload_files(dir_path, file_option, file_item, binary):
prefix = file_option + '_'
prefix = file_option + "_"
# create the version directory if it does not exist
if not os.path.isdir(dir_path):
os.mkdir(dir_path, 0o755)
@ -56,30 +56,32 @@ class SubcloudDeployController(object):
# check if the file exists, if so remove it
filename = utils.get_filename_by_prefix(dir_path, prefix)
if filename is not None:
os.remove(dir_path + '/' + filename)
os.remove(dir_path + "/" + filename)
# upload the new file
file_item.file.seek(0, os.SEEK_SET)
contents = file_item.file.read()
fn = os.path.join(dir_path, prefix + os.path.basename(
file_item.filename))
fn = os.path.join(dir_path, prefix + os.path.basename(file_item.filename))
if binary:
dst = open(fn, 'wb')
dst = open(fn, "wb")
dst.write(contents)
else:
dst = os.open(fn, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.write(dst, contents)
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@utils.synchronized(LOCK_NAME)
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
policy.authorize(subcloud_deploy_policy.POLICY_ROOT % "upload", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_deploy_policy.POLICY_ROOT % "upload",
{},
restcomm.extract_credentials_for_policy(),
)
deploy_dicts = dict()
missing_options = set()
for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
@ -92,30 +94,31 @@ class SubcloudDeployController(object):
# 3. DEPLOY_PRESTAGE
size = len(missing_options)
if len(missing_options) > 0:
if ((consts.DEPLOY_PRESTAGE in missing_options and size != 1) or
(consts.DEPLOY_PRESTAGE not in missing_options and size != 3)):
if (consts.DEPLOY_PRESTAGE in missing_options and size != 1) or (
consts.DEPLOY_PRESTAGE not in missing_options and size != 3
):
missing_str = str()
for missing in missing_options:
if missing is not consts.DEPLOY_PRESTAGE:
missing_str += '--%s ' % missing
missing_str += "--%s " % missing
error_msg = "error: argument %s is required" % missing_str.rstrip()
pecan.abort(httpclient.BAD_REQUEST, error_msg)
deploy_dicts['software_version'] = \
utils.get_sw_version(request.POST.get('release'))
deploy_dicts["software_version"] = utils.get_sw_version(
request.POST.get("release")
)
dir_path = os.path.join(
dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version']
dccommon_consts.DEPLOY_DIR, deploy_dicts["software_version"]
)
for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
if f not in request.POST:
continue
file_item = request.POST[f]
filename = getattr(file_item, 'filename', '')
filename = getattr(file_item, "filename", "")
if not filename:
pecan.abort(httpclient.BAD_REQUEST,
_("No %s file uploaded" % f))
pecan.abort(httpclient.BAD_REQUEST, _("No %s file uploaded" % f))
binary = False
if f == consts.DEPLOY_CHART:
@ -123,76 +126,90 @@ class SubcloudDeployController(object):
try:
self._upload_files(dir_path, f, file_item, binary)
except Exception as e:
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_("Failed to upload %s file: %s" % (f, e)))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Failed to upload %s file: %s" % (f, e)),
)
deploy_dicts.update({f: filename})
return deploy_dicts
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, release=None):
"""Get the subcloud deploy files that has been uploaded and stored.
:param release: release version
"""
policy.authorize(subcloud_deploy_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_deploy_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
deploy_dicts = dict()
deploy_dicts['software_version'] = utils.get_sw_version(release)
deploy_dicts["software_version"] = utils.get_sw_version(release)
dir_path = os.path.join(
dccommon_consts.DEPLOY_DIR, deploy_dicts['software_version']
dccommon_consts.DEPLOY_DIR, deploy_dicts["software_version"]
)
for f in consts.DEPLOY_COMMON_FILE_OPTIONS:
filename = None
if os.path.isdir(dir_path):
prefix = f + '_'
prefix = f + "_"
filename = utils.get_filename_by_prefix(dir_path, prefix)
if filename is not None:
filename = filename.replace(prefix, '', 1)
filename = filename.replace(prefix, "", 1)
deploy_dicts.update({f: filename})
return dict(subcloud_deploy=deploy_dicts)
@index.when(method='DELETE', template='json')
@index.when(method="DELETE", template="json")
def delete(self, release=None):
"""Delete the subcloud deploy files.
:param release: release version
"""
policy.authorize(subcloud_deploy_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_deploy_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
is_prestage_images = \
request.params.get('prestage_images', '').lower() == 'true'
is_deployment_files = \
request.params.get('deployment_files', '').lower() == 'true'
is_prestage_images = request.params.get("prestage_images", "").lower() == "true"
is_deployment_files = (
request.params.get("deployment_files", "").lower() == "true"
)
dir_path = \
os.path.join(dccommon_consts.DEPLOY_DIR, utils.get_sw_version(release))
dir_path = os.path.join(
dccommon_consts.DEPLOY_DIR, utils.get_sw_version(release)
)
if not os.path.isdir(dir_path):
pecan.abort(httpclient.NOT_FOUND,
_("Directory not found: %s" % dir_path))
pecan.abort(httpclient.NOT_FOUND, _("Directory not found: %s" % dir_path))
try:
file_options = []
if is_prestage_images:
file_options.append(consts.DEPLOY_PRESTAGE)
if is_deployment_files:
file_options.extend([consts.DEPLOY_OVERRIDES, consts.DEPLOY_CHART,
consts.DEPLOY_PLAYBOOK])
file_options.extend(
[
consts.DEPLOY_OVERRIDES,
consts.DEPLOY_CHART,
consts.DEPLOY_PLAYBOOK,
]
)
if not (is_deployment_files or is_prestage_images):
file_options.extend(consts.DEPLOY_COMMON_FILE_OPTIONS)
for file_option in file_options:
prefix = file_option + '_'
prefix = file_option + "_"
file_name = utils.get_filename_by_prefix(dir_path, prefix)
if file_name:
os.remove(os.path.join(dir_path, file_name))
else:
LOG.warning('%s file not present' % file_option)
LOG.warning("%s file not present" % file_option)
except Exception as e:
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_("Failed to delete file: %s" % e))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Failed to delete file: %s" % e)
)
return None
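
A quick way to confirm the reformatted tree stays clean is Black's own check mode. A minimal sketch, assuming the black package is installed locally (the helper name check_formatting is hypothetical; --check and --diff are standard Black options):

import subprocess
import sys


def check_formatting(path: str = "dcmanager/api") -> int:
    """Run Black in check mode and return its exit code (0 means clean)."""
    result = subprocess.run(
        ["black", "--check", "--diff", path],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        # Black prints the would-be diff for any file that is not formatted.
        sys.stdout.write(result.stdout)
        sys.stderr.write(result.stderr)
    return result.returncode


if __name__ == "__main__":
    sys.exit(check_formatting())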


@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__)
SUPPORTED_GROUP_APPLY_TYPES = [
consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
consts.SUBCLOUD_APPLY_TYPE_SERIAL
consts.SUBCLOUD_APPLY_TYPE_SERIAL,
]
# validation constants for Subcloud Group
@ -54,7 +54,7 @@ class SubcloudGroupsController(restcomm.GenericPathController):
super(SubcloudGroupsController, self).__init__()
self.rpc_client = rpc_client.ManagerClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@ -72,17 +72,20 @@ class SubcloudGroupsController(restcomm.GenericPathController):
subcloud_group_list.append(group_dict)
result = dict()
result['subcloud_groups'] = subcloud_group_list
result["subcloud_groups"] = subcloud_group_list
return result
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, group_ref=None, subclouds=False):
"""Get details about subcloud group.
:param group_ref: ID or name of subcloud group
"""
policy.authorize(subcloud_group_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_group_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if group_ref is None:
@ -91,7 +94,7 @@ class SubcloudGroupsController(restcomm.GenericPathController):
group = utils.subcloud_group_get_by_ref(context, group_ref)
if group is None:
pecan.abort(httpclient.NOT_FOUND, _('Subcloud Group not found'))
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Group not found"))
if subclouds:
# Return only the subclouds for this subcloud group
return self._get_subcloud_list_for_group(context, group.id)
@ -128,107 +131,114 @@ class SubcloudGroupsController(restcomm.GenericPathController):
return False
return True
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
"""Create a new subcloud group."""
policy.authorize(subcloud_group_policy.POLICY_ROOT % "create", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_group_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
payload = eval(request.body)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
name = payload.get('name')
description = payload.get('description')
update_apply_type = payload.get('update_apply_type')
max_parallel_subclouds = payload.get('max_parallel_subclouds')
name = payload.get("name")
description = payload.get("description")
update_apply_type = payload.get("update_apply_type")
max_parallel_subclouds = payload.get("max_parallel_subclouds")
# Validate payload
if not utils.validate_name(name,
prohibited_name_list=[
consts.DEFAULT_SUBCLOUD_GROUP_NAME]):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid group name'))
if not utils.validate_name(
name, prohibited_name_list=[consts.DEFAULT_SUBCLOUD_GROUP_NAME]
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group name"))
if not self._validate_description(description):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid group description'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group description"))
if not self._validate_update_apply_type(update_apply_type):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group update_apply_type'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group update_apply_type"))
if not self._validate_max_parallel_subclouds(max_parallel_subclouds):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group max_parallel_subclouds'))
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid group max_parallel_subclouds")
)
try:
group_ref = db_api.subcloud_group_create(context,
name,
description,
update_apply_type,
max_parallel_subclouds)
group_ref = db_api.subcloud_group_create(
context, name, description, update_apply_type, max_parallel_subclouds
)
return db_api.subcloud_group_db_model_to_dict(group_ref)
except db_exc.DBDuplicateEntry:
LOG.info("Group create failed. Group %s already exists" % name)
pecan.abort(httpclient.BAD_REQUEST,
_('A subcloud group with this name already exists'))
pecan.abort(
httpclient.BAD_REQUEST,
_("A subcloud group with this name already exists"),
)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to create subcloud group'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to create subcloud group")
)
@index.when(method='PATCH', template='json')
@index.when(method="PATCH", template="json")
def patch(self, group_ref):
"""Update a subcloud group.
:param group_ref: ID or name of subcloud group to update
"""
policy.authorize(subcloud_group_policy.POLICY_ROOT % "modify", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_group_policy.POLICY_ROOT % "modify",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if group_ref is None:
pecan.abort(httpclient.BAD_REQUEST,
_('Subcloud Group Name or ID required'))
pecan.abort(httpclient.BAD_REQUEST, _("Subcloud Group Name or ID required"))
payload = eval(request.body)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
group = utils.subcloud_group_get_by_ref(context, group_ref)
if group is None:
pecan.abort(httpclient.NOT_FOUND, _('Subcloud Group not found'))
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Group not found"))
name = payload.get('name')
description = payload.get('description')
update_apply_type = payload.get('update_apply_type')
max_parallel_str = payload.get('max_parallel_subclouds')
name = payload.get("name")
description = payload.get("description")
update_apply_type = payload.get("update_apply_type")
max_parallel_str = payload.get("max_parallel_subclouds")
if not (name or description or update_apply_type or max_parallel_str):
pecan.abort(httpclient.BAD_REQUEST, _('nothing to update'))
pecan.abort(httpclient.BAD_REQUEST, _("nothing to update"))
# Check value is not None or empty before calling validate
if name:
if not utils.validate_name(name,
prohibited_name_list=[
consts.DEFAULT_SUBCLOUD_GROUP_NAME]):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group name'))
if not utils.validate_name(
name, prohibited_name_list=[consts.DEFAULT_SUBCLOUD_GROUP_NAME]
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group name"))
# Special case. Default group name cannot be changed
if group.id == consts.DEFAULT_SUBCLOUD_GROUP_ID:
pecan.abort(httpclient.BAD_REQUEST,
_('Default group name cannot be changed'))
pecan.abort(
httpclient.BAD_REQUEST, _("Default group name cannot be changed")
)
if description:
if not self._validate_description(description):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group description'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group description"))
if update_apply_type:
if not self._validate_update_apply_type(update_apply_type):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group update_apply_type'))
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid group update_apply_type")
)
if max_parallel_str:
if not self._validate_max_parallel_subclouds(max_parallel_str):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group max_parallel_subclouds'))
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid group max_parallel_subclouds")
)
try:
updated_group = db_api.subcloud_group_update(
@ -237,44 +247,49 @@ class SubcloudGroupsController(restcomm.GenericPathController):
name=name,
description=description,
update_apply_type=update_apply_type,
max_parallel_subclouds=max_parallel_str)
max_parallel_subclouds=max_parallel_str,
)
return db_api.subcloud_group_db_model_to_dict(updated_group)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
# additional exceptions.
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to update subcloud group'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to update subcloud group")
)
@index.when(method='delete', template='json')
@index.when(method="delete", template="json")
def delete(self, group_ref):
"""Delete the subcloud group."""
policy.authorize(subcloud_group_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_group_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if group_ref is None:
pecan.abort(httpclient.BAD_REQUEST,
_('Subcloud Group Name or ID required'))
pecan.abort(httpclient.BAD_REQUEST, _("Subcloud Group Name or ID required"))
group = utils.subcloud_group_get_by_ref(context, group_ref)
if group is None:
pecan.abort(httpclient.NOT_FOUND, _('Subcloud Group not found'))
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Group not found"))
if group.name == consts.DEFAULT_SUBCLOUD_GROUP_NAME:
pecan.abort(httpclient.BAD_REQUEST,
_('Default Subcloud Group may not be deleted'))
pecan.abort(
httpclient.BAD_REQUEST, _("Default Subcloud Group may not be deleted")
)
try:
# a subcloud group may not be deleted if it is use by any subclouds
subclouds = db_api.subcloud_get_for_group(context, group.id)
if len(subclouds) > 0:
pecan.abort(httpclient.BAD_REQUEST,
_('Subcloud Group not empty'))
pecan.abort(httpclient.BAD_REQUEST, _("Subcloud Group not empty"))
db_api.subcloud_group_destroy(context, group.id)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to delete subcloud group'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to delete subcloud group")
)
# This should return nothing
return None
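
The create and update handlers above validate name, description, update_apply_type, and max_parallel_subclouds before touching the database. A hypothetical request body that would pass those checks, assuming the apply-type constants resolve to the strings "parallel" and "serial":

# Hypothetical example; field names mirror the payload keys read by post()/patch().
group_body = {
    "name": "edge-sites",             # any valid, non-default group name
    "description": "Subclouds that can be updated together",
    "update_apply_type": "parallel",  # assumed value of SUBCLOUD_APPLY_TYPE_PARALLEL
    "max_parallel_subclouds": 2,
}
print(group_body)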


@ -42,10 +42,7 @@ MAX_SUBCLOUD_PEER_GROUP_PRIORITY = 65536
MIN_SUBCLOUD_PEER_GROUP_PRIORITY = 0
DEFAULT_SUBCLOUD_PEER_GROUP_PRIORITY = 0
DEFAULT_SUBCLOUD_PEER_GROUP_MAX_REHOMING = 10
SUPPORTED_GROUP_STATES = [
consts.OPERATIONAL_ENABLED,
consts.OPERATIONAL_DISABLED
]
SUPPORTED_GROUP_STATES = [consts.OPERATIONAL_ENABLED, consts.OPERATIONAL_DISABLED]
class SubcloudPeerGroupsController(restcomm.GenericPathController):
@ -54,7 +51,7 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
super(SubcloudPeerGroupsController, self).__init__()
self.rpc_client = rpc_client.ManagerClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@ -71,7 +68,7 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
group_dict = db_api.subcloud_peer_group_db_model_to_dict(group)
subcloud_peer_group_list.append(group_dict)
result = {'subcloud_peer_groups': subcloud_peer_group_list}
result = {"subcloud_peer_groups": subcloud_peer_group_list}
return result
def _get_local_system(self):
@ -84,46 +81,46 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
sysinv_client = SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
ks_client.keystone_client.session,
endpoint=ks_client.keystone_client.endpoint_cache.get_endpoint
("sysinv"),
endpoint=ks_client.keystone_client.endpoint_cache.get_endpoint(
"sysinv"
),
)
system = sysinv_client.get_system()
return system
except Exception:
pecan.abort(httpclient.BAD_REQUEST,
_("Failed to get local system info"))
pecan.abort(httpclient.BAD_REQUEST, _("Failed to get local system info"))
def _get_subcloud_status_for_peer_group(self, context, group):
subclouds = db_api.subcloud_get_for_peer_group(context, group.id)
pg_status = dict()
pg_status['peer_group_id'] = group.id
pg_status['peer_group_name'] = group.peer_group_name
pg_status['total_subclouds'] = len(subclouds)
pg_status['complete'] = 0
pg_status['waiting_for_migrate'] = 0
pg_status['rehoming'] = 0
pg_status['rehome_failed'] = 0
pg_status['managed'] = 0
pg_status['unmanaged'] = 0
pg_status["peer_group_id"] = group.id
pg_status["peer_group_name"] = group.peer_group_name
pg_status["total_subclouds"] = len(subclouds)
pg_status["complete"] = 0
pg_status["waiting_for_migrate"] = 0
pg_status["rehoming"] = 0
pg_status["rehome_failed"] = 0
pg_status["managed"] = 0
pg_status["unmanaged"] = 0
for subcloud in subclouds:
if subcloud.management_state == 'managed':
pg_status['managed'] += 1
if subcloud.management_state == "managed":
pg_status["managed"] += 1
else:
pg_status['unmanaged'] += 1
pg_status["unmanaged"] += 1
if subcloud.deploy_status == 'secondary':
pg_status['waiting_for_migrate'] += 1
elif subcloud.deploy_status == 'rehome-failed':
pg_status['rehome_failed'] += 1
elif subcloud.deploy_status == 'rehome-prep-failed':
pg_status['rehome_failed'] += 1
elif subcloud.deploy_status == 'complete':
pg_status['complete'] += 1
elif subcloud.deploy_status == 'rehoming':
pg_status['rehoming'] += 1
if subcloud.deploy_status == "secondary":
pg_status["waiting_for_migrate"] += 1
elif subcloud.deploy_status == "rehome-failed":
pg_status["rehome_failed"] += 1
elif subcloud.deploy_status == "rehome-prep-failed":
pg_status["rehome_failed"] += 1
elif subcloud.deploy_status == "complete":
pg_status["complete"] += 1
elif subcloud.deploy_status == "rehoming":
pg_status["rehoming"] += 1
return pg_status
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, group_ref=None, verb=None):
"""Get details about subcloud peer group.
@ -131,8 +128,11 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
to the subcloud-peer-group get operation
:param group_ref: ID or name of subcloud peer group
"""
policy.authorize(subcloud_peer_group_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_peer_group_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if group_ref is None:
@ -143,42 +143,45 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
if group is None:
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Peer Group not found"))
if verb is None:
subcloud_peer_group_dict = \
db_api.subcloud_peer_group_db_model_to_dict(group)
subcloud_peer_group_dict = db_api.subcloud_peer_group_db_model_to_dict(
group
)
return subcloud_peer_group_dict
elif verb == 'subclouds':
elif verb == "subclouds":
# Return only the subclouds for this subcloud peer group
return self._get_subcloud_list_for_peer_group(context, group.id)
elif verb == 'status':
elif verb == "status":
return self._get_subcloud_status_for_peer_group(context, group)
else:
pecan.abort(400, _('Invalid request'))
pecan.abort(400, _("Invalid request"))
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
"""Create a new subcloud peer group."""
policy.authorize(subcloud_peer_group_policy.POLICY_ROOT % "create", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_peer_group_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
payload = json.loads(request.body)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
LOG.info("Handling create subcloud peer group request for: %s" % payload)
peer_group_name = payload.get('peer-group-name')
group_priority = payload.get('group-priority')
group_state = payload.get('group-state')
system_leader_id = payload.get('system-leader-id')
system_leader_name = payload.get('system-leader-name')
max_subcloud_rehoming = payload.get('max-subcloud-rehoming')
peer_group_name = payload.get("peer-group-name")
group_priority = payload.get("group-priority")
group_state = payload.get("group-state")
system_leader_id = payload.get("system-leader-id")
system_leader_name = payload.get("system-leader-name")
max_subcloud_rehoming = payload.get("max-subcloud-rehoming")
local_system = None
# Validate payload
# peer_group_name is mandatory
if not utils.validate_name(peer_group_name,
prohibited_name_list=['none']):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer-group-name'))
if not utils.validate_name(peer_group_name, prohibited_name_list=["none"]):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer-group-name"))
if not system_leader_id:
# 1.Operator does not need to (and should not) specify
# system_leader_id for a local subcloud peer group which
@ -191,8 +194,10 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
local_system = self._get_local_system()
system_leader_id = local_system.uuid
elif not self._validate_system_leader_id(system_leader_id):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-id [%s]' % (system_leader_id)))
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid system-leader-id [%s]" % (system_leader_id)),
)
if not system_leader_name:
# Get system_leader_name from local DC
# if no system_leader_name provided
@ -200,43 +205,46 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
local_system = self._get_local_system()
system_leader_name = local_system.name
elif not utils.validate_name(system_leader_name):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-name'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid system-leader-name"))
if group_priority is None:
group_priority = DEFAULT_SUBCLOUD_PEER_GROUP_PRIORITY
elif not self._validate_group_priority(group_priority):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid group-priority'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group-priority"))
if not group_state:
group_state = consts.OPERATIONAL_ENABLED
elif not self._validate_group_state(group_state):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group-state'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group-state"))
if max_subcloud_rehoming is None:
max_subcloud_rehoming = DEFAULT_SUBCLOUD_PEER_GROUP_MAX_REHOMING
elif not self._validate_max_subcloud_rehoming(max_subcloud_rehoming):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid max-subcloud-rehoming'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid max-subcloud-rehoming"))
try:
group_ref = db_api.subcloud_peer_group_create(context,
peer_group_name,
group_priority,
group_state,
max_subcloud_rehoming,
system_leader_id,
system_leader_name)
group_ref = db_api.subcloud_peer_group_create(
context,
peer_group_name,
group_priority,
group_state,
max_subcloud_rehoming,
system_leader_id,
system_leader_name,
)
return db_api.subcloud_peer_group_db_model_to_dict(group_ref)
except db_exc.DBDuplicateEntry:
pecan.abort(httpclient.CONFLICT,
_('A subcloud peer group with this name already exists'))
pecan.abort(
httpclient.CONFLICT,
_("A subcloud peer group with this name already exists"),
)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to create subcloud peer group'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to create subcloud peer group"),
)
@index.when(method='PATCH', template='json')
@index.when(method="PATCH", template="json")
def patch(self, group_ref, verb=None):
"""Update a subcloud peer group.
@ -245,36 +253,40 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
:param group_ref: ID or name of subcloud group to update
"""
policy.authorize(subcloud_peer_group_policy.POLICY_ROOT % "modify", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_peer_group_policy.POLICY_ROOT % "modify",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if group_ref is None:
pecan.abort(httpclient.BAD_REQUEST,
_('Subcloud Peer Group Name or ID required'))
pecan.abort(
httpclient.BAD_REQUEST, _("Subcloud Peer Group Name or ID required")
)
group = utils.subcloud_peer_group_get_by_ref(context, group_ref)
if group is None:
pecan.abort(httpclient.NOT_FOUND, _('Subcloud Peer Group not found'))
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Peer Group not found"))
if verb is None:
payload = json.loads(request.body)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
if group.group_priority > 0 and \
not utils.is_req_from_another_dc(request):
pecan.abort(httpclient.BAD_REQUEST,
_("Cannot update a peer group from a non-primary "
"site."))
if group.group_priority > 0 and not utils.is_req_from_another_dc(request):
pecan.abort(
httpclient.BAD_REQUEST,
_("Cannot update a peer group from a non-primary site."),
)
LOG.info("Handling update subcloud peer group request for: %s" % payload)
peer_group_name = payload.get('peer-group-name')
group_priority = payload.get('group-priority')
group_state = payload.get('group-state')
system_leader_id = payload.get('system-leader-id')
system_leader_name = payload.get('system-leader-name')
max_subcloud_rehoming = payload.get('max-subcloud-rehoming')
if 'migration_status' in payload:
migration_status = payload.get('migration_status')
peer_group_name = payload.get("peer-group-name")
group_priority = payload.get("group-priority")
group_state = payload.get("group-state")
system_leader_id = payload.get("system-leader-id")
system_leader_name = payload.get("system-leader-name")
max_subcloud_rehoming = payload.get("max-subcloud-rehoming")
if "migration_status" in payload:
migration_status = payload.get("migration_status")
if migration_status is None:
migration_status = consts.PEER_GROUP_MIGRATION_NONE
else:
@ -289,84 +301,91 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
or max_subcloud_rehoming is not None
or migration_status
):
pecan.abort(httpclient.BAD_REQUEST, _('nothing to update'))
pecan.abort(httpclient.BAD_REQUEST, _("nothing to update"))
# The flag to indicate if the update needs to be synced to
# the peer site(s).
sync_needed = ((peer_group_name and
peer_group_name != group.peer_group_name) or
(group_state and
group_state != group.group_state) or
(max_subcloud_rehoming is not None and
max_subcloud_rehoming != group.max_subcloud_rehoming))
sync_needed = (
(peer_group_name and peer_group_name != group.peer_group_name)
or (group_state and group_state != group.group_state)
or (
max_subcloud_rehoming is not None
and max_subcloud_rehoming != group.max_subcloud_rehoming
)
)
any_update = (sync_needed or
((group_priority is not None and
group_priority != group.group_priority) or
(system_leader_id and
system_leader_id != group.system_leader_id) or
(system_leader_name and
system_leader_name != group.system_leader_name) or
(migration_status and
migration_status != group.migration_status)))
any_update = sync_needed or (
(group_priority is not None and group_priority != group.group_priority)
or (system_leader_id and system_leader_id != group.system_leader_id)
or (
system_leader_name
and system_leader_name != group.system_leader_name
)
or (migration_status and migration_status != group.migration_status)
)
if not any_update:
return db_api.subcloud_peer_group_db_model_to_dict(group)
# Check value is not None or empty before calling validation function
if (peer_group_name is not None and
not utils.validate_name(peer_group_name,
prohibited_name_list=['none'])):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer-group-name'))
if (group_priority is not None and
not self._validate_group_priority(group_priority)):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid group-priority'))
if peer_group_name is not None and not utils.validate_name(
peer_group_name, prohibited_name_list=["none"]
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer-group-name"))
if group_priority is not None and not self._validate_group_priority(
group_priority
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group-priority"))
if group_state and not self._validate_group_state(group_state):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid group-state'))
if (max_subcloud_rehoming is not None and
not self._validate_max_subcloud_rehoming(max_subcloud_rehoming)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid max-subcloud-rehoming'))
if (system_leader_id and
not self._validate_system_leader_id(system_leader_id)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-id'))
if (system_leader_name is not None and
not utils.validate_name(system_leader_name)):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid system-leader-name'))
if (migration_status and
migration_status.lower() not in [
consts.PEER_GROUP_MIGRATING,
consts.PEER_GROUP_MIGRATION_COMPLETE,
consts.PEER_GROUP_MIGRATION_NONE
]):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid migration_status'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid group-state"))
if (
max_subcloud_rehoming is not None
and not self._validate_max_subcloud_rehoming(max_subcloud_rehoming)
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid max-subcloud-rehoming"))
if system_leader_id and not self._validate_system_leader_id(
system_leader_id
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid system-leader-id"))
if system_leader_name is not None and not utils.validate_name(
system_leader_name
):
pecan.abort(httpclient.BAD_REQUEST, _("Invalid system-leader-name"))
if migration_status and migration_status.lower() not in [
consts.PEER_GROUP_MIGRATING,
consts.PEER_GROUP_MIGRATION_COMPLETE,
consts.PEER_GROUP_MIGRATION_NONE,
]:
pecan.abort(httpclient.BAD_REQUEST, _("Invalid migration_status"))
# Update on peer site(s)
if (not utils.is_req_from_another_dc(request)) and sync_needed:
success_peer_ids, failed_peer_ids = \
success_peer_ids, failed_peer_ids = (
self.rpc_client.update_subcloud_peer_group(
context, group.id, group_state, max_subcloud_rehoming,
group.peer_group_name, peer_group_name)
context,
group.id,
group_state,
max_subcloud_rehoming,
group.peer_group_name,
peer_group_name,
)
)
if failed_peer_ids:
if not success_peer_ids:
# Reject if all failed
pecan.abort(
httpclient.FORBIDDEN,
_('Unable to sync the update to the peer site(s)'))
_("Unable to sync the update to the peer site(s)"),
)
# Local update will continue if it's only partial
# failure.
# TODO(gherzmann): update the association sync status to
# out-of-date on partial failures when support for multiple
# associations is added (geo-redundancy phase 2)
LOG.error(f"Failed to sync the subcloud peer group "
f"{group.id} update on the peer site(s) "
f"{failed_peer_ids} with the values: "
f"{group_state=}, "
f"{max_subcloud_rehoming=}, "
f"{peer_group_name=}")
LOG.error(
f"Failed to sync the subcloud peer group {group.id} update on "
f"the peer site(s) {failed_peer_ids} with the values: "
f"{group_state=}, {max_subcloud_rehoming=}, {peer_group_name=}"
)
try:
updated_peer_group = db_api.subcloud_peer_group_update(
@ -378,29 +397,31 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
max_subcloud_rehoming=max_subcloud_rehoming,
system_leader_id=system_leader_id,
system_leader_name=system_leader_name,
migration_status=migration_status)
return db_api.subcloud_peer_group_db_model_to_dict(
updated_peer_group
migration_status=migration_status,
)
return db_api.subcloud_peer_group_db_model_to_dict(updated_peer_group)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
# additional exceptions.
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to update subcloud peer group'))
elif verb == 'migrate':
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to update subcloud peer group"),
)
elif verb == "migrate":
payload = json.loads(request.body)
LOG.info("Handling migrate subcloud peer group request for: %s" %
group_ref)
LOG.info("Handling migrate subcloud peer group request for: %s" % group_ref)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
if 'sysadmin_password' not in payload:
msg = ("Unable to migrate subcloud peer group: %s "
"need sysadmin_password" % group_ref)
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
if "sysadmin_password" not in payload:
msg = (
"Unable to migrate subcloud peer group: %s need sysadmin_password"
% group_ref
)
LOG.error(msg)
pecan.abort(400, _(msg))
payload['peer_group'] = group_ref
payload["peer_group"] = group_ref
# Validate subclouds
subclouds = db_api.subcloud_get_for_peer_group(context, group.id)
rehome_ready_subclouds = []
@ -409,67 +430,81 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
# Verify rehome data
rehome_data_json_str = tmp_subcloud.rehome_data
if not rehome_data_json_str:
msg = ("Unable to migrate subcloud: %s "
"required rehoming data is missing" %
tmp_subcloud.name)
msg = (
"Unable to migrate subcloud: %s "
"required rehoming data is missing" % tmp_subcloud.name
)
err_msg_list.append(msg)
continue
tmp_rehome_data = json.loads(rehome_data_json_str)
if 'saved_payload' not in tmp_rehome_data:
msg = ("Unable to migrate subcloud: %s "
"saved_payload is missing in "
"rehoming data" % tmp_subcloud.name)
if "saved_payload" not in tmp_rehome_data:
msg = (
"Unable to migrate subcloud: %s saved_payload is missing in "
"rehoming data" % tmp_subcloud.name
)
err_msg_list.append(msg)
continue
saved_payload = tmp_rehome_data['saved_payload']
saved_payload = tmp_rehome_data["saved_payload"]
# Validate saved_payload
if not saved_payload:
msg = ("Unable to migrate subcloud: %s saved_payload "
"is empty" % tmp_subcloud.name)
msg = (
"Unable to migrate subcloud: %s saved_payload is empty"
% tmp_subcloud.name
)
err_msg_list.append(msg)
continue
if 'bootstrap-address' not in saved_payload:
msg = ("Unable to migrate subcloud: %s, "
"bootstrap-address is missing in rehoming "
"data" % tmp_subcloud.name)
if "bootstrap-address" not in saved_payload:
msg = (
"Unable to migrate subcloud: %s, bootstrap-address is missing "
"in rehoming data" % tmp_subcloud.name
)
err_msg_list.append(msg)
continue
# If any subcloud in the peer group is in 'rehoming'
# or 'pre-rehome'state, we consider the peer group
# is already in batch rehoming, then abort.
rehome_states = [consts.DEPLOY_STATE_PRE_REHOME,
consts.DEPLOY_STATE_REHOMING]
rehome_states = [
consts.DEPLOY_STATE_PRE_REHOME,
consts.DEPLOY_STATE_REHOMING,
]
if tmp_subcloud.deploy_status in rehome_states:
msg = ("Unable to migrate subcloud peer group %s, "
"subcloud %s already in rehoming process" %
(group.peer_group_name, tmp_subcloud.name))
msg = (
"Unable to migrate subcloud peer group %s, "
"subcloud %s already in rehoming process"
% (group.peer_group_name, tmp_subcloud.name)
)
err_msg_list.append(msg)
continue
# Filter for secondary/rehome-failed/rehome-prep-failed
# subclouds, which is the correct state for rehoming
if (tmp_subcloud.deploy_status in
[consts.DEPLOY_STATE_SECONDARY,
consts.DEPLOY_STATE_REHOME_FAILED,
consts.DEPLOY_STATE_REHOME_PREP_FAILED]):
if tmp_subcloud.deploy_status in [
consts.DEPLOY_STATE_SECONDARY,
consts.DEPLOY_STATE_REHOME_FAILED,
consts.DEPLOY_STATE_REHOME_PREP_FAILED,
]:
rehome_ready_subclouds.append(tmp_subcloud)
else:
LOG.info("Excluding subcloud: %s from batch migration: "
"subcloud deploy_status is not secondary, "
"rehome-failed or rehome-prep-failed" %
tmp_subcloud.name)
LOG.info(
"Excluding subcloud: %s from batch migration: "
"subcloud deploy_status is not secondary, "
"rehome-failed or rehome-prep-failed" % tmp_subcloud.name
)
if err_msg_list:
for m in err_msg_list:
LOG.error(m)
pecan.abort(500, _("Batch migrate subclouds error: %s" %
err_msg_list))
pecan.abort(500, _("Batch migrate subclouds error: %s" % err_msg_list))
if not rehome_ready_subclouds:
pecan.abort(400, _("Nothing to migrate, no "
"secondary, rehome-failed or "
"rehome-prep-failed subcloud in peer "
"group %s" % group.peer_group_name))
pecan.abort(
400,
_(
"Nothing to migrate, no secondary, rehome-failed or "
"rehome-prep-failed subcloud in peer group %s"
% group.peer_group_name
),
)
# Call batch migrate
try:
@ -479,52 +514,82 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
pecan.abort(422, e.value)
except Exception:
LOG.exception(
"Unable to batch migrate peer group %s" % group.peer_group_name)
pecan.abort(500, _('Unable to batch migrate '
'peer group %s' % group.peer_group_name))
elif verb == 'audit':
"Unable to batch migrate peer group %s" % group.peer_group_name
)
pecan.abort(
500,
_("Unable to batch migrate peer group %s" % group.peer_group_name),
)
elif verb == "audit":
payload = json.loads(request.body)
if 'peer_uuid' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing peer_uuid' %
group.peer_group_name))
if 'peer_group_name' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing peer_group_name' %
group.peer_group_name))
if 'group_priority' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing group_priority' %
group.peer_group_name))
if 'group_state' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing group_state' %
group.peer_group_name))
if 'system_leader_id' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing system_leader_id' %
group.peer_group_name))
if 'system_leader_name' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing system_leader_name' %
group.peer_group_name))
if 'migration_status' not in payload:
pecan.abort(400, _('Unable to audit peer group '
'%s, missing migration_status' %
group.peer_group_name))
if "peer_uuid" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing peer_uuid"
% group.peer_group_name
),
)
if "peer_group_name" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing peer_group_name"
% group.peer_group_name
),
)
if "group_priority" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing group_priority"
% group.peer_group_name
),
)
if "group_state" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing group_state"
% group.peer_group_name
),
)
if "system_leader_id" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing system_leader_id"
% group.peer_group_name
),
)
if "system_leader_name" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing system_leader_name"
% group.peer_group_name
),
)
if "migration_status" not in payload:
pecan.abort(
400,
_(
"Unable to audit peer group %s, missing migration_status"
% group.peer_group_name
),
)
try:
msg = self.rpc_client.peer_group_audit_notify(
context,
group.peer_group_name,
payload)
context, group.peer_group_name, payload
)
return {"message": msg}
except Exception:
LOG.exception('Unable to audit peer group %s' %
group.peer_group_name)
pecan.abort(500, _('Unable to audit peer group %s' %
group.peer_group_name))
LOG.exception("Unable to audit peer group %s" % group.peer_group_name)
pecan.abort(
500, _("Unable to audit peer group %s" % group.peer_group_name)
)
else:
pecan.abort(400, _('Invalid request'))
pecan.abort(400, _("Invalid request"))
def _validate_group_priority(self, priority):
try:
@ -565,40 +630,46 @@ class SubcloudPeerGroupsController(restcomm.GenericPathController):
except Exception:
return False
@index.when(method='delete', template='json')
@index.when(method="delete", template="json")
def delete(self, group_ref):
"""Delete the subcloud peer group."""
policy.authorize(subcloud_peer_group_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
subcloud_peer_group_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if group_ref is None:
pecan.abort(httpclient.BAD_REQUEST,
_('Subcloud Peer Group Name or ID required'))
pecan.abort(
httpclient.BAD_REQUEST, _("Subcloud Peer Group Name or ID required")
)
group = utils.subcloud_peer_group_get_by_ref(context, group_ref)
if group is None:
LOG.info("Subcloud Peer Group [%s] not found" % group_ref)
pecan.abort(httpclient.NOT_FOUND, _('Subcloud Peer Group not found'))
pecan.abort(httpclient.NOT_FOUND, _("Subcloud Peer Group not found"))
LOG.info("Handling delete subcloud peer group request for: %s" %
group)
LOG.info("Handling delete subcloud peer group request for: %s" % group)
# A peer group cannot be deleted if it is used by any associations
association = db_api.peer_group_association_get_by_peer_group_id(
context, group.id)
context, group.id
)
if len(association) > 0:
pecan.abort(httpclient.BAD_REQUEST,
_("Cannot delete a peer group "
"which is associated with a system peer."))
pecan.abort(
httpclient.BAD_REQUEST,
_("Cannot delete a peer group which is associated with a system peer."),
)
try:
db_api.subcloud_peer_group_destroy(context, group.id)
# Disassociate the subcloud.
subclouds = db_api.subcloud_get_for_peer_group(context, group.id)
for subcloud in subclouds:
db_api.subcloud_update(context, subcloud.id,
peer_group_id='none')
db_api.subcloud_update(context, subcloud.id, peer_group_id="none")
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to delete subcloud peer group'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR,
_("Unable to delete subcloud peer group"),
)
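For reference, a minimal sketch of the request bodies that the migrate and audit verbs handled above expect. The key names mirror the controller's checks; the sample values are illustrative assumptions, not taken from the commit.

# Hypothetical request bodies for the subcloud-peer-groups verbs above.
# Key names come from the controller checks; all values are made up.
import json

migrate_body = {
    # Required by the 'migrate' verb. Each subcloud in the group must also
    # carry rehome_data whose saved_payload includes a bootstrap-address.
    "sysadmin_password": "<sysadmin password>",
}

audit_body = {
    # Every key below is mandatory for the 'audit' verb.
    "peer_uuid": "0b8dfbc2-cbcb-4f62-a06f-37c7b1e41bcd",
    "peer_group_name": "pg-1",
    "group_priority": 0,
    "group_state": "enabled",
    "system_leader_id": "ac4e2f2e-92a2-4a47-9a0e-5a0b9c3c7e01",
    "system_leader_name": "site-a",
    "migration_status": None,
}

print(json.dumps(migrate_body))
print(json.dumps(audit_body, indent=2))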

File diff suppressed because it is too large


@ -1,5 +1,5 @@
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2017-2022 Wind River Systems, Inc.
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -42,19 +42,22 @@ class SwUpdateOptionsController(object):
super(SwUpdateOptionsController, self).__init__()
self.rpc_client = rpc_client.ManagerClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, subcloud_ref=None):
"""Get details about software update options.
:param subcloud: name or id of subcloud (optional)
"""
policy.authorize(sw_update_options_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_options_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if subcloud_ref is None:
@ -62,21 +65,21 @@ class SwUpdateOptionsController(object):
# Prepend the all clouds default options to the result.
result = dict()
result['sw-update-options'] = list()
result["sw-update-options"] = list()
default_sw_update_opts_dict = utils.get_sw_update_opts(
context)
default_sw_update_opts_dict = utils.get_sw_update_opts(context)
result['sw-update-options'].append(default_sw_update_opts_dict)
result["sw-update-options"].append(default_sw_update_opts_dict)
subclouds = db_api.sw_update_opts_get_all_plus_subcloud_info(
context)
subclouds = db_api.sw_update_opts_get_all_plus_subcloud_info(context)
for subcloud, sw_update_opts in subclouds:
if sw_update_opts:
result['sw-update-options'].append(
result["sw-update-options"].append(
db_api.sw_update_opts_w_name_db_model_to_dict(
sw_update_opts, subcloud.name))
sw_update_opts, subcloud.name
)
)
return result
@ -93,22 +96,20 @@ class SwUpdateOptionsController(object):
try:
subcloud = db_api.subcloud_get(context, subcloud_ref)
except exceptions.SubcloudNotFound:
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
else:
# Look up subcloud by name
try:
subcloud = db_api.subcloud_get_by_name(context,
subcloud_ref)
subcloud = db_api.subcloud_get_by_name(context, subcloud_ref)
except exceptions.SubcloudNameNotFound:
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
try:
return utils.get_sw_update_opts(
context, subcloud_id=subcloud.id)
return utils.get_sw_update_opts(context, subcloud_id=subcloud.id)
except Exception as e:
pecan.abort(404, _('%s') % e)
pecan.abort(404, _("%s") % e)
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self, subcloud_ref=None):
"""Update or create sw update options.
@ -118,13 +119,16 @@ class SwUpdateOptionsController(object):
# Note creating or updating subcloud specific options require
# setting all options.
policy.authorize(sw_update_options_policy.POLICY_ROOT % "update", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_options_policy.POLICY_ROOT % "update",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
payload = eval(request.body)
if not payload:
pecan.abort(400, _('Body required'))
pecan.abort(400, _("Body required"))
if subcloud_ref == dccommon_consts.DEFAULT_REGION_NAME:
@ -136,11 +140,12 @@ class SwUpdateOptionsController(object):
try:
sw_update_opts_ref = db_api.sw_update_opts_default_update(
context,
payload['storage-apply-type'],
payload['worker-apply-type'],
payload['max-parallel-workers'],
payload['alarm-restriction-type'],
payload['default-instance-action'])
payload["storage-apply-type"],
payload["worker-apply-type"],
payload["max-parallel-workers"],
payload["alarm-restriction-type"],
payload["default-instance-action"],
)
except Exception as e:
LOG.exception(e)
raise e
@ -149,11 +154,12 @@ class SwUpdateOptionsController(object):
try:
sw_update_opts_ref = db_api.sw_update_opts_default_create(
context,
payload['storage-apply-type'],
payload['worker-apply-type'],
payload['max-parallel-workers'],
payload['alarm-restriction-type'],
payload['default-instance-action'])
payload["storage-apply-type"],
payload["worker-apply-type"],
payload["max-parallel-workers"],
payload["alarm-restriction-type"],
payload["default-instance-action"],
)
except Exception as e:
LOG.exception(e)
raise e
@ -165,53 +171,57 @@ class SwUpdateOptionsController(object):
try:
subcloud = db_api.subcloud_get(context, subcloud_ref)
except exceptions.SubcloudNotFound:
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
subcloud_name = subcloud.name
else:
# Look up subcloud by name
try:
subcloud = db_api.subcloud_get_by_name(context,
subcloud_ref)
subcloud = db_api.subcloud_get_by_name(context, subcloud_ref)
except exceptions.SubcloudNameNotFound:
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
subcloud_name = subcloud_ref
sw_update_opts = db_api.sw_update_opts_get(context,
subcloud.id)
sw_update_opts = db_api.sw_update_opts_get(context, subcloud.id)
if sw_update_opts is None:
sw_update_opts_ref = db_api.sw_update_opts_create(
context,
subcloud.id,
payload['storage-apply-type'],
payload['worker-apply-type'],
payload['max-parallel-workers'],
payload['alarm-restriction-type'],
payload['default-instance-action'])
payload["storage-apply-type"],
payload["worker-apply-type"],
payload["max-parallel-workers"],
payload["alarm-restriction-type"],
payload["default-instance-action"],
)
else:
# a row is present in table, update
sw_update_opts_ref = db_api.sw_update_opts_update(
context,
subcloud.id,
payload['storage-apply-type'],
payload['worker-apply-type'],
payload['max-parallel-workers'],
payload['alarm-restriction-type'],
payload['default-instance-action'])
payload["storage-apply-type"],
payload["worker-apply-type"],
payload["max-parallel-workers"],
payload["alarm-restriction-type"],
payload["default-instance-action"],
)
return db_api.sw_update_opts_w_name_db_model_to_dict(
sw_update_opts_ref, subcloud_name)
sw_update_opts_ref, subcloud_name
)
@index.when(method='delete', template='json')
@index.when(method="delete", template="json")
def delete(self, subcloud_ref):
"""Delete the software update options."""
policy.authorize(sw_update_options_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_options_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if subcloud_ref == dccommon_consts.DEFAULT_REGION_NAME:
@ -230,18 +240,17 @@ class SwUpdateOptionsController(object):
try:
subcloud = db_api.subcloud_get(context, subcloud_ref)
except exceptions.SubcloudNotFound:
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
else:
# Look up subcloud by name
try:
subcloud = db_api.subcloud_get_by_name(context,
subcloud_ref)
subcloud = db_api.subcloud_get_by_name(context, subcloud_ref)
except exceptions.SubcloudNameNotFound:
pecan.abort(404, _('Subcloud not found'))
pecan.abort(404, _("Subcloud not found"))
# Delete the subcloud specific options
if db_api.sw_update_opts_get(context, subcloud.id):
db_api.sw_update_opts_destroy(context, subcloud.id)
else:
pecan.abort(404, _('Subcloud patch options not found'))
pecan.abort(404, _("Subcloud patch options not found"))
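A minimal sketch of the payload the sw-update-options POST handler above reads. The five option keys are exactly the ones the controller indexes; the sample values and the notion of targeting the default region versus a named subcloud are illustrative assumptions. The handler itself parses the body with eval(request.body); the sketch only builds the dict.

# Illustrative sw-update-options payload; values are examples only.
import json

sw_update_options = {
    "storage-apply-type": "parallel",
    "worker-apply-type": "parallel",
    "max-parallel-workers": 2,
    "alarm-restriction-type": "relaxed",
    "default-instance-action": "migrate",
}

# Sent against the default region, it updates or creates the all-clouds
# defaults; sent with a subcloud name or ID, it becomes that subcloud's
# override.
print(json.dumps(sw_update_options, indent=2))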


@ -61,42 +61,44 @@ class SwUpdateStrategyController(object):
super(SwUpdateStrategyController, self).__init__()
self.orch_rpc_client = orch_rpc_client.ManagerOrchestratorClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, steps=None, cloud_name=None):
"""Get details about software update strategy.
:param steps: get the steps for this strategy (optional)
:param cloud_name: name of cloud (optional)
"""
policy.authorize(sw_update_strat_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_strat_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
# If 'type' is in the request params, filter the update_type
update_type_filter = request.params.get('type', None)
update_type_filter = request.params.get("type", None)
if steps is None:
# Strategy requested
strategy = None
try:
strategy = db_api.sw_update_strategy_get(
context,
update_type=update_type_filter)
context, update_type=update_type_filter
)
except exceptions.NotFound:
if update_type_filter is None:
pecan.abort(404, _('Strategy not found'))
pecan.abort(404, _("Strategy not found"))
else:
pecan.abort(404,
_("Strategy of type '%s' not found"
% update_type_filter))
pecan.abort(
404, _("Strategy of type '%s' not found" % update_type_filter)
)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(
strategy)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(strategy)
return strategy_dict
elif steps == "steps":
@ -104,11 +106,12 @@ class SwUpdateStrategyController(object):
if cloud_name is None:
# List of steps requested
result = dict()
result['strategy-steps'] = list()
result["strategy-steps"] = list()
strategy_steps = db_api.strategy_step_get_all(context)
for strategy_step in strategy_steps:
result['strategy-steps'].append(
db_api.strategy_step_db_model_to_dict(strategy_step))
result["strategy-steps"].append(
db_api.strategy_step_db_model_to_dict(strategy_step)
)
return result
else:
@ -120,184 +123,213 @@ class SwUpdateStrategyController(object):
try:
strategy_step = db_api.strategy_step_get(context, None)
except exceptions.StrategyStepNotFound:
pecan.abort(404, _('Strategy step not found'))
pecan.abort(404, _("Strategy step not found"))
else:
try:
strategy_step = db_api.strategy_step_get_by_name(
context, cloud_name)
context, cloud_name
)
except exceptions.StrategyStepNameNotFound:
pecan.abort(404, _('Strategy step not found'))
pecan.abort(404, _("Strategy step not found"))
strategy_step_dict = db_api.strategy_step_db_model_to_dict(
strategy_step)
strategy_step
)
return strategy_step_dict
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self, actions=None):
"""Create a new software update strategy."""
context = restcomm.extract_context_from_environ()
payload = eval(request.body)
if not payload:
pecan.abort(400, _('Body required'))
pecan.abort(400, _("Body required"))
if actions is None:
policy.authorize(sw_update_strat_policy.POLICY_ROOT % "create", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_strat_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
# Validate any options that were supplied
strategy_type = payload.get('type')
strategy_type = payload.get("type")
if not strategy_type:
pecan.abort(400, _('type required'))
pecan.abort(400, _("type required"))
if strategy_type not in SUPPORTED_STRATEGY_TYPES:
pecan.abort(400, _('type invalid'))
pecan.abort(400, _("type invalid"))
subcloud_apply_type = payload.get('subcloud-apply-type')
subcloud_apply_type = payload.get("subcloud-apply-type")
if subcloud_apply_type is not None:
if subcloud_apply_type not in [
consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
consts.SUBCLOUD_APPLY_TYPE_SERIAL]:
pecan.abort(400, _('subcloud-apply-type invalid'))
consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
consts.SUBCLOUD_APPLY_TYPE_SERIAL,
]:
pecan.abort(400, _("subcloud-apply-type invalid"))
patch_file = payload.get('patch')
patch_file = payload.get("patch")
if patch_file and not os.path.isfile(patch_file):
message = f"Patch file {patch_file} is missing."
pecan.abort(400, _(message))
if strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE and not payload.get(
'release'):
message = ("Release parameter is required for strategy "
f"type {strategy_type}.")
"release"
):
message = (
"Release parameter is required for strategy type {strategy_type}."
)
pecan.abort(400, _(message))
max_parallel_subclouds_str = payload.get('max-parallel-subclouds')
max_parallel_subclouds_str = payload.get("max-parallel-subclouds")
if max_parallel_subclouds_str is not None:
max_parallel_subclouds = None
try:
max_parallel_subclouds = int(max_parallel_subclouds_str)
except ValueError:
pecan.abort(400, _('max-parallel-subclouds invalid'))
pecan.abort(400, _("max-parallel-subclouds invalid"))
if max_parallel_subclouds < 1 or max_parallel_subclouds > 500:
pecan.abort(400, _('max-parallel-subclouds invalid'))
pecan.abort(400, _("max-parallel-subclouds invalid"))
stop_on_failure = payload.get('stop-on-failure')
stop_on_failure = payload.get("stop-on-failure")
if stop_on_failure is not None:
if stop_on_failure not in ["true", "false"]:
pecan.abort(400, _('stop-on-failure invalid'))
pecan.abort(400, _("stop-on-failure invalid"))
force_flag = payload.get('force')
force_flag = payload.get("force")
if force_flag is not None:
if force_flag not in ["true", "false"]:
pecan.abort(400, _('force invalid'))
pecan.abort(400, _("force invalid"))
elif strategy_type not in FORCE_ALL_TYPES:
if payload.get('cloud_name') is None:
pecan.abort(400,
_('The --force option can only be applied '
'for a single subcloud. Please specify '
'the subcloud name.'))
if payload.get("cloud_name") is None:
pecan.abort(
400,
_(
"The --force option can only be applied for a single "
"subcloud. Please specify the subcloud name."
),
)
subcloud_group = payload.get('subcloud_group')
subcloud_group = payload.get("subcloud_group")
# prevents passing both cloud_name and subcloud_group options
# from REST APIs and checks if the group exists
if subcloud_group is not None:
if payload.get('cloud_name') is not None:
pecan.abort(400, _('cloud_name and subcloud_group are '
'mutually exclusive'))
if payload.get("cloud_name") is not None:
pecan.abort(
400,
_("cloud_name and subcloud_group are mutually exclusive"),
)
if (subcloud_apply_type is not None or
max_parallel_subclouds_str is not None):
pecan.abort(400, _('subcloud-apply-type and '
'max-parallel-subclouds are not '
'supported when subcloud_group is '
'applied'))
if (
subcloud_apply_type is not None
or max_parallel_subclouds_str is not None
):
pecan.abort(
400,
_(
"subcloud-apply-type and max-parallel-subclouds are not "
"supported when subcloud_group is applied"
),
)
group = utils.subcloud_group_get_by_ref(context,
subcloud_group)
group = utils.subcloud_group_get_by_ref(context, subcloud_group)
if group is None:
pecan.abort(400, _('Invalid group_id'))
pecan.abort(400, _("Invalid group_id"))
# get_sw_version is used here to validate the
# release parameter if specified.
utils.get_sw_version(payload.get('release'))
utils.get_sw_version(payload.get("release"))
# Not adding validation for extra args. Passing them through.
try:
# Ask dcmanager-manager to create the strategy.
# It will do all the real work...
return self.orch_rpc_client.create_sw_update_strategy(context,
payload)
return self.orch_rpc_client.create_sw_update_strategy(context, payload)
except RemoteError as e:
pecan.abort(
422, _("Unable to create strategy of type '%s': %s")
% (strategy_type, e.value)
422,
_("Unable to create strategy of type '%s': %s")
% (strategy_type, e.value),
)
except Exception as e:
LOG.exception(e)
pecan.abort(500, _('Unable to create strategy'))
elif actions == 'actions':
pecan.abort(500, _("Unable to create strategy"))
elif actions == "actions":
# If 'type' is in the request params, filter the update_type
update_type_filter = request.params.get('type', None)
update_type_filter = request.params.get("type", None)
# Apply or abort a strategy
action = payload.get('action')
action = payload.get("action")
if not action:
pecan.abort(400, _('action required'))
pecan.abort(400, _("action required"))
if action == consts.SW_UPDATE_ACTION_APPLY:
policy.authorize(sw_update_strat_policy.POLICY_ROOT % "apply",
{}, restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_strat_policy.POLICY_ROOT % "apply",
{},
restcomm.extract_credentials_for_policy(),
)
try:
# Ask dcmanager-manager to apply the strategy.
# It will do all the real work...
return self.orch_rpc_client.apply_sw_update_strategy(
context,
update_type=update_type_filter)
context, update_type=update_type_filter
)
except RemoteError as e:
pecan.abort(
422, _("Unable to apply strategy of type '%s': %s")
% (update_type_filter, e.value)
422,
_("Unable to apply strategy of type '%s': %s")
% (update_type_filter, e.value),
)
except Exception as e:
LOG.exception(e)
pecan.abort(500, _('Unable to apply strategy'))
pecan.abort(500, _("Unable to apply strategy"))
elif action == consts.SW_UPDATE_ACTION_ABORT:
policy.authorize(sw_update_strat_policy.POLICY_ROOT % "abort",
{}, restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_strat_policy.POLICY_ROOT % "abort",
{},
restcomm.extract_credentials_for_policy(),
)
try:
# Ask dcmanager-manager to abort the strategy.
# It will do all the real work...
return self.orch_rpc_client.abort_sw_update_strategy(
context,
update_type=update_type_filter)
context, update_type=update_type_filter
)
except RemoteError as e:
pecan.abort(
422, _("Unable to abort strategy of type '%s': %s")
% (update_type_filter, e.value)
422,
_("Unable to abort strategy of type '%s': %s")
% (update_type_filter, e.value),
)
except Exception as e:
LOG.exception(e)
pecan.abort(500, _('Unable to abort strategy'))
pecan.abort(500, _("Unable to abort strategy"))
@index.when(method='delete', template='json')
@index.when(method="delete", template="json")
def delete(self):
"""Delete the software update strategy."""
policy.authorize(sw_update_strat_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
sw_update_strat_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
# If 'type' is in the request params, filter the update_type
update_type_filter = request.params.get('type', None)
update_type_filter = request.params.get("type", None)
try:
# Ask dcmanager-manager to delete the strategy.
# It will do all the real work...
return self.orch_rpc_client.delete_sw_update_strategy(
context,
update_type=update_type_filter)
context, update_type=update_type_filter
)
except RemoteError as e:
pecan.abort(
422, _("Unable to delete strategy of type '%s': %s")
% (update_type_filter, e.value)
422,
_("Unable to delete strategy of type '%s': %s")
% (update_type_filter, e.value),
)
except Exception as e:
LOG.exception(e)
pecan.abort(500, _('Unable to delete strategy'))
pecan.abort(500, _("Unable to delete strategy"))
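A minimal sketch of the bodies accepted by the sw-update-strategy handlers above. Key names and constraints come from the validation code in this file; the strategy type and the other sample values are illustrative assumptions.

# Illustrative sw-update-strategy bodies; values are examples only.
create_body = {
    "type": "patch",                    # must be one of SUPPORTED_STRATEGY_TYPES
    "subcloud-apply-type": "parallel",  # or "serial"
    "max-parallel-subclouds": "10",     # parsed with int(); accepted range 1..500
    "stop-on-failure": "true",          # literal strings "true" / "false"
    "cloud_name": "subcloud1",          # mutually exclusive with "subcloud_group"
    # "release" is mandatory when type is the software deploy strategy type.
}

# POST .../actions applies or aborts the current strategy, optionally
# filtered by ?type=<strategy type>.
apply_body = {"action": "apply"}
abort_body = {"action": "abort"}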


@ -51,8 +51,7 @@ MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD = 30
# the heartbeat failure threshold is reached
#
# We will only support alarm in the first release
SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST = \
["alarm", "rehome", "delegate"]
SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST = ["alarm", "rehome", "delegate"]
MIN_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT = 300
MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT = 36000
@ -64,7 +63,7 @@ class SystemPeersController(restcomm.GenericPathController):
self.dcmanager_rpc_client = rpc_client.ManagerClient()
@expose(generic=True, template='json')
@expose(generic=True, template="json")
def index(self):
# Route the request to specific methods with parameters
pass
@ -74,12 +73,12 @@ class SystemPeersController(restcomm.GenericPathController):
try:
payload = json.loads(request.body)
except Exception:
error_msg = 'Request body is malformed.'
error_msg = "Request body is malformed."
LOG.exception(error_msg)
pecan.abort(400, _(error_msg))
if not isinstance(payload, dict):
pecan.abort(400, _('Invalid request body format'))
pecan.abort(400, _("Invalid request body format"))
return payload
def _get_peer_group_list_for_system_peer(self, context, peer_id):
@ -95,10 +94,10 @@ class SystemPeersController(restcomm.GenericPathController):
system_peer_list.append(peer_dict)
result = dict()
result['system_peers'] = system_peer_list
result["system_peers"] = system_peer_list
return result
@index.when(method='GET', template='json')
@index.when(method="GET", template="json")
def get(self, peer_ref=None, subcloud_peer_groups=False):
"""Retrieve information about a system peer.
@ -110,8 +109,11 @@ class SystemPeersController(restcomm.GenericPathController):
:param subcloud_peer_groups: If this request should return subcloud
peer groups
"""
policy.authorize(system_peer_policy.POLICY_ROOT % "get", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
system_peer_policy.POLICY_ROOT % "get",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
if peer_ref is None:
@ -120,7 +122,7 @@ class SystemPeersController(restcomm.GenericPathController):
peer = utils.system_peer_get_by_ref(context, peer_ref)
if peer is None:
pecan.abort(httpclient.NOT_FOUND, _('System Peer not found'))
pecan.abort(httpclient.NOT_FOUND, _("System Peer not found"))
if subcloud_peer_groups:
return self._get_peer_group_list_for_system_peer(context, peer.id)
system_peer_dict = db_api.system_peer_db_model_to_dict(peer)
@ -135,8 +137,11 @@ class SystemPeersController(restcomm.GenericPathController):
return False
def _validate_manager_endpoint(self, endpoint):
if not endpoint or len(endpoint) >= MAX_SYSTEM_PEER_MANAGER_ENDPOINT_LEN or \
not endpoint.startswith(("http", "https")):
if (
not endpoint
or len(endpoint) >= MAX_SYSTEM_PEER_MANAGER_ENDPOINT_LEN
or not endpoint.startswith(("http", "https"))
):
LOG.debug("Invalid manager_endpoint: %s" % endpoint)
return False
return True
@ -179,286 +184,334 @@ class SystemPeersController(restcomm.GenericPathController):
return False
# We do not support less than min or greater than max
if val < MIN_SYSTEM_PEER_HEARTBEAT_INTERVAL or \
val > MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL:
if (
val < MIN_SYSTEM_PEER_HEARTBEAT_INTERVAL
or val > MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL
):
LOG.debug("Invalid heartbeat_interval: %s" % heartbeat_interval)
return False
return True
def _validate_heartbeat_failure_threshold(self,
heartbeat_failure_threshold):
def _validate_heartbeat_failure_threshold(self, heartbeat_failure_threshold):
try:
# Check the value is an integer
val = int(heartbeat_failure_threshold)
except ValueError:
LOG.warning("Invalid heartbeat_failure_threshold: %s" %
heartbeat_failure_threshold)
LOG.warning(
"Invalid heartbeat_failure_threshold: %s" % heartbeat_failure_threshold
)
return False
# We do not support less than min or greater than max
if val < MIN_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD or \
val > MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD:
LOG.debug("Invalid heartbeat_failure_threshold: %s" %
heartbeat_failure_threshold)
if (
val < MIN_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD
or val > MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD
):
LOG.debug(
"Invalid heartbeat_failure_threshold: %s" % heartbeat_failure_threshold
)
return False
return True
def _validate_heartbeat_failure_policy(self, heartbeat_failure_policy):
if heartbeat_failure_policy not in SYSTEM_PEER_HEARTBEAT_FAILURE_POLICY_LIST:
LOG.debug("Invalid heartbeat_failure_policy: %s" %
heartbeat_failure_policy)
LOG.debug("Invalid heartbeat_failure_policy: %s" % heartbeat_failure_policy)
return False
return True
def _validate_heartbeat_maintenance_timeout(self,
heartbeat_maintenance_timeout):
def _validate_heartbeat_maintenance_timeout(self, heartbeat_maintenance_timeout):
try:
# Check the value is an integer
val = int(heartbeat_maintenance_timeout)
except ValueError:
LOG.warning("Invalid heartbeat_maintenance_timeout: %s" %
heartbeat_maintenance_timeout)
LOG.warning(
"Invalid heartbeat_maintenance_timeout: %s"
% heartbeat_maintenance_timeout
)
return False
# We do not support less than min or greater than max
if val < MIN_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT or \
val > MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT:
LOG.debug("Invalid heartbeat_maintenance_timeout: %s" %
heartbeat_maintenance_timeout)
if (
val < MIN_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT
or val > MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT
):
LOG.debug(
"Invalid heartbeat_maintenance_timeout: %s"
% heartbeat_maintenance_timeout
)
return False
return True
@index.when(method='POST', template='json')
@index.when(method="POST", template="json")
def post(self):
"""Create a new system peer."""
policy.authorize(system_peer_policy.POLICY_ROOT % "create", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
system_peer_policy.POLICY_ROOT % "create",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
LOG.info("Creating a new system peer: %s" % context)
payload = self._get_payload(request)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
# Validate payload
peer_uuid = payload.get('peer_uuid')
peer_uuid = payload.get("peer_uuid")
if not self._validate_uuid(peer_uuid):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer uuid'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer uuid"))
peer_name = payload.get('peer_name')
peer_name = payload.get("peer_name")
if not utils.validate_name(peer_name):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer name'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer name"))
endpoint = payload.get('manager_endpoint')
endpoint = payload.get("manager_endpoint")
if not self._validate_manager_endpoint(endpoint):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer manager_endpoint'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer manager_endpoint"))
username = payload.get('manager_username')
username = payload.get("manager_username")
if not self._validate_manager_username(username):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer manager_username'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer manager_username"))
password = payload.get('manager_password')
password = payload.get("manager_password")
if not self._validate_manager_password(password):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer manager_password'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer manager_password"))
gateway_ip = payload.get('peer_controller_gateway_address')
gateway_ip = payload.get("peer_controller_gateway_address")
if not self._validate_peer_controller_gateway_ip(gateway_ip):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer peer_controller_gateway_address'))
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid peer peer_controller_gateway_address"),
)
# Optional request parameters
kwargs = {}
administrative_state = payload.get('administrative_state')
administrative_state = payload.get("administrative_state")
if administrative_state:
if not self._validate_administrative_state(administrative_state):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer administrative_state'))
kwargs['administrative_state'] = administrative_state
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid peer administrative_state")
)
kwargs["administrative_state"] = administrative_state
heartbeat_interval = payload.get('heartbeat_interval')
heartbeat_interval = payload.get("heartbeat_interval")
if heartbeat_interval is not None:
if not self._validate_heartbeat_interval(heartbeat_interval):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_interval'))
kwargs['heartbeat_interval'] = heartbeat_interval
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid peer heartbeat_interval")
)
kwargs["heartbeat_interval"] = heartbeat_interval
heartbeat_failure_threshold = \
payload.get('heartbeat_failure_threshold')
heartbeat_failure_threshold = payload.get("heartbeat_failure_threshold")
if heartbeat_failure_threshold is not None:
if not self._validate_heartbeat_failure_threshold(
heartbeat_failure_threshold):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_threshold'))
kwargs['heartbeat_failure_threshold'] = heartbeat_failure_threshold
heartbeat_failure_threshold
):
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid peer heartbeat_failure_threshold"),
)
kwargs["heartbeat_failure_threshold"] = heartbeat_failure_threshold
heartbeat_failure_policy = payload.get('heartbeat_failure_policy')
heartbeat_failure_policy = payload.get("heartbeat_failure_policy")
if heartbeat_failure_policy:
if not self._validate_heartbeat_failure_policy(
heartbeat_failure_policy):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_policy'))
kwargs['heartbeat_failure_policy'] = heartbeat_failure_policy
if not self._validate_heartbeat_failure_policy(heartbeat_failure_policy):
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid peer heartbeat_failure_policy")
)
kwargs["heartbeat_failure_policy"] = heartbeat_failure_policy
heartbeat_maintenance_timeout = \
payload.get('heartbeat_maintenance_timeout')
heartbeat_maintenance_timeout = payload.get("heartbeat_maintenance_timeout")
if heartbeat_maintenance_timeout is not None:
if not self._validate_heartbeat_maintenance_timeout(
heartbeat_maintenance_timeout):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_maintenance_timeout'))
kwargs['heartbeat_maintenance_timeout'] = \
heartbeat_maintenance_timeout
):
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid peer heartbeat_maintenance_timeout"),
)
kwargs["heartbeat_maintenance_timeout"] = heartbeat_maintenance_timeout
try:
peer_ref = db_api.system_peer_create(context,
peer_uuid,
peer_name,
endpoint,
username,
password,
gateway_ip, **kwargs)
peer_ref = db_api.system_peer_create(
context,
peer_uuid,
peer_name,
endpoint,
username,
password,
gateway_ip,
**kwargs
)
return db_api.system_peer_db_model_to_dict(peer_ref)
except db_exc.DBDuplicateEntry:
LOG.info("Peer create failed. Peer UUID %s already exists"
% peer_uuid)
pecan.abort(httpclient.CONFLICT,
_('A system peer with this UUID already exists'))
LOG.info("Peer create failed. Peer UUID %s already exists" % peer_uuid)
pecan.abort(
httpclient.CONFLICT, _("A system peer with this UUID already exists")
)
except RemoteError as e:
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to create system peer'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to create system peer")
)
@index.when(method='PATCH', template='json')
@index.when(method="PATCH", template="json")
def patch(self, peer_ref):
"""Update a system peer.
:param peer_ref: ID or UUID of system peer to update
"""
policy.authorize(system_peer_policy.POLICY_ROOT % "modify", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
system_peer_policy.POLICY_ROOT % "modify",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
LOG.info("Updating system peer: %s" % context)
if peer_ref is None:
pecan.abort(httpclient.BAD_REQUEST,
_('System Peer UUID or ID required'))
pecan.abort(httpclient.BAD_REQUEST, _("System Peer UUID or ID required"))
payload = self._get_payload(request)
if not payload:
pecan.abort(httpclient.BAD_REQUEST, _('Body required'))
pecan.abort(httpclient.BAD_REQUEST, _("Body required"))
peer = utils.system_peer_get_by_ref(context, peer_ref)
if peer is None:
pecan.abort(httpclient.NOT_FOUND, _('System Peer not found'))
pecan.abort(httpclient.NOT_FOUND, _("System Peer not found"))
peer_uuid, peer_name, endpoint, username, password, gateway_ip, \
administrative_state, heartbeat_interval, \
heartbeat_failure_threshold, heartbeat_failure_policy, \
heartbeat_maintenance_timeout = (
payload.get('peer_uuid'),
payload.get('peer_name'),
payload.get('manager_endpoint'),
payload.get('manager_username'),
payload.get('manager_password'),
payload.get('peer_controller_gateway_address'),
payload.get('administrative_state'),
payload.get('heartbeat_interval'),
payload.get('heartbeat_failure_threshold'),
payload.get('heartbeat_failure_policy'),
payload.get('heartbeat_maintenance_timeout')
)
(
peer_uuid,
peer_name,
endpoint,
username,
password,
gateway_ip,
administrative_state,
heartbeat_interval,
heartbeat_failure_threshold,
heartbeat_failure_policy,
heartbeat_maintenance_timeout,
) = (
payload.get("peer_uuid"),
payload.get("peer_name"),
payload.get("manager_endpoint"),
payload.get("manager_username"),
payload.get("manager_password"),
payload.get("peer_controller_gateway_address"),
payload.get("administrative_state"),
payload.get("heartbeat_interval"),
payload.get("heartbeat_failure_threshold"),
payload.get("heartbeat_failure_policy"),
payload.get("heartbeat_maintenance_timeout"),
)
if not (peer_uuid or peer_name or endpoint or username or password
or administrative_state or heartbeat_interval
or heartbeat_failure_threshold or heartbeat_failure_policy
or heartbeat_maintenance_timeout or gateway_ip):
pecan.abort(httpclient.BAD_REQUEST, _('nothing to update'))
if not (
peer_uuid
or peer_name
or endpoint
or username
or password
or administrative_state
or heartbeat_interval
or heartbeat_failure_threshold
or heartbeat_failure_policy
or heartbeat_maintenance_timeout
or gateway_ip
):
pecan.abort(httpclient.BAD_REQUEST, _("nothing to update"))
# Check value is not None or empty before calling validate
if peer_uuid:
if not self._validate_uuid(peer_uuid):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer uuid'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer uuid"))
if peer_name:
if not utils.validate_name(peer_name):
pecan.abort(httpclient.BAD_REQUEST, _('Invalid peer name'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer name"))
if endpoint:
if not self._validate_manager_endpoint(endpoint):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer manager_endpoint'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer manager_endpoint"))
if username:
if not self._validate_manager_username(username):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer manager_username'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer manager_username"))
if password:
if not self._validate_manager_password(password):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer manager_password'))
pecan.abort(httpclient.BAD_REQUEST, _("Invalid peer manager_password"))
if gateway_ip:
if not self._validate_peer_controller_gateway_ip(gateway_ip):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer peer_controller_gateway_address'))
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid peer peer_controller_gateway_address"),
)
if administrative_state:
if not self._validate_administrative_state(administrative_state):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer administrative_state'))
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid peer administrative_state")
)
if heartbeat_interval:
if not self._validate_heartbeat_interval(heartbeat_interval):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_interval'))
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid peer heartbeat_interval")
)
if heartbeat_failure_threshold:
if not self._validate_heartbeat_failure_threshold(
heartbeat_failure_threshold):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_threshold'))
heartbeat_failure_threshold
):
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid peer heartbeat_failure_threshold"),
)
if heartbeat_failure_policy:
if not self._validate_heartbeat_failure_policy(
heartbeat_failure_policy):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_failure_policy'))
if not self._validate_heartbeat_failure_policy(heartbeat_failure_policy):
pecan.abort(
httpclient.BAD_REQUEST, _("Invalid peer heartbeat_failure_policy")
)
if heartbeat_maintenance_timeout:
if not self._validate_heartbeat_maintenance_timeout(
heartbeat_maintenance_timeout):
pecan.abort(httpclient.BAD_REQUEST,
_('Invalid peer heartbeat_maintenance_timeout'))
heartbeat_maintenance_timeout
):
pecan.abort(
httpclient.BAD_REQUEST,
_("Invalid peer heartbeat_maintenance_timeout"),
)
try:
updated_peer = db_api.system_peer_update(
context,
peer.id,
peer_uuid, peer_name,
endpoint, username, password,
peer_uuid,
peer_name,
endpoint,
username,
password,
gateway_ip,
administrative_state,
heartbeat_interval,
heartbeat_failure_threshold,
heartbeat_failure_policy,
heartbeat_maintenance_timeout)
heartbeat_maintenance_timeout,
)
updated_peer_dict = db_api.system_peer_db_model_to_dict(updated_peer)
# Set the sync status to out-of-sync if the peer_controller_gateway_ip
# was updated
if (
gateway_ip is not None and
gateway_ip != peer.peer_controller_gateway_ip
):
if gateway_ip is not None and gateway_ip != peer.peer_controller_gateway_ip:
associations = db_api.peer_group_association_get_by_system_peer_id(
context, peer.id
)
@ -467,8 +520,9 @@ class SystemPeersController(restcomm.GenericPathController):
# Update the sync status on both sites
association_ids.update(
self.dcmanager_rpc_client.update_association_sync_status(
context, association.peer_group_id,
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
context,
association.peer_group_id,
consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
)
)
# Generate an informative message to remind the operator
@ -483,33 +537,37 @@ class SystemPeersController(restcomm.GenericPathController):
except Exception as e:
# additional exceptions.
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to update system peer'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to update system peer")
)
@index.when(method='delete', template='json')
@index.when(method="delete", template="json")
def delete(self, peer_ref):
"""Delete the system peer."""
policy.authorize(system_peer_policy.POLICY_ROOT % "delete", {},
restcomm.extract_credentials_for_policy())
policy.authorize(
system_peer_policy.POLICY_ROOT % "delete",
{},
restcomm.extract_credentials_for_policy(),
)
context = restcomm.extract_context_from_environ()
LOG.info("Deleting system peer: %s" % context)
if peer_ref is None:
pecan.abort(httpclient.BAD_REQUEST,
_('System Peer UUID or ID required'))
pecan.abort(httpclient.BAD_REQUEST, _("System Peer UUID or ID required"))
peer = utils.system_peer_get_by_ref(context, peer_ref)
if peer is None:
pecan.abort(httpclient.NOT_FOUND, _('System Peer not found'))
pecan.abort(httpclient.NOT_FOUND, _("System Peer not found"))
# A system peer cannot be deleted if it is used by any associations
association = db_api.\
peer_group_association_get_by_system_peer_id(context,
str(peer.id))
association = db_api.peer_group_association_get_by_system_peer_id(
context, str(peer.id)
)
if len(association) > 0:
pecan.abort(httpclient.BAD_REQUEST,
_('Cannot delete a system peer which is '
'associated with peer group.'))
pecan.abort(
httpclient.BAD_REQUEST,
_("Cannot delete a system peer which is associated with peer group."),
)
try:
db_api.system_peer_destroy(context, peer.id)
@ -517,5 +575,6 @@ class SystemPeersController(restcomm.GenericPathController):
pecan.abort(httpclient.UNPROCESSABLE_ENTITY, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(httpclient.INTERNAL_SERVER_ERROR,
_('Unable to delete system peer'))
pecan.abort(
httpclient.INTERNAL_SERVER_ERROR, _("Unable to delete system peer")
)
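A minimal sketch of a system peer creation body, mirroring the validators above. Required and optional key names come from the controller; the sample values, endpoint URL, and addresses are illustrative assumptions (heartbeat_interval simply has to fall inside the controller's MIN/MAX bounds).

# Illustrative body for creating a system peer; values are examples only.
system_peer_body = {
    # Required fields
    "peer_uuid": "a73e7c4f-4c0b-4db0-9b1d-4b3a1f6e2c55",
    "peer_name": "peer-site-b",
    "manager_endpoint": "https://peer-site-b.example.com:5000/v3",
    "manager_username": "admin",
    "manager_password": "<password>",
    "peer_controller_gateway_address": "192.168.204.1",
    # Optional fields, each with its own validator
    "administrative_state": "enabled",
    "heartbeat_interval": 60,
    "heartbeat_failure_threshold": 3,      # controller caps this at 30
    "heartbeat_failure_policy": "alarm",   # "alarm" | "rehome" | "delegate"
    "heartbeat_maintenance_timeout": 600,  # accepted range is 300..36000
}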


@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -33,5 +33,5 @@ def list_rules():
phased_subcloud_deploy.list_rules(),
subcloud_peer_group.list_rules(),
peer_group_association.list_rules(),
system_peers.list_rules()
system_peers.list_rules(),
)


@ -8,20 +8,15 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:alarm_manager:%s'
POLICY_ROOT = "dc_api:alarm_manager:%s"
alarm_manager_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get alarms from subclouds.",
operations=[
{
'method': 'GET',
'path': '/v1.0/alarms'
}
]
operations=[{"method": "GET", "path": "/v1.0/alarms"}],
)
]


@ -1,28 +1,26 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_policy import policy
ADMIN_IN_SYSTEM_PROJECTS = 'admin_in_system_projects'
READER_IN_SYSTEM_PROJECTS = 'reader_in_system_projects'
ADMIN_IN_SYSTEM_PROJECTS = "admin_in_system_projects"
READER_IN_SYSTEM_PROJECTS = "reader_in_system_projects"
base_rules = [
policy.RuleDefault(
name=ADMIN_IN_SYSTEM_PROJECTS,
check_str='role:admin and (project_name:admin or ' +
'project_name:services)',
check_str="role:admin and (project_name:admin or project_name:services)",
description="Base rule.",
),
policy.RuleDefault(
name=READER_IN_SYSTEM_PROJECTS,
check_str='role:reader and (project_name:admin or ' +
'project_name:services)',
description="Base rule."
)
check_str="role:reader and (project_name:admin or project_name:services)",
description="Base rule.",
),
]


@ -8,64 +8,46 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:peer_group_associations:%s'
POLICY_ROOT = "dc_api:peer_group_associations:%s"
peer_group_associations_rules = [
# CRUD of peer_group_associations entity
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create peer group association.",
operations=[
{
'method': 'POST',
'path': '/v1.0/peer-group-associations'
}
]
operations=[{"method": "POST", "path": "/v1.0/peer-group-associations"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete peer group association.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/peer-group-associations/{associate_id}'
}
]
{"method": "DELETE", "path": "/v1.0/peer-group-associations/{associate_id}"}
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get peer group associations.",
operations=[
{
'method': 'GET',
'path': '/v1.0/peer-group-associations'
},
{
'method': 'GET',
'path': '/v1.0/peer-group-associations/{associate_id}'
}
]
{"method": "GET", "path": "/v1.0/peer-group-associations"},
{"method": "GET", "path": "/v1.0/peer-group-associations/{associate_id}"},
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'modify',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "modify",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Modify peer group association.",
operations=[
{"method": "PATCH", "path": "/v1.0/peer-group-associations/{associate_id}"},
{
'method': 'PATCH',
'path': '/v1.0/peer-group-associations/{associate_id}'
"method": "PATCH",
"path": "/v1.0/peer-group-associations/{associate_id}/sync",
},
{
'method': 'PATCH',
'path': '/v1.0/peer-group-associations/{associate_id}/sync'
}
]
)
],
),
]
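A self-contained sketch of how documented rules like the ones above are evaluated. It uses the public oslo.policy API directly instead of dcmanager's policy/restcomm helpers, and the credential dictionary is a made-up example of what the request context would provide.

# Standalone sketch: register a base rule plus one documented rule, then
# enforce it against example credentials.
from oslo_config import cfg
from oslo_policy import policy

ADMIN_IN_SYSTEM_PROJECTS = "admin_in_system_projects"

rules = [
    policy.RuleDefault(
        name=ADMIN_IN_SYSTEM_PROJECTS,
        check_str="role:admin and (project_name:admin or project_name:services)",
    ),
    policy.DocumentedRuleDefault(
        name="dc_api:peer_group_associations:create",
        check_str="rule:" + ADMIN_IN_SYSTEM_PROJECTS,
        description="Create peer group association.",
        operations=[{"method": "POST", "path": "/v1.0/peer-group-associations"}],
    ),
]

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_defaults(rules)

# Example credentials as the API's credential extraction would supply them.
creds = {"roles": ["admin"], "project_name": "admin"}
print(enforcer.enforce("dc_api:peer_group_associations:create", {}, creds))  # True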


@ -8,52 +8,47 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:phased_subcloud_deploy:%s'
POLICY_ROOT = "dc_api:phased_subcloud_deploy:%s"
phased_subcloud_deploy_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create a subcloud",
operations=[
{
'method': 'POST',
'path': '/v1.0/phased-subcloud-deploy'
}
]
operations=[{"method": "POST", "path": "/v1.0/phased-subcloud-deploy"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'modify',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "modify",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Modify the subcloud deployment.",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/phased-subcloud-deploy/{subcloud}/abort'
"method": "PATCH",
"path": "/v1.0/phased-subcloud-deploy/{subcloud}/abort",
},
{
'method': 'PATCH',
'path': '/v1.0/phased-subcloud-deploy/{subcloud}/resume'
"method": "PATCH",
"path": "/v1.0/phased-subcloud-deploy/{subcloud}/resume",
},
{
'method': 'PATCH',
'path': '/v1.0/phased-subcloud-deploy/{subcloud}/install'
"method": "PATCH",
"path": "/v1.0/phased-subcloud-deploy/{subcloud}/install",
},
{
'method': 'PATCH',
'path': '/v1.0/phased-subcloud-deploy/{subcloud}/bootstrap'
"method": "PATCH",
"path": "/v1.0/phased-subcloud-deploy/{subcloud}/bootstrap",
},
{
'method': 'PATCH',
'path': '/v1.0/phased-subcloud-deploy/{subcloud}/configure'
"method": "PATCH",
"path": "/v1.0/phased-subcloud-deploy/{subcloud}/configure",
},
{
'method': 'PATCH',
'path': '/v1.0/phased-subcloud-deploy/{subcloud}/complete'
}
]
)
"method": "PATCH",
"path": "/v1.0/phased-subcloud-deploy/{subcloud}/complete",
},
],
),
]


@ -8,43 +8,33 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_backup:%s'
POLICY_ROOT = "dc_api:subcloud_backup:%s"
subcloud_backup_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create new subcloud backup.",
operations=[
{
'method': 'POST',
'path': '/v1.0/subcloud-backup'
}
]
operations=[{"method": "POST", "path": "/v1.0/subcloud-backup"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete a subcloud backup.",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/subcloud-backup/delete/{release_version}'
"method": "PATCH",
"path": "/v1.0/subcloud-backup/delete/{release_version}",
}
]
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'restore',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "restore",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Restore a subcloud backup.",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/subcloud-backup/restore'
}
]
)
operations=[{"method": "PATCH", "path": "/v1.0/subcloud-backup/restore"}],
),
]


@ -8,51 +8,34 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_deploy:%s'
POLICY_ROOT = "dc_api:subcloud_deploy:%s"
subcloud_deploy_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'upload',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "upload",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Upload subcloud deploy files.",
operations=[
{
'method': 'POST',
'path': '/v1.0/subcloud-deploy'
}
]
operations=[{"method": "POST", "path": "/v1.0/subcloud-deploy"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Show subcloud deploy files.",
operations=[
{
'method': 'GET',
'path': '/v1.0/subcloud-deploy'
},
{
'method': 'GET',
'path': '/v1.0/subcloud-deploy/{release}'
}
]
{"method": "GET", "path": "/v1.0/subcloud-deploy"},
{"method": "GET", "path": "/v1.0/subcloud-deploy/{release}"},
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete subcloud deploy files.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/subcloud-deploy'
},
{
'method': 'DELETE',
'path': '/v1.0/subcloud-deploy/{release}'
}
]
)
{"method": "DELETE", "path": "/v1.0/subcloud-deploy"},
{"method": "DELETE", "path": "/v1.0/subcloud-deploy/{release}"},
],
),
]

View File

@@ -8,62 +8,45 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_groups:%s'
POLICY_ROOT = "dc_api:subcloud_groups:%s"
subcloud_groups_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create subcloud group.",
operations=[
{
'method': 'POST',
'path': '/v1.0/subcloud-groups'
}
]
operations=[{"method": "POST", "path": "/v1.0/subcloud-groups"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete subcloud group.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/subcloud-groups/{subcloud_group}'
}
]
{"method": "DELETE", "path": "/v1.0/subcloud-groups/{subcloud_group}"}
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get subcloud groups.",
operations=[
{"method": "GET", "path": "/v1.0/subcloud-groups"},
{"method": "GET", "path": "/v1.0/subcloud-groups/{subcloud_group}"},
{
'method': 'GET',
'path': '/v1.0/subcloud-groups'
"method": "GET",
"path": "/v1.0/subcloud-groups/{subcloud_group}/subclouds",
},
{
'method': 'GET',
'path': '/v1.0/subcloud-groups/{subcloud_group}'
},
{
'method': 'GET',
'path': '/v1.0/subcloud-groups/{subcloud_group}/subclouds'
}
]
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'modify',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "modify",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Modify subcloud group.",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/subcloud-groups/{subcloud_group}'
}
]
)
{"method": "PATCH", "path": "/v1.0/subcloud-groups/{subcloud_group}"}
],
),
]

View File

@@ -8,82 +8,73 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subcloud_peer_groups:%s'
POLICY_ROOT = "dc_api:subcloud_peer_groups:%s"
_subcloud_peer_groups_rules = [
# CRUD of subcloud-peer-groups entity
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create subcloud peer group.",
operations=[
{
'method': 'POST',
'path': '/v1.0/subcloud-peer-groups'
}
]
operations=[{"method": "POST", "path": "/v1.0/subcloud-peer-groups"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete subcloud peer group.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}'
"method": "DELETE",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}",
}
]
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Get Subcloud Peer Group data",
operations=[
{
'method': 'GET',
'path': '/v1.0/subcloud-peer-groups/'
},
{"method": "GET", "path": "/v1.0/subcloud-peer-groups/"},
# Show details of a specified Subcloud Peer Group
{
'method': 'GET',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}'
"method": "GET",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}",
},
# Show subclouds status of the subcloud-peer-group
{
'method': 'GET',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}/status'
"method": "GET",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}/status",
},
# List Subclouds assigned to the given Subcloud Peer Group
{
'method': 'GET',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}/subclouds'
}
]
"method": "GET",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}/subclouds",
},
],
),
# Update a Subcloud Peer Group with specified configuration
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'modify',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "modify",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Update a Subcloud Peer Group with specified configuration",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}'
"method": "PATCH",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}",
},
# Migrate subclouds entity of the subcloud-peer-group
{
'method': 'PATCH',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}/migrate'
"method": "PATCH",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}/migrate",
},
# Trigger a peer group audit
{
'method': 'PATCH',
'path': '/v1.0/subcloud-peer-groups/{subcloud_peer_group}/audit'
}
]
)
"method": "PATCH",
"path": "/v1.0/subcloud-peer-groups/{subcloud_peer_group}/audit",
},
],
),
]

View File

@@ -8,86 +8,46 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:subclouds:%s'
POLICY_ROOT = "dc_api:subclouds:%s"
subclouds_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create a subcloud.",
operations=[
{
'method': 'POST',
'path': '/v1.0/subclouds'
}
]
operations=[{"method": "POST", "path": "/v1.0/subclouds"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete a subcloud.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/subclouds/{alarm_uuid}'
}
]
operations=[{"method": "DELETE", "path": "/v1.0/subclouds/{alarm_uuid}"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get subclouds data.",
operations=[
{
'method': 'GET',
'path': '/v1.0/subclouds'
},
{
'method': 'GET',
'path': '/v1.0/subclouds/{subcloud}'
},
{
'method': 'GET',
'path': '/v1.0/subclouds/{subcloud}/detail'
}
]
{"method": "GET", "path": "/v1.0/subclouds"},
{"method": "GET", "path": "/v1.0/subclouds/{subcloud}"},
{"method": "GET", "path": "/v1.0/subclouds/{subcloud}/detail"},
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'modify',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "modify",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Modify a subcloud.",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}'
},
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}/prestage'
},
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}/reconfigure'
},
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}/reinstall'
},
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}/redeploy'
},
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}/restore'
},
{
'method': 'PATCH',
'path': '/v1.0/subclouds/{subcloud}/update_status'
}
]
)
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}"},
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}/prestage"},
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}/reconfigure"},
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}/reinstall"},
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}/redeploy"},
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}/restore"},
{"method": "PATCH", "path": "/v1.0/subclouds/{subcloud}/update_status"},
],
),
]

View File

@@ -8,47 +8,31 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:sw_update_options:%s'
POLICY_ROOT = "dc_api:sw_update_options:%s"
sw_update_options_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete per subcloud sw-update options.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/sw-update-options/{subcloud}'
}
]
operations=[{"method": "DELETE", "path": "/v1.0/sw-update-options/{subcloud}"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get sw-update options.",
operations=[
{
'method': 'GET',
'path': '/v1.0/sw-update-options'
},
{
'method': 'GET',
'path': '/v1.0/sw-update-options/{subcloud}'
}
]
{"method": "GET", "path": "/v1.0/sw-update-options"},
{"method": "GET", "path": "/v1.0/sw-update-options/{subcloud}"},
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "update",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Update sw-update options (defaults or per subcloud).",
operations=[
{
'method': 'POST',
'path': '/v1.0/sw-update-options/{subcloud}'
}
]
)
operations=[{"method": "POST", "path": "/v1.0/sw-update-options/{subcloud}"}],
),
]

View File

@@ -8,73 +8,44 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:sw_update_strategy:%s'
POLICY_ROOT = "dc_api:sw_update_strategy:%s"
sw_update_strategy_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'abort',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "abort",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Abort update strategy execution.",
operations=[
{
'method': 'POST',
'path': '/v1.0/sw-update-strategy/actions'
}
]
operations=[{"method": "POST", "path": "/v1.0/sw-update-strategy/actions"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'apply',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "apply",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Apply update strategy.",
operations=[
{
'method': 'POST',
'path': '/v1.0/sw-update-strategy/actions'
}
]
operations=[{"method": "POST", "path": "/v1.0/sw-update-strategy/actions"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create update strategy.",
operations=[
{
'method': 'POST',
'path': '/v1.0/sw-update-strategy'
}
]
operations=[{"method": "POST", "path": "/v1.0/sw-update-strategy"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete update strategy.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/sw-update-strategy'
}
]
operations=[{"method": "DELETE", "path": "/v1.0/sw-update-strategy"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get update strategy.",
operations=[
{
'method': 'GET',
'path': '/v1.0/sw-update-strategy'
},
{
'method': 'GET',
'path': '/v1.0/sw-update-strategy/steps'
},
{
'method': 'GET',
'path': '/v1.0/sw-update-strategy/steps/{cloud_name}'
}
{"method": "GET", "path": "/v1.0/sw-update-strategy"},
{"method": "GET", "path": "/v1.0/sw-update-strategy/steps"},
{"method": "GET", "path": "/v1.0/sw-update-strategy/steps/{cloud_name}"},
],
)
),
]

View File

@@ -7,64 +7,43 @@ from oslo_policy import policy
from dcmanager.api.policies import base
POLICY_ROOT = 'dc_api:system_peers:%s'
POLICY_ROOT = "dc_api:system_peers:%s"
system_peers_rules = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "create",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Create system peer.",
operations=[
{
'method': 'POST',
'path': '/v1.0/system-peers'
}
]
operations=[{"method": "POST", "path": "/v1.0/system-peers"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "delete",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Delete system peer.",
operations=[
{
'method': 'DELETE',
'path': '/v1.0/system-peers/{system_peer}'
}
]
operations=[{"method": "DELETE", "path": "/v1.0/system-peers/{system_peer}"}],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'get',
check_str='rule:' + base.READER_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "get",
check_str="rule:" + base.READER_IN_SYSTEM_PROJECTS,
description="Get system peers.",
operations=[
{
'method': 'GET',
'path': '/v1.0/system-peers'
},
{"method": "GET", "path": "/v1.0/system-peers"},
# Show details of a specified System Peer
{
'method': 'GET',
'path': '/v1.0/system-peers/{system_peer}'
},
{"method": "GET", "path": "/v1.0/system-peers/{system_peer}"},
# List Subcloud Peer Groups associated with the given System Peer
{
'method': 'GET',
'path': '/v1.0/system-peers/{system_peer}/subcloud-peer-groups'
}
]
"method": "GET",
"path": "/v1.0/system-peers/{system_peer}/subcloud-peer-groups",
},
],
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'modify',
check_str='rule:' + base.ADMIN_IN_SYSTEM_PROJECTS,
name=POLICY_ROOT % "modify",
check_str="rule:" + base.ADMIN_IN_SYSTEM_PROJECTS,
description="Modify system peer.",
operations=[
{
'method': 'PATCH',
'path': '/v1.0/system-peers/{system_peer}'
}
]
)
operations=[{"method": "PATCH", "path": "/v1.0/system-peers/{system_peer}"}],
),
]

View File

@@ -36,22 +36,24 @@ def reset():
_ENFORCER = None
def init(policy_file='policy.yaml'):
def init(policy_file="policy.yaml"):
"""Init an Enforcer class.
:param policy_file: Custom policy file to be used.
:param policy_file: Custom policy file to be used.
:return: Returns a Enforcer instance.
:return: Returns a Enforcer instance.
"""
global _ENFORCER
if not _ENFORCER:
# https://docs.openstack.org/oslo.policy/latest/user/usage.html
_ENFORCER = policy.Enforcer(CONF,
policy_file=policy_file,
default_rule='default',
use_conf=True,
overwrite=True)
_ENFORCER = policy.Enforcer(
CONF,
policy_file=policy_file,
default_rule="default",
use_conf=True,
overwrite=True,
)
_ENFORCER.register_defaults(controller_policies.list_rules())
return _ENFORCER
@@ -59,5 +61,6 @@ def init(policy_file='policy.yaml'):
def authorize(rule, target, creds, do_raise=True):
"""A wrapper around 'authorize' from 'oslo_policy.policy'."""
init()
return _ENFORCER.authorize(rule, target, creds, do_raise=do_raise,
exc=exc.HTTPForbidden)
return _ENFORCER.authorize(
rule, target, creds, do_raise=do_raise, exc=exc.HTTPForbidden
)
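
The authorize() wrapper above is what the API handlers call to enforce these rules; with do_raise=True it raises the exc.HTTPForbidden exception wired in above when the check fails. A hedged sketch of the call shape, using a rule name taken from the subclouds hunk ("dc_api:subclouds:delete") and an assumed import path for this module:

from dcmanager.api import policy as dcmanager_policy  # assumed module path


def ensure_can_delete_subcloud(context_dict):
    # Raises HTTPForbidden (per the exc= argument above) when the credentials
    # do not satisfy the "dc_api:subclouds:delete" rule.
    return dcmanager_policy.authorize(
        "dc_api:subclouds:delete",
        {},            # target: no per-resource attributes in this sketch
        context_dict,  # e.g. the request context rendered as a dict
    )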

View File

@@ -2651,7 +2651,7 @@ class TestSubcloudsDelete(BaseTestSubcloudsController):
self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Cannot delete a subcloud that is \"managed\" status"
"Cannot delete a subcloud that is 'managed' status"
)
def test_delete_fails_with_invalid_deploy_states(self):

View File

@@ -13,12 +13,12 @@ READER_IN_SYSTEM_PROJECTS = "reader_in_system_projects"
base_rules = [
policy.RuleDefault(
name=ADMIN_IN_SYSTEM_PROJECTS,
check_str="role:admin and (project_name:admin or " + "project_name:services)",
check_str="role:admin and (project_name:admin or project_name:services)",
description="Base rule.",
),
policy.RuleDefault(
name=READER_IN_SYSTEM_PROJECTS,
check_str="role:reader and (project_name:admin or " + "project_name:services)",
check_str="role:reader and (project_name:admin or project_name:services)",
description="Base rule.",
),
]
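
These two base check strings are what every "rule:" reference in the modules above resolves to. A small runnable sketch of how such a check_str evaluates against credentials with oslo.policy directly; the rule name "admin_in_system_projects" is assumed by analogy with the reader constant shown in the hunk header:

from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([], project="policy-example")  # initialize config without CLI arguments

enforcer = policy.Enforcer(CONF)
enforcer.register_default(
    policy.RuleDefault(
        name="admin_in_system_projects",  # assumed value of ADMIN_IN_SYSTEM_PROJECTS
        check_str="role:admin and (project_name:admin or project_name:services)",
    )
)

# An admin in the services project passes the check; a reader does not.
admin_creds = {"roles": ["admin"], "project_name": "services"}
reader_creds = {"roles": ["reader"], "project_name": "services"}
print(enforcer.enforce("admin_in_system_projects", {}, admin_creds))   # True
print(enforcer.enforce("admin_in_system_projects", {}, reader_creds))  # False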

View File

@@ -22,13 +22,16 @@ formatted_modules = [
"dccommon",
"dcdbsync",
"dcorch",
"dcmanager/api",
]
# Function to run black check
def run_black_check(module):
try:
subprocess.run(["black", "--check", "--quiet", f"./{module}"], check=True)
subprocess.run(
["black", "--check", "--diff", "--quiet", f"./{module}"], check=True
)
print(f"Black check passed for {module}")
except subprocess.CalledProcessError as e:
print(f"Black check failed for {module}")