Apply black formatter to dcmanager/common
This commit applies the Black format to the `dcmanager/common` files to
ensure that they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50444

Change-Id: I5b53966ef9a4fe505ef6fef8a9a7440b3831f495
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
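The change itself is mechanical: Black normalizes string quotes to double quotes, wraps long calls at its default 88-column limit, and adds trailing commas, which is exactly what the hunks below show. As a minimal sketch, the same transformation can be reproduced with Black's public Python API (assuming the `black` package is installed; the sample line is borrowed from the first hunk below):

```python
# Sketch: reproduce the quote normalization this commit applies.
# Assumes `pip install black`; format_str() and Mode() are Black's public API.
import black

src = "importutils.import_module('keystonemiddleware.auth_token')\n"
formatted = black.format_str(src, mode=black.Mode())
print(formatted)
# -> importutils.import_module("keystonemiddleware.auth_token")
```

In CI the equivalent check is the stx-distcloud-tox-black job listed in the test plan, which is expected to fail whenever files under `dcmanager/common` are not Black-clean.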
@@ -20,166 +20,175 @@ from oslo_config import cfg
 from oslo_utils import importutils
 
 # Ensure keystonemiddleware options are imported
-importutils.import_module('keystonemiddleware.auth_token')
+importutils.import_module("keystonemiddleware.auth_token")
 
 global_opts = [
-    cfg.BoolOpt('use_default_quota_class',
-                default=True,
-                help='Enables or disables use of default quota class '
-                     'with default quota.'),
-    cfg.IntOpt('report_interval',
-               default=60,
-               help='Seconds between running periodic reporting tasks.'),
-    cfg.IntOpt('worker_rlimit_nofile',
-               default=4096,
-               help='Maximum number of open files per worker process.'),
+    cfg.BoolOpt(
+        "use_default_quota_class",
+        default=True,
+        help="Enables or disables use of default quota class with default quota.",
+    ),
+    cfg.IntOpt(
+        "report_interval",
+        default=60,
+        help="Seconds between running periodic reporting tasks.",
+    ),
+    cfg.IntOpt(
+        "worker_rlimit_nofile",
+        default=4096,
+        help="Maximum number of open files per worker process.",
+    ),
 ]
 
 # OpenStack credentials used for Endpoint Cache
 # We need to register the below non-standard config
 # options to dcmanager engine
 keystone_opts = [
-    cfg.StrOpt('username',
-               help='Username of account'),
-    cfg.StrOpt('password',
-               secret=True,
-               help='Password of account'),
-    cfg.StrOpt('project_name',
-               help='Tenant name of account'),
-    cfg.StrOpt('user_domain_name',
-               default='Default',
-               help='User domain name of account'),
-    cfg.StrOpt('project_domain_name',
-               default='Default',
-               help='Project domain name of account'),
+    cfg.StrOpt("username", help="Username of account"),
+    cfg.StrOpt("password", secret=True, help="Password of account"),
+    cfg.StrOpt("project_name", help="Tenant name of account"),
+    cfg.StrOpt(
+        "user_domain_name", default="Default", help="User domain name of account"
+    ),
+    cfg.StrOpt(
+        "project_domain_name", default="Default", help="Project domain name of account"
+    ),
 ]
 
 
 # Pecan_opts
 pecan_opts = [
     cfg.StrOpt(
-        'root',
-        default='dcmanager.api.controllers.root.RootController',
-        help='Pecan root controller'
+        "root",
+        default="dcmanager.api.controllers.root.RootController",
+        help="Pecan root controller",
     ),
     cfg.ListOpt(
-        'modules',
+        "modules",
         default=["dcmanager.api"],
-        help='A list of modules where pecan will search for applications.'
+        help="A list of modules where pecan will search for applications.",
     ),
     cfg.BoolOpt(
-        'debug',
+        "debug",
         default=False,
-        help='Enables the ability to display tracebacks in the browser and'
-             'interactively debug during development.'
+        help=(
+            "Enables the ability to display tracebacks in the browser and "
+            "interactively debug during development."
+        ),
     ),
     cfg.BoolOpt(
-        'auth_enable',
-        default=True,
-        help='Enables user authentication in pecan.'
-    )
+        "auth_enable", default=True, help="Enables user authentication in pecan."
+    ),
 ]
 
 
 # OpenStack admin user credentials used for Endpoint Cache
 cache_opts = [
-    cfg.StrOpt('auth_uri',
-               help='Keystone authorization url'),
-    cfg.StrOpt('identity_uri',
-               help='Keystone service url'),
-    cfg.StrOpt('admin_username',
-               help='Username of admin account, needed when'
-                    ' auto_refresh_endpoint set to True'),
-    cfg.StrOpt('admin_password',
-               secret=True,
-               help='Password of admin account, needed when'
-                    ' auto_refresh_endpoint set to True'),
-    cfg.StrOpt('admin_tenant',
-               help='Tenant name of admin account, needed when'
-                    ' auto_refresh_endpoint set to True'),
-    cfg.StrOpt('admin_user_domain_name',
-               default='Default',
-               help='User domain name of admin account, needed when'
-                    ' auto_refresh_endpoint set to True'),
-    cfg.StrOpt('admin_project_domain_name',
-               default='Default',
-               help='Project domain name of admin account, needed when'
-                    ' auto_refresh_endpoint set to True')
+    cfg.StrOpt("auth_uri", help="Keystone authorization url"),
+    cfg.StrOpt("identity_uri", help="Keystone service url"),
+    cfg.StrOpt(
+        "admin_username",
+        help="Username of admin, when auto_refresh_endpoint set to True",
+    ),
+    cfg.StrOpt(
+        "admin_password",
+        secret=True,
+        help="Password of admin, when auto_refresh_endpoint set to True",
+    ),
+    cfg.StrOpt(
+        "admin_tenant",
+        help="Tenant name of admin, when auto_refresh_endpoint set to True",
+    ),
+    cfg.StrOpt(
+        "admin_user_domain_name",
+        default="Default",
+        help="User domain name of admin, when auto_refresh_endpoint set to True",
+    ),
+    cfg.StrOpt(
+        "admin_project_domain_name",
+        default="Default",
+        help=("Project domain name of admin, when auto_refresh_endpoint set to True"),
+    ),
 ]
 
 # OpenStack credentials used for Endpoint Cache
 endpoint_cache_opts = [
-    cfg.StrOpt('auth_uri',
-               help='Keystone authorization url'),
-    cfg.StrOpt('auth_plugin',
-               help='Name of the plugin to load'),
-    cfg.StrOpt('username',
-               help='Username of account'),
-    cfg.StrOpt('password',
-               secret=True,
-               help='Password of account'),
-    cfg.StrOpt('project_name',
-               help='Project name of account'),
-    cfg.StrOpt('user_domain_name',
-               default='Default',
-               help='User domain name of account'),
-    cfg.StrOpt('project_domain_name',
-               default='Default',
-               help='Project domain name of account'),
-    cfg.IntOpt('http_connect_timeout',
-               help='Request timeout value for communicating with Identity'
-                    ' API server.'),
+    cfg.StrOpt("auth_uri", help="Keystone authorization url"),
+    cfg.StrOpt("auth_plugin", help="Name of the plugin to load"),
+    cfg.StrOpt("username", help="Username of account"),
+    cfg.StrOpt("password", secret=True, help="Password of account"),
+    cfg.StrOpt("project_name", help="Project name of account"),
+    cfg.StrOpt(
+        "user_domain_name", default="Default", help="User domain name of account"
+    ),
+    cfg.StrOpt(
+        "project_domain_name", default="Default", help="Project domain name of account"
+    ),
+    cfg.IntOpt(
+        "http_connect_timeout",
+        help="Request timeout value for communicating with Identity API server.",
+    ),
 ]
 
 scheduler_opts = [
-    cfg.BoolOpt('periodic_enable',
-                default=True,
-                help='boolean value for enable/disable periodic tasks'),
-    cfg.IntOpt('subcloud_audit_interval',
-               default=30,
-               help='periodic time interval for subcloud audit'),
-    cfg.IntOpt('kube_rootca_update_audit_expiry_days',
-               default=90,
-               help='Num days remaining for a kube rootca to be out-of-sync'),
-    cfg.IntOpt('patch_audit_interval',
-               default=900,
-               help='default time interval for patch audit')
+    cfg.BoolOpt(
+        "periodic_enable",
+        default=True,
+        help="boolean value for enable/disable periodic tasks",
+    ),
+    cfg.IntOpt(
+        "subcloud_audit_interval",
+        default=30,
+        help="periodic time interval for subcloud audit",
+    ),
+    cfg.IntOpt(
+        "kube_rootca_update_audit_expiry_days",
+        default=90,
+        help="Num days remaining for a kube rootca to be out-of-sync",
+    ),
+    cfg.IntOpt(
+        "patch_audit_interval",
+        default=900,
+        help="default time interval for patch audit",
+    ),
 ]
 
 common_opts = [
-    cfg.IntOpt('workers', default=1,
-               help='number of workers'),
-    cfg.IntOpt('orch_workers', default=1,
-               help='number of orchestrator workers'),
-    cfg.IntOpt('state_workers', default=4,
-               help='number of state workers'),
-    cfg.IntOpt('audit_workers', default=1,
-               help='number of audit workers'),
-    cfg.IntOpt('audit_worker_workers', default=4,
-               help='number of audit-worker workers'),
-    cfg.StrOpt('host',
-               default='localhost',
-               help='hostname of the machine'),
-    cfg.IntOpt('playbook_timeout', default=3600,
-               help='global ansible playbook timeout (seconds)'),
-    cfg.IntOpt('ipmi_capture', default=1,
-               help='global IPMI capture control. 0: globally disabled '
-                    '1:enabled via rvmc_debug_level, 2:globally enabled')
+    cfg.IntOpt("workers", default=1, help="number of workers"),
+    cfg.IntOpt("orch_workers", default=1, help="number of orchestrator workers"),
+    cfg.IntOpt("state_workers", default=4, help="number of state workers"),
+    cfg.IntOpt("audit_workers", default=1, help="number of audit workers"),
+    cfg.IntOpt(
+        "audit_worker_workers", default=4, help="number of audit-worker workers"
+    ),
+    cfg.StrOpt("host", default="localhost", help="hostname of the machine"),
+    cfg.IntOpt(
+        "playbook_timeout",
+        default=3600,
+        help="global ansible playbook timeout (seconds)",
+    ),
+    cfg.IntOpt(
+        "ipmi_capture",
+        default=1,
+        help=(
+            "global IPMI capture control. 0: globally disabled "
+            "1:enabled via rvmc_debug_level, 2:globally enabled"
+        ),
+    ),
 ]
 
-scheduler_opt_group = cfg.OptGroup(name='scheduler',
-                                   title='Scheduler options for periodic job')
-keystone_opt_group = cfg.OptGroup(name='keystone_authtoken',
-                                  title='Keystone options')
+scheduler_opt_group = cfg.OptGroup(
+    name="scheduler", title="Scheduler options for periodic job"
+)
+keystone_opt_group = cfg.OptGroup(name="keystone_authtoken", title="Keystone options")
 # The group stores the pecan configurations.
-pecan_group = cfg.OptGroup(name='pecan',
-                           title='Pecan options')
+pecan_group = cfg.OptGroup(name="pecan", title="Pecan options")
 
-cache_opt_group = cfg.OptGroup(name='cache',
-                               title='OpenStack Admin Credentials')
+cache_opt_group = cfg.OptGroup(name="cache", title="OpenStack Admin Credentials")
 
-endpoint_cache_opt_group = cfg.OptGroup(name='endpoint_cache',
-                                        title='OpenStack Credentials')
+endpoint_cache_opt_group = cfg.OptGroup(
+    name="endpoint_cache", title="OpenStack Credentials"
+)
 
 
 def list_opts():
@@ -28,35 +28,35 @@ CERTS_VAULT_DIR = "/opt/dc-vault/certs"
 LOADS_VAULT_DIR = "/opt/dc-vault/loads"
 PATCH_VAULT_DIR = "/opt/dc-vault/patches"
 
-BOOTSTRAP_VALUES = 'bootstrap_values'
-BOOTSTRAP_ADDRESS = 'bootstrap-address'
-INSTALL_VALUES = 'install_values'
+BOOTSTRAP_VALUES = "bootstrap_values"
+BOOTSTRAP_ADDRESS = "bootstrap-address"
+INSTALL_VALUES = "install_values"
 
 # Deploy phases
-DEPLOY_PHASE_CREATE = 'create'
-DEPLOY_PHASE_INSTALL = 'install'
-DEPLOY_PHASE_BOOTSTRAP = 'bootstrap'
-DEPLOY_PHASE_CONFIG = 'configure'
-DEPLOY_PHASE_COMPLETE = 'complete'
-DEPLOY_PHASE_ABORT = 'abort'
-DEPLOY_PHASE_RESUME = 'resume'
-DEPLOY_PHASE_ENROLL = 'enroll'
+DEPLOY_PHASE_CREATE = "create"
+DEPLOY_PHASE_INSTALL = "install"
+DEPLOY_PHASE_BOOTSTRAP = "bootstrap"
+DEPLOY_PHASE_CONFIG = "configure"
+DEPLOY_PHASE_COMPLETE = "complete"
+DEPLOY_PHASE_ABORT = "abort"
+DEPLOY_PHASE_RESUME = "resume"
+DEPLOY_PHASE_ENROLL = "enroll"
 
 # Admin status for hosts
-ADMIN_LOCKED = 'locked'
-ADMIN_UNLOCKED = 'unlocked'
+ADMIN_LOCKED = "locked"
+ADMIN_UNLOCKED = "unlocked"
 
 # operational status for hosts
-OPERATIONAL_ENABLED = 'enabled'
-OPERATIONAL_DISABLED = 'disabled'
+OPERATIONAL_ENABLED = "enabled"
+OPERATIONAL_DISABLED = "disabled"
 
 # Availability status for hosts
-AVAILABILITY_AVAILABLE = 'available'
-AVAILABILITY_DEGRADED = 'degraded'
+AVAILABILITY_AVAILABLE = "available"
+AVAILABILITY_DEGRADED = "degraded"
 
 # Personality of hosts
-PERSONALITY_CONTROLLER_ACTIVE = 'Controller-Active'
-PERSONALITY_CONTROLLER_STANDBY = 'Controller-Standby'
+PERSONALITY_CONTROLLER_ACTIVE = "Controller-Active"
+PERSONALITY_CONTROLLER_STANDBY = "Controller-Standby"
 
 # Subcloud endpoint related database fields
 ENDPOINT_SYNC_STATUS = "endpoint_sync_status"
@@ -111,8 +111,8 @@ SUBCLOUD_APPLY_TYPE_SERIAL = "serial"
 
 # Values for the Default Subcloud Group
 DEFAULT_SUBCLOUD_GROUP_ID = 1
-DEFAULT_SUBCLOUD_GROUP_NAME = 'Default'
-DEFAULT_SUBCLOUD_GROUP_DESCRIPTION = 'Default Subcloud Group'
+DEFAULT_SUBCLOUD_GROUP_NAME = "Default"
+DEFAULT_SUBCLOUD_GROUP_DESCRIPTION = "Default Subcloud Group"
 DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE = SUBCLOUD_APPLY_TYPE_PARALLEL
 DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 2
@@ -144,24 +144,24 @@ STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY = "applying fw update strategy"
 STRATEGY_STATE_FINISHING_FW_UPDATE = "finishing fw update"
 
 # Kubernetes update orchestration states (ordered)
-STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK = \
-    "kube upgrade pre check"
-STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY = \
+STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK = "kube upgrade pre check"
+STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY = (
     "kube creating vim kube upgrade strategy"
-STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY = \
+)
+STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY = (
     "kube applying vim kube upgrade strategy"
+)
 
 # Kube Root CA Update orchestration states (ordered)
-STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK = \
-    "kube rootca update pre check"
-STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START = \
-    "kube rootca update start"
-STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT = \
-    "kube rootca update upload cert"
-STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY = \
+STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK = "kube rootca update pre check"
+STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START = "kube rootca update start"
+STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT = "kube rootca update upload cert"
+STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY = (
    "creating vim kube rootca update strategy"
-STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY = \
+)
+STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY = (
     "applying vim kube rootca update strategy"
+)
 
 # Prestage orchestration states (ordered)
 STRATEGY_STATE_PRESTAGE_PRE_CHECK = "prestage-precheck"
@@ -169,94 +169,85 @@ STRATEGY_STATE_PRESTAGE_PACKAGES = "prestaging-packages"
 STRATEGY_STATE_PRESTAGE_IMAGES = "prestaging-images"
 
 # Subcloud deploy status states
-DEPLOY_STATE_NONE = 'not-deployed'
-DEPLOY_STATE_PRE_DEPLOY = 'pre-deploy'
-DEPLOY_STATE_DEPLOY_PREP_FAILED = 'deploy-prep-failed'
-DEPLOY_STATE_CREATING = 'creating'
-DEPLOY_STATE_CREATE_FAILED = 'create-failed'
-DEPLOY_STATE_CREATED = 'create-complete'
-DEPLOY_STATE_PRE_INSTALL = 'pre-install'
-DEPLOY_STATE_PRE_INSTALL_FAILED = 'pre-install-failed'
-DEPLOY_STATE_INSTALLING = 'installing'
-DEPLOY_STATE_INSTALL_FAILED = 'install-failed'
-DEPLOY_STATE_INSTALLED = 'install-complete'
-DEPLOY_STATE_PRE_BOOTSTRAP = 'pre-bootstrap'
-DEPLOY_STATE_PRE_BOOTSTRAP_FAILED = 'pre-bootstrap-failed'
-DEPLOY_STATE_BOOTSTRAPPING = 'bootstrapping'
-DEPLOY_STATE_BOOTSTRAP_FAILED = 'bootstrap-failed'
-DEPLOY_STATE_BOOTSTRAP_ABORTED = 'bootstrap-aborted'
-DEPLOY_STATE_BOOTSTRAPPED = 'bootstrap-complete'
-DEPLOY_STATE_PRE_CONFIG = 'pre-config'
-DEPLOY_STATE_PRE_CONFIG_FAILED = 'pre-config-failed'
-DEPLOY_STATE_CONFIGURING = 'configuring'
-DEPLOY_STATE_CONFIG_FAILED = 'config-failed'
-DEPLOY_STATE_DEPLOYING = 'deploying'
-DEPLOY_STATE_DEPLOY_FAILED = 'deploy-failed'
-DEPLOY_STATE_ABORTING_INSTALL = 'aborting-install'
-DEPLOY_STATE_INSTALL_ABORTED = 'install-aborted'
-DEPLOY_STATE_ABORTING_BOOTSTRAP = 'aborting-bootstrap'
-DEPLOY_STATE_ABORTING_CONFIG = 'aborting-config'
-DEPLOY_STATE_CONFIG_ABORTED = 'config-aborted'
-DEPLOY_STATE_ENROLLED = 'enroll-complete'
-DEPLOY_STATE_ENROLLING = 'enrolling'
-DEPLOY_STATE_ENROLL_FAILED = 'enroll-failed'
-DEPLOY_STATE_MIGRATING_DATA = 'migrating-data'
-DEPLOY_STATE_DATA_MIGRATION_FAILED = 'data-migration-failed'
-DEPLOY_STATE_MIGRATED = 'migrated'
-DEPLOY_STATE_UPGRADE_ACTIVATED = 'upgrade-activated'
-DEPLOY_STATE_PRE_RESTORE = 'pre-restore'
-DEPLOY_STATE_RESTORE_PREP_FAILED = 'restore-prep-failed'
-DEPLOY_STATE_RESTORING = 'restoring'
-DEPLOY_STATE_RESTORE_FAILED = 'restore-failed'
-DEPLOY_STATE_PRE_REHOME = 'pre-rehome'
-DEPLOY_STATE_PRE_ENROLL = 'pre-enroll'
-DEPLOY_STATE_PRE_ENROLL_FAILED = 'pre-enroll-failed'
-DEPLOY_STATE_PRE_ENROLL_COMPLETE = 'pre-enroll-complete'
-DEPLOY_STATE_PRE_INIT_ENROLL = 'pre-init-enroll'
-DEPLOY_STATE_PRE_INIT_ENROLL_FAILED = 'pre-init-enroll-failed'
-DEPLOY_STATE_INITIATING_ENROLL = 'initiating-enroll'
-DEPLOY_STATE_INIT_ENROLL_FAILED = 'init-enroll-failed'
-DEPLOY_STATE_INIT_ENROLL_COMPLETE = 'init-enroll-complete'
+DEPLOY_STATE_NONE = "not-deployed"
+DEPLOY_STATE_PRE_DEPLOY = "pre-deploy"
+DEPLOY_STATE_DEPLOY_PREP_FAILED = "deploy-prep-failed"
+DEPLOY_STATE_CREATING = "creating"
+DEPLOY_STATE_CREATE_FAILED = "create-failed"
+DEPLOY_STATE_CREATED = "create-complete"
+DEPLOY_STATE_PRE_INSTALL = "pre-install"
+DEPLOY_STATE_PRE_INSTALL_FAILED = "pre-install-failed"
+DEPLOY_STATE_INSTALLING = "installing"
+DEPLOY_STATE_INSTALL_FAILED = "install-failed"
+DEPLOY_STATE_INSTALLED = "install-complete"
+DEPLOY_STATE_PRE_BOOTSTRAP = "pre-bootstrap"
+DEPLOY_STATE_PRE_BOOTSTRAP_FAILED = "pre-bootstrap-failed"
+DEPLOY_STATE_BOOTSTRAPPING = "bootstrapping"
+DEPLOY_STATE_BOOTSTRAP_FAILED = "bootstrap-failed"
+DEPLOY_STATE_BOOTSTRAP_ABORTED = "bootstrap-aborted"
+DEPLOY_STATE_BOOTSTRAPPED = "bootstrap-complete"
+DEPLOY_STATE_PRE_CONFIG = "pre-config"
+DEPLOY_STATE_PRE_CONFIG_FAILED = "pre-config-failed"
+DEPLOY_STATE_CONFIGURING = "configuring"
+DEPLOY_STATE_CONFIG_FAILED = "config-failed"
+DEPLOY_STATE_DEPLOYING = "deploying"
+DEPLOY_STATE_DEPLOY_FAILED = "deploy-failed"
+DEPLOY_STATE_ABORTING_INSTALL = "aborting-install"
+DEPLOY_STATE_INSTALL_ABORTED = "install-aborted"
+DEPLOY_STATE_ABORTING_BOOTSTRAP = "aborting-bootstrap"
+DEPLOY_STATE_ABORTING_CONFIG = "aborting-config"
+DEPLOY_STATE_CONFIG_ABORTED = "config-aborted"
+DEPLOY_STATE_ENROLLED = "enroll-complete"
+DEPLOY_STATE_ENROLLING = "enrolling"
+DEPLOY_STATE_ENROLL_FAILED = "enroll-failed"
+DEPLOY_STATE_MIGRATING_DATA = "migrating-data"
+DEPLOY_STATE_DATA_MIGRATION_FAILED = "data-migration-failed"
+DEPLOY_STATE_MIGRATED = "migrated"
+DEPLOY_STATE_UPGRADE_ACTIVATED = "upgrade-activated"
+DEPLOY_STATE_PRE_RESTORE = "pre-restore"
+DEPLOY_STATE_RESTORE_PREP_FAILED = "restore-prep-failed"
+DEPLOY_STATE_RESTORING = "restoring"
+DEPLOY_STATE_RESTORE_FAILED = "restore-failed"
+DEPLOY_STATE_PRE_REHOME = "pre-rehome"
+DEPLOY_STATE_PRE_ENROLL = "pre-enroll"
+DEPLOY_STATE_PRE_ENROLL_FAILED = "pre-enroll-failed"
+DEPLOY_STATE_PRE_ENROLL_COMPLETE = "pre-enroll-complete"
+DEPLOY_STATE_PRE_INIT_ENROLL = "pre-init-enroll"
+DEPLOY_STATE_PRE_INIT_ENROLL_FAILED = "pre-init-enroll-failed"
+DEPLOY_STATE_INITIATING_ENROLL = "initiating-enroll"
+DEPLOY_STATE_INIT_ENROLL_FAILED = "init-enroll-failed"
+DEPLOY_STATE_INIT_ENROLL_COMPLETE = "init-enroll-complete"
 # If any of the following rehoming or secondary statuses
 # are modified, cert-mon code will need to be updated.
-DEPLOY_STATE_REHOMING = 'rehoming'
-DEPLOY_STATE_REHOME_FAILED = 'rehome-failed'
-DEPLOY_STATE_REHOME_PREP_FAILED = 'rehome-prep-failed'
-DEPLOY_STATE_REHOME_PENDING = 'rehome-pending'
-DEPLOY_STATE_SECONDARY = 'secondary'
-DEPLOY_STATE_SECONDARY_FAILED = 'secondary-failed'
-DEPLOY_STATE_DONE = 'complete'
-DEPLOY_STATE_RECONFIGURING_NETWORK = 'reconfiguring-network'
-DEPLOY_STATE_RECONFIGURING_NETWORK_FAILED = 'network-reconfiguration-failed'
+DEPLOY_STATE_REHOMING = "rehoming"
+DEPLOY_STATE_REHOME_FAILED = "rehome-failed"
+DEPLOY_STATE_REHOME_PREP_FAILED = "rehome-prep-failed"
+DEPLOY_STATE_REHOME_PENDING = "rehome-pending"
+DEPLOY_STATE_SECONDARY = "secondary"
+DEPLOY_STATE_SECONDARY_FAILED = "secondary-failed"
+DEPLOY_STATE_DONE = "complete"
+DEPLOY_STATE_RECONFIGURING_NETWORK = "reconfiguring-network"
+DEPLOY_STATE_RECONFIGURING_NETWORK_FAILED = "network-reconfiguration-failed"
 # Subcloud errors
-ERROR_DESC_EMPTY = 'No errors present'
-ERROR_DESC_FAILED = 'Failed to get error message. Please check sysinv log'
-ERROR_DESC_CMD = 'dcmanager subcloud errors <subcloud-name>'
+ERROR_DESC_EMPTY = "No errors present"
+ERROR_DESC_FAILED = "Failed to get error message. Please check sysinv log"
+ERROR_DESC_CMD = "dcmanager subcloud errors <subcloud-name>"
 
 # Static content for error messages
 BOOTSTRAP_ERROR_MSG = DEPLOY_STATE_BOOTSTRAP_FAILED
 CONFIG_ERROR_MSG = DEPLOY_STATE_CONFIG_FAILED
 
 ERR_MSG_DICT = {
-
     BOOTSTRAP_ERROR_MSG: "For bootstrap failures, please use 'dcmanager subcloud "
-                         "deploy resume' after the cause of failure has been "
-                         "resolved.",
-
+    "deploy resume' after the cause of failure has been resolved.",
     CONFIG_ERROR_MSG: "For configuration failures, please use dcmanager subcloud "
-                      "deploy config command to reconfigure the subcloud after "
-                      "the cause of failure has been resolved.",
-
+    "deploy config command to reconfigure the subcloud after "
+    "the cause of failure has been resolved.",
     "bmc_cred": "Check BMC credentials in install-values.yml. Check basic "
-                "authenticacion to the BMC: curl -u <<user:pass>> "
-                "<<BMC_URL>>",
-
+    "authenticacion to the BMC: curl -u <<user:pass>> <<BMC_URL>>",
     "ping_bmc": "Check reachability to the BMC: ping <<BMC_URL>>",
-
     "rvmc_process": "Ensure the previous RVMC process is terminated.",
-
     "rvmc_timeout": "Please check the dcmanager ansible log for details.",
-
     "dm_pod_failed": """- Ensure you are using the correct tarball that \
 corresponds to the image.
 - Check helm overrides files, ensure the deployment manager images exist in \
@@ -264,44 +255,38 @@ the specified registry and you can manually pull them from the registry.
 - Ensure you have installed the correct certificate.
 - Ensure you have logged in: sudo docker login registry.local:9001 \
 -u <<registry_user>> -p <<registry_password>>""",
-
     "dm_apply_failed": "Check deployment yaml file and ensure the content is "
-                       "syntactically and semantically correct.",
-
+    "syntactically and semantically correct.",
     "images_download_failure": "Check docker_registries and docker proxy "
-                               "configurations in bootstrap values yaml "
-                               "file. Ensure you can manually log into the "
-                               "registry e.g. sudo docker login "
-                               "registry.local:9001 -u <registry-user> "
-                               "-p <registry-password>",
-
-    "failed_ssl_cert": "Check if the right certificate was installed."
+    "configurations in bootstrap values yaml file. Ensure you can manually log into "
+    "the registry e.g. sudo docker login registry.local:9001 -u <registry-user> "
+    "-p <registry-password>",
+    "failed_ssl_cert": "Check if the right certificate was installed.",
 }
 
 # error_description max length
 ERROR_DESCRIPTION_LENGTH = 2048
 
 # Subcloud backup status states
-BACKUP_STATE_INITIAL = 'initial'
-BACKUP_STATE_VALIDATING = 'validating'
-BACKUP_STATE_VALIDATE_FAILED = 'validate-failed'
-BACKUP_STATE_PRE_BACKUP = 'pre-backup'
-BACKUP_STATE_PREP_FAILED = 'backup-prep-failed'
-BACKUP_STATE_IN_PROGRESS = 'backing-up'
-BACKUP_STATE_FAILED = 'failed'
-BACKUP_STATE_UNKNOWN = 'unknown'
-BACKUP_STATE_COMPLETE_LOCAL = 'complete-local'
-BACKUP_STATE_COMPLETE_CENTRAL = 'complete-central'
+BACKUP_STATE_INITIAL = "initial"
+BACKUP_STATE_VALIDATING = "validating"
+BACKUP_STATE_VALIDATE_FAILED = "validate-failed"
+BACKUP_STATE_PRE_BACKUP = "pre-backup"
+BACKUP_STATE_PREP_FAILED = "backup-prep-failed"
+BACKUP_STATE_IN_PROGRESS = "backing-up"
+BACKUP_STATE_FAILED = "failed"
+BACKUP_STATE_UNKNOWN = "unknown"
+BACKUP_STATE_COMPLETE_LOCAL = "complete-local"
+BACKUP_STATE_COMPLETE_CENTRAL = "complete-central"
 
 # Prestage States
 PRESTAGE_STATE_PACKAGES = STRATEGY_STATE_PRESTAGE_PACKAGES
 PRESTAGE_STATE_IMAGES = STRATEGY_STATE_PRESTAGE_IMAGES
-PRESTAGE_STATE_FAILED = 'failed'
-PRESTAGE_STATE_COMPLETE = 'complete'
+PRESTAGE_STATE_FAILED = "failed"
+PRESTAGE_STATE_COMPLETE = "complete"
 
 # States to indicate if a prestage operation is currently in progress
-STATES_FOR_ONGOING_PRESTAGE = [PRESTAGE_STATE_PACKAGES,
-                               PRESTAGE_STATE_IMAGES]
+STATES_FOR_ONGOING_PRESTAGE = [PRESTAGE_STATE_PACKAGES, PRESTAGE_STATE_IMAGES]
 
 # Alarm aggregation
 ALARMS_DISABLED = "disabled"
@@ -312,25 +297,25 @@ ALARM_CRITICAL_STATUS = "critical"
 DEPLOY_PLAYBOOK = "deploy_playbook"
 DEPLOY_OVERRIDES = "deploy_overrides"
 DEPLOY_CHART = "deploy_chart"
-DEPLOY_CONFIG = 'deploy_config'
+DEPLOY_CONFIG = "deploy_config"
 DEPLOY_PRESTAGE = "prestage_images"
 
 DEPLOY_COMMON_FILE_OPTIONS = [
     DEPLOY_PLAYBOOK,
     DEPLOY_OVERRIDES,
     DEPLOY_CHART,
-    DEPLOY_PRESTAGE
+    DEPLOY_PRESTAGE,
 ]
 
 
-DC_LOG_DIR = '/var/log/dcmanager/'
-DC_ANSIBLE_LOG_DIR = DC_LOG_DIR + 'ansible'
-INVENTORY_FILE_POSTFIX = '_inventory.yml'
+DC_LOG_DIR = "/var/log/dcmanager/"
+DC_ANSIBLE_LOG_DIR = DC_LOG_DIR + "ansible"
+INVENTORY_FILE_POSTFIX = "_inventory.yml"
 
 # The following password is just a temporary and internal password that is used
 # after a remote install as part of the upgrade. The real sysadmin password
 # will be restored af the subcloud is re-managed at the end of the upgrade.
-TEMP_SYSADMIN_PASSWORD = 'St8rlingXCloud*'
+TEMP_SYSADMIN_PASSWORD = "St8rlingXCloud*"
 
 # System mode
 SYSTEM_MODE_DUPLEX = "duplex"
@@ -338,44 +323,41 @@ SYSTEM_MODE_SIMPLEX = "simplex"
 SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct"
 
 # Load states
-ACTIVE_LOAD_STATE = 'active'
-INACTIVE_LOAD_STATE = 'inactive'
-IMPORTING_LOAD_STATE = 'importing'
-IMPORTED_LOAD_STATE = 'imported'
-IMPORTED_METADATA_LOAD_STATE = 'imported-metadata'
-ERROR_LOAD_STATE = 'error'
-DELETING_LOAD_STATE = 'deleting'
-IMPORTED_LOAD_STATES = [
-    IMPORTED_LOAD_STATE,
-    IMPORTED_METADATA_LOAD_STATE
-]
+ACTIVE_LOAD_STATE = "active"
+INACTIVE_LOAD_STATE = "inactive"
+IMPORTING_LOAD_STATE = "importing"
+IMPORTED_LOAD_STATE = "imported"
+IMPORTED_METADATA_LOAD_STATE = "imported-metadata"
+ERROR_LOAD_STATE = "error"
+DELETING_LOAD_STATE = "deleting"
+IMPORTED_LOAD_STATES = [IMPORTED_LOAD_STATE, IMPORTED_METADATA_LOAD_STATE]
 
 # extra_args for kube upgrade
-EXTRA_ARGS_TO_VERSION = 'to-version'
+EXTRA_ARGS_TO_VERSION = "to-version"
 # extra_args for kube rootca update
-EXTRA_ARGS_CERT_FILE = 'cert-file'
-EXTRA_ARGS_EXPIRY_DATE = 'expiry-date'
-EXTRA_ARGS_SUBJECT = 'subject'
-EXTRA_ARGS_SYSADMIN_PASSWORD = 'sysadmin_password'
-EXTRA_ARGS_FORCE = 'force'
+EXTRA_ARGS_CERT_FILE = "cert-file"
+EXTRA_ARGS_EXPIRY_DATE = "expiry-date"
+EXTRA_ARGS_SUBJECT = "subject"
+EXTRA_ARGS_SYSADMIN_PASSWORD = "sysadmin_password"
+EXTRA_ARGS_FORCE = "force"
 
 # extra_args for patching
-EXTRA_ARGS_UPLOAD_ONLY = 'upload-only'
-EXTRA_ARGS_PATCH = 'patch'
+EXTRA_ARGS_UPLOAD_ONLY = "upload-only"
+EXTRA_ARGS_PATCH = "patch"
 
 # extra_args for software
-EXTRA_ARGS_RELEASE_ID = 'release_id'
+EXTRA_ARGS_RELEASE_ID = "release_id"
 
 # http request/response arguments for prestage
-PRESTAGE_SOFTWARE_VERSION = 'prestage-software-version'
-PRESTAGE_REQUEST_RELEASE = 'release'
-PRESTAGE_FOR_INSTALL = 'for_install'
-PRESTAGE_FOR_SW_DEPLOY = 'for_sw_deploy'
+PRESTAGE_SOFTWARE_VERSION = "prestage-software-version"
+PRESTAGE_REQUEST_RELEASE = "release"
+PRESTAGE_FOR_INSTALL = "for_install"
+PRESTAGE_FOR_SW_DEPLOY = "for_sw_deploy"
 
 # Device Image Bitstream Types
-BITSTREAM_TYPE_ROOT_KEY = 'root-key'
-BITSTREAM_TYPE_FUNCTIONAL = 'functional'
-BITSTREAM_TYPE_KEY_REVOCATION = 'key-revocation'
+BITSTREAM_TYPE_ROOT_KEY = "root-key"
+BITSTREAM_TYPE_FUNCTIONAL = "functional"
+BITSTREAM_TYPE_KEY_REVOCATION = "key-revocation"
 
 # Platform Backup size default in MB
 DEFAULT_PERSISTENT_SIZE = 30000
@@ -388,91 +370,107 @@ PLATFORM_RETRY_MAX_ATTEMPTS = 5
 PLATFORM_RETRY_SLEEP_MILLIS = 5000
 
 # States to reject when processing a subcloud-backup restore request
-INVALID_DEPLOY_STATES_FOR_RESTORE = [DEPLOY_STATE_CREATING,
-                                     DEPLOY_STATE_PRE_INSTALL,
-                                     DEPLOY_STATE_INSTALLING,
-                                     DEPLOY_STATE_PRE_BOOTSTRAP,
-                                     DEPLOY_STATE_BOOTSTRAPPING,
-                                     DEPLOY_STATE_PRE_CONFIG,
-                                     DEPLOY_STATE_CONFIGURING,
-                                     DEPLOY_STATE_PRE_REHOME,
-                                     DEPLOY_STATE_REHOMING,
-                                     DEPLOY_STATE_PRE_RESTORE,
-                                     DEPLOY_STATE_RESTORING]
+INVALID_DEPLOY_STATES_FOR_RESTORE = [
+    DEPLOY_STATE_CREATING,
+    DEPLOY_STATE_PRE_INSTALL,
+    DEPLOY_STATE_INSTALLING,
+    DEPLOY_STATE_PRE_BOOTSTRAP,
+    DEPLOY_STATE_BOOTSTRAPPING,
+    DEPLOY_STATE_PRE_CONFIG,
+    DEPLOY_STATE_CONFIGURING,
+    DEPLOY_STATE_PRE_REHOME,
+    DEPLOY_STATE_REHOMING,
+    DEPLOY_STATE_PRE_RESTORE,
+    DEPLOY_STATE_RESTORING,
+]
 
 # States to reject when processing a subcloud delete request
 INVALID_DEPLOY_STATES_FOR_DELETE = [
-    DEPLOY_STATE_PRE_DEPLOY, DEPLOY_STATE_CREATING, DEPLOY_STATE_PRE_INSTALL,
-    DEPLOY_STATE_INSTALLING, DEPLOY_STATE_PRE_BOOTSTRAP, DEPLOY_STATE_BOOTSTRAPPING,
-    DEPLOY_STATE_PRE_CONFIG, DEPLOY_STATE_CONFIGURING, DEPLOY_STATE_DEPLOYING,
-    DEPLOY_STATE_ABORTING_INSTALL, DEPLOY_STATE_ABORTING_BOOTSTRAP,
-    DEPLOY_STATE_ABORTING_CONFIG, DEPLOY_STATE_MIGRATING_DATA,
-    DEPLOY_STATE_UPGRADE_ACTIVATED, DEPLOY_STATE_PRE_RESTORE, DEPLOY_STATE_RESTORING,
-    DEPLOY_STATE_PRE_REHOME, DEPLOY_STATE_REHOMING, DEPLOY_STATE_REHOME_PENDING,
-    DEPLOY_STATE_RECONFIGURING_NETWORK
+    DEPLOY_STATE_PRE_DEPLOY,
+    DEPLOY_STATE_CREATING,
+    DEPLOY_STATE_PRE_INSTALL,
+    DEPLOY_STATE_INSTALLING,
+    DEPLOY_STATE_PRE_BOOTSTRAP,
+    DEPLOY_STATE_BOOTSTRAPPING,
+    DEPLOY_STATE_PRE_CONFIG,
+    DEPLOY_STATE_CONFIGURING,
+    DEPLOY_STATE_DEPLOYING,
+    DEPLOY_STATE_ABORTING_INSTALL,
+    DEPLOY_STATE_ABORTING_BOOTSTRAP,
+    DEPLOY_STATE_ABORTING_CONFIG,
+    DEPLOY_STATE_MIGRATING_DATA,
+    DEPLOY_STATE_UPGRADE_ACTIVATED,
+    DEPLOY_STATE_PRE_RESTORE,
+    DEPLOY_STATE_RESTORING,
+    DEPLOY_STATE_PRE_REHOME,
+    DEPLOY_STATE_REHOMING,
+    DEPLOY_STATE_REHOME_PENDING,
+    DEPLOY_STATE_RECONFIGURING_NETWORK,
 ]
 
 # States to indicate if a backup operation is currently in progress
-STATES_FOR_ONGOING_BACKUP = [BACKUP_STATE_INITIAL,
-                             BACKUP_STATE_VALIDATING,
-                             BACKUP_STATE_PRE_BACKUP,
-                             BACKUP_STATE_IN_PROGRESS]
+STATES_FOR_ONGOING_BACKUP = [
+    BACKUP_STATE_INITIAL,
+    BACKUP_STATE_VALIDATING,
+    BACKUP_STATE_PRE_BACKUP,
+    BACKUP_STATE_IN_PROGRESS,
+]
 
 # The k8s secret that holds openldap CA certificate
 OPENLDAP_CA_CERT_SECRET_NAME = "system-local-ca"
 
-CERT_NAMESPACE_PLATFORM_CA_CERTS = 'cert-manager'
+CERT_NAMESPACE_PLATFORM_CA_CERTS = "cert-manager"
 
 # The ansible playbook base directories
-ANSIBLE_CURRENT_VERSION_BASE_PATH = '/usr/share/ansible/stx-ansible/playbooks'
-ANSIBLE_PREVIOUS_VERSION_BASE_PATH = '/opt/dc-vault/playbooks'
+ANSIBLE_CURRENT_VERSION_BASE_PATH = "/usr/share/ansible/stx-ansible/playbooks"
+ANSIBLE_PREVIOUS_VERSION_BASE_PATH = "/opt/dc-vault/playbooks"
 
 # The deployment manager artifacts usr directories
-ALTERNATE_DEPLOY_FILES_DIR = '/usr/local/share/applications'
+ALTERNATE_DEPLOY_FILES_DIR = "/usr/local/share/applications"
 
-ALTERNATE_HELM_CHART_OVERRIDES_DIR = ALTERNATE_DEPLOY_FILES_DIR + '/overrides'
-HELM_CHART_OVERRIDES_POSTFIX = '-overrides-subcloud.yaml'
+ALTERNATE_HELM_CHART_OVERRIDES_DIR = ALTERNATE_DEPLOY_FILES_DIR + "/overrides"
+HELM_CHART_OVERRIDES_POSTFIX = "-overrides-subcloud.yaml"
 
-ALTERNATE_HELM_CHART_DIR = ALTERNATE_DEPLOY_FILES_DIR + '/helm'
-HELM_CHART_POSTFIX = 'deployment-manager'
+ALTERNATE_HELM_CHART_DIR = ALTERNATE_DEPLOY_FILES_DIR + "/helm"
+HELM_CHART_POSTFIX = "deployment-manager"
 
-ALTERNATE_DEPLOY_PLAYBOOK_DIR = ALTERNATE_DEPLOY_FILES_DIR + '/playbooks'
-DEPLOY_PLAYBOOK_POSTFIX = 'deployment-manager.yaml'
+ALTERNATE_DEPLOY_PLAYBOOK_DIR = ALTERNATE_DEPLOY_FILES_DIR + "/playbooks"
+DEPLOY_PLAYBOOK_POSTFIX = "deployment-manager.yaml"
 
-SUPPORTED_UPGRADES_METADATA_FILE_PATH = '/usr/rootdirs/opt/upgrades/metadata.xml'
+SUPPORTED_UPGRADES_METADATA_FILE_PATH = "/usr/rootdirs/opt/upgrades/metadata.xml"
 
 # Required for subcloud name configuration
-CERT_MON_HTTP_AGENT = 'cert-mon/1.0'
+CERT_MON_HTTP_AGENT = "cert-mon/1.0"
 OS_REGION_NAME = "OS_REGION_NAME"
 
 # Required for GEO-redundancy
 # User-Agent check for subcloud by region_name request.
-DCMANAGER_V1_HTTP_AGENT = 'dcmanager/1.0'
+DCMANAGER_V1_HTTP_AGENT = "dcmanager/1.0"
 
 # batch rehome manage state wait timeout
 BATCH_REHOME_MGMT_STATES_TIMEOUT = 900
 
 # System peer availability state
-SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE = 'available'
-SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE = 'unavailable'
+SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE = "available"
+SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE = "unavailable"
 
 # Peer group migration status
-PEER_GROUP_MIGRATING = 'migrating'
-PEER_GROUP_MIGRATION_COMPLETE = 'complete'
-PEER_GROUP_MIGRATION_NONE = 'none'
+PEER_GROUP_MIGRATING = "migrating"
+PEER_GROUP_MIGRATION_COMPLETE = "complete"
+PEER_GROUP_MIGRATION_NONE = "none"
 
 PEER_GROUP_PRIMARY_PRIORITY = 0
 
 # Peer group association type
-ASSOCIATION_TYPE_PRIMARY = 'primary'
-ASSOCIATION_TYPE_NON_PRIMARY = 'non-primary'
+ASSOCIATION_TYPE_PRIMARY = "primary"
+ASSOCIATION_TYPE_NON_PRIMARY = "non-primary"
 
 # Peer group association sync status
-ASSOCIATION_SYNC_STATUS_SYNCING = 'syncing'
-ASSOCIATION_SYNC_STATUS_IN_SYNC = 'in-sync'
-ASSOCIATION_SYNC_STATUS_FAILED = 'failed'
-ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC = 'out-of-sync'
-ASSOCIATION_SYNC_STATUS_UNKNOWN = 'unknown'
+ASSOCIATION_SYNC_STATUS_SYNCING = "syncing"
+ASSOCIATION_SYNC_STATUS_IN_SYNC = "in-sync"
+ASSOCIATION_SYNC_STATUS_FAILED = "failed"
+ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC = "out-of-sync"
+ASSOCIATION_SYNC_STATUS_UNKNOWN = "unknown"
 
 # Peer monitor heartbeat policy
-HEARTBEAT_FAILURE_POLICY_ALARM = 'alarm'
+HEARTBEAT_FAILURE_POLICY_ALARM = "alarm"
@@ -29,14 +29,14 @@ from dcmanager.api.policies import base as base_policy
 from dcmanager.api import policy
 from dcmanager.db import api as db_api
 
-ALLOWED_WITHOUT_AUTH = '/'
+ALLOWED_WITHOUT_AUTH = "/"
 
 audit_log_name = "{}.{}".format(__name__, "auditor")
 auditLOG = log.getLogger(audit_log_name)
 
 
 def generate_request_id():
-    return 'req-%s' % uuidutils.generate_uuid()
+    return "req-%s" % uuidutils.generate_uuid()
 
 
 class RequestContext(base_context.RequestContext):
@@ -46,23 +46,46 @@ class RequestContext(base_context.RequestContext):
     the system, as well as additional request information.
     """
 
-    def __init__(self, auth_token=None, user=None, project=None,
-                 domain=None, user_domain=None, project_domain=None,
-                 is_admin=None, read_only=False, show_deleted=False,
-                 request_id=None, auth_url=None, trusts=None,
-                 user_name=None, project_name=None, domain_name=None,
-                 user_domain_name=None, project_domain_name=None,
-                 auth_token_info=None, region_name=None, roles=None,
-                 password=None, **kwargs):
+    def __init__(
+        self,
+        auth_token=None,
+        user=None,
+        project=None,
+        domain=None,
+        user_domain=None,
+        project_domain=None,
+        is_admin=None,
+        read_only=False,
+        show_deleted=False,
+        request_id=None,
+        auth_url=None,
+        trusts=None,
+        user_name=None,
+        project_name=None,
+        domain_name=None,
+        user_domain_name=None,
+        project_domain_name=None,
+        auth_token_info=None,
+        region_name=None,
+        roles=None,
+        password=None,
+        **kwargs,
+    ):
         """Initializer of request context."""
         # We still have 'tenant' param because oslo_context still use it.
         # pylint: disable=E1123
         super(RequestContext, self).__init__(
-            auth_token=auth_token, user=user, tenant=project,
-            domain=domain, user_domain=user_domain,
-            project_domain=project_domain, roles=roles,
-            read_only=read_only, show_deleted=show_deleted,
-            request_id=request_id)
+            auth_token=auth_token,
+            user=user,
+            tenant=project,
+            domain=domain,
+            user_domain=user_domain,
+            project_domain=project_domain,
+            roles=roles,
+            read_only=read_only,
+            show_deleted=show_deleted,
+            request_id=request_id,
+        )
 
         # request_id might be a byte array
         self.request_id = encodeutils.safe_decode(self.request_id)
@@ -90,8 +113,8 @@ class RequestContext(base_context.RequestContext):
         # Check user is admin or not
         if is_admin is None:
             self.is_admin = policy.authorize(
-                base_policy.ADMIN_IN_SYSTEM_PROJECTS, {}, self.to_dict(),
-                do_raise=False)
+                base_policy.ADMIN_IN_SYSTEM_PROJECTS, {}, self.to_dict(), do_raise=False
+            )
         else:
             self.is_admin = is_admin
@@ -103,26 +126,26 @@ class RequestContext(base_context.RequestContext):
 
     def to_dict(self):
         return {
-            'auth_url': self.auth_url,
-            'auth_token': self.auth_token,
-            'auth_token_info': self.auth_token_info,
-            'user': self.user,
-            'user_name': self.user_name,
-            'user_domain': self.user_domain,
-            'user_domain_name': self.user_domain_name,
-            'project': self.project,
-            'project_name': self.project_name,
-            'project_domain': self.project_domain,
-            'project_domain_name': self.project_domain_name,
-            'domain': self.domain,
-            'domain_name': self.domain_name,
-            'trusts': self.trusts,
-            'region_name': self.region_name,
-            'roles': self.roles,
-            'show_deleted': self.show_deleted,
-            'is_admin': self.is_admin,
-            'request_id': self.request_id,
-            'password': self.password,
+            "auth_url": self.auth_url,
+            "auth_token": self.auth_token,
+            "auth_token_info": self.auth_token_info,
+            "user": self.user,
+            "user_name": self.user_name,
+            "user_domain": self.user_domain,
+            "user_domain_name": self.user_domain_name,
+            "project": self.project,
+            "project_name": self.project_name,
+            "project_domain": self.project_domain,
+            "project_domain_name": self.project_domain_name,
+            "domain": self.domain,
+            "domain_name": self.domain_name,
+            "trusts": self.trusts,
+            "region_name": self.region_name,
+            "roles": self.roles,
+            "show_deleted": self.show_deleted,
+            "is_admin": self.is_admin,
+            "request_id": self.request_id,
+            "password": self.password,
         }
 
     @classmethod
@@ -151,15 +174,14 @@ class AuthHook(hooks.PecanHook):
         if state.request.path == ALLOWED_WITHOUT_AUTH:
             return
         req = state.request
-        identity_status = req.headers.get('X-Identity-Status')
-        service_identity_status = req.headers.get('X-Service-Identity-Status')
-        if (identity_status == 'Confirmed' or
-                service_identity_status == 'Confirmed'):
+        identity_status = req.headers.get("X-Identity-Status")
+        service_identity_status = req.headers.get("X-Service-Identity-Status")
+        if identity_status == "Confirmed" or service_identity_status == "Confirmed":
             return
-        if req.headers.get('X-Auth-Token'):
-            msg = 'Auth token is invalid: %s' % req.headers['X-Auth-Token']
+        if req.headers.get("X-Auth-Token"):
+            msg = "Auth token is invalid: %s" % req.headers["X-Auth-Token"]
         else:
-            msg = 'Authentication required'
+            msg = "Authentication required"
         msg = "Failed to validate access token: %s" % str(msg)
         pecan.abort(status_code=401, detail=msg)
@@ -195,37 +217,41 @@ class AuditLoggingHook(hooks.PecanHook):
 
         response_content_length = state.response.content_length
 
-        user_id = state.request.headers.get('X-User-Id')
-        user_name = state.request.headers.get('X-User', user_id)
-        tenant_id = state.request.headers.get('X-Tenant-Id')
-        tenant = state.request.headers.get('X-Tenant', tenant_id)
-        domain_name = state.request.headers.get('X-User-Domain-Name')
+        user_id = state.request.headers.get("X-User-Id")
+        user_name = state.request.headers.get("X-User", user_id)
+        tenant_id = state.request.headers.get("X-Tenant-Id")
+        tenant = state.request.headers.get("X-Tenant", tenant_id)
+        domain_name = state.request.headers.get("X-User-Domain-Name")
         try:
             request_id = state.request.context.request_id
         except AttributeError:
-            auditLOG.info("Request id is not in request, setting it to an "
-                          "auto generated id.")
+            auditLOG.info(
+                "Request id is not in request, setting it to an auto generated id."
+            )
             request_id = generate_request_id()
 
         url_path = urlparse(state.request.path_qs).path
 
         def json_post_data(rest_state):
-            if 'form-data' in rest_state.request.headers.get('Content-Type'):
+            if "form-data" in rest_state.request.headers.get("Content-Type"):
                 return " POST: {}".format(rest_state.request.params)
            try:
-                if not hasattr(rest_state.request, 'json'):
+                if not hasattr(rest_state.request, "json"):
                     return ""
                 return " POST: {}".format(rest_state.request.json)
             except Exception:
                 return ""
-        # Filter password from log
-        filtered_json = re.sub(r'{[^{}]*(passwd_hash|community|password)[^{}]*},*',
-                               '',
-                               json_post_data(state))
-
-        log_data = \
-            "{} \"{} {} {}\" status: {} len: {} time: {}{} host:{}" \
-            " agent:{} user: {} tenant: {} domain: {}".format(
+        # Filter password from log
+        filtered_json = re.sub(
+            r"{[^{}]*(passwd_hash|community|password)[^{}]*},*",
+            "",
+            json_post_data(state),
+        )
+
+        log_data = (
+            "{} '{} {} {}' status: {} len: {} time: {}{} host:{} "
+            "agent:{} user: {} tenant: {} domain: {}".format(
                 state.request.remote_addr,
                 state.request.method,
                 url_path,
@@ -238,7 +264,9 @@ class AuditLoggingHook(hooks.PecanHook):
                 state.request.user_agent,
                 user_name,
                 tenant,
-                domain_name)
+                domain_name,
+            )
+        )
 
         # The following ctx object will be output in the logger as
         # something like this:
@@ -247,9 +275,7 @@ class AuditLoggingHook(hooks.PecanHook):
         # ca53e70c76d847fd860693f8eb301546]
         # When the ctx is defined, the formatter (defined in common/log.py) requires
         # that keys request_id, user, tenant be defined within the ctx
-        ctx = {'request_id': request_id,
-               'user': user_id,
-               'tenant': tenant_id}
+        ctx = {"request_id": request_id, "user": user_id, "tenant": tenant_id}
 
         auditLOG.info("{}".format(log_data), context=ctx)
 
@@ -262,5 +288,6 @@ class AuditLoggingHook(hooks.PecanHook):
         auditLOG.exception("Exception in AuditLoggingHook on event 'after'")
 
     def on_error(self, state, e):
-        auditLOG.exception("Exception in AuditLoggingHook passed to event "
-                           "'on_error': " + str(e))
+        auditLOG.exception(
+            f"Exception in AuditLoggingHook passed to event 'on_error': {str(e)}"
+        )
@@ -51,7 +51,7 @@ class DCManagerException(Exception):
 
 
 class BadRequest(DCManagerException):
-    message = _('Bad %(resource)s request: %(msg)s')
+    message = _("Bad %(resource)s request: %(msg)s")
 
 
 class ValidateFail(DCManagerException):
@@ -65,7 +65,7 @@ class NotFound(DCManagerException):
 
 
 class Conflict(DCManagerException):
-    message = _('Conflict: %(msg)s')
+    message = _("Conflict: %(msg)s")
 
 
 class NotAuthorized(DCManagerException):
@@ -89,8 +89,7 @@ class InUse(DCManagerException):
 
 
 class InvalidConfigurationOption(DCManagerException):
-    message = _("An invalid value was provided for %(opt_name)s: "
-                "%(opt_value)s")
+    message = _("An invalid value was provided for %(opt_name)s: %(opt_value)s")
 
 
 class InvalidParameterValue(DCManagerException):
@@ -126,8 +125,10 @@ class SubcloudNotOnline(DCManagerException):
 
 
 class SubcloudStatusNotFound(NotFound):
-    message = _("SubcloudStatus with subcloud_id %(subcloud_id)s and "
-                "endpoint_type %(endpoint_type)s doesn't exist.")
+    message = _(
+        "SubcloudStatus with subcloud_id %(subcloud_id)s and "
+        "endpoint_type %(endpoint_type)s doesn't exist."
+    )
 
 
 class SubcloudNotUnmanaged(DCManagerException):
@@ -139,8 +140,10 @@ class SubcloudNotOffline(DCManagerException):
 
 
 class SubcloudPatchOptsNotFound(NotFound):
-    message = _("No options found for Subcloud with id %(subcloud_id)s, "
-                "defaults will be used.")
+    message = _(
+        "No options found for Subcloud with id %(subcloud_id)s, "
+        "defaults will be used."
+    )
 
 
 class SystemPeerNotFound(NotFound):
@@ -172,23 +175,25 @@ class SubcloudPeerGroupNotFound(NotFound):
 
 
 class PeerGroupAssociationCombinationNotFound(NotFound):
-    message = _("Peer Group Association between peer group: %(peer_group_id)s "
-                "and system peer: %(system_peer_id)s doesn't exist.")
+    message = _(
+        "Peer Group Association between peer group: %(peer_group_id)s "
+        "and system peer: %(system_peer_id)s doesn't exist."
+    )
 
 
 class PeerGroupAssociationTargetNotMatch(NotFound):
-    message = _("Peer Group Association with peer site controller "
-                "UUID %(uuid)s doesn't match.")
+    message = _(
+        "Peer Group Association with peer site controller "
+        "UUID %(uuid)s doesn't match."
+    )
 
 
 class SubcloudPeerGroupHasWrongPriority(DCManagerException):
-    message = _("Subcloud Peer group of peer site has wrong "
-                "priority %(priority)s.")
+    message = _("Subcloud Peer group of peer site has wrong priority %(priority)s.")
 
 
 class PeerGroupAssociationNotFound(NotFound):
-    message = _("Peer Group Association with id %(association_id)s "
-                "doesn't exist.")
+    message = _("Peer Group Association with id %(association_id)s doesn't exist.")
 
 
 class SubcloudGroupNameViolation(DCManagerException):
@@ -200,8 +205,10 @@ class SubcloudGroupDefaultNotDeletable(DCManagerException):
 
 
 class SubcloudBackupOperationFailed(DCManagerException):
-    message = _("Failed to run subcloud-backup %(operation)s. Please run "
-                "'dcmanager subcloud error' command for details")
+    message = _(
+        "Failed to run subcloud-backup %(operation)s. Please run "
+        "'dcmanager subcloud error' command for details"
+    )
 
 
 class SubcloudSyncFailedException(DCManagerException):
@@ -229,8 +236,10 @@ class CertificateUploadError(DCManagerException):
 
 
 class LicenseInstallError(DCManagerException):
-    message = _("Error while installing license on subcloud: "
-                "%(subcloud_id)s. %(error_message)s")
+    message = _(
+        "Error while installing license on subcloud: "
+        "%(subcloud_id)s. %(error_message)s"
+    )
 
 
 class LicenseMissingError(DCManagerException):
@@ -242,8 +251,7 @@ class KubeUpgradeFailedException(DCManagerException):
 
 
 class ManualRecoveryRequiredException(DCManagerException):
-    message = _("Subcloud: %(subcloud)s needs manual recovery from "
-                "%(error_message)s")
+    message = _("Subcloud: %(subcloud)s needs manual recovery from %(error_message)s")
 
 
 class PreCheckFailedException(DCManagerException):
@@ -273,9 +281,8 @@ class SoftwareDeployDeleteFailedException(DCManagerException):
 class PrestagePreCheckFailedException(DCManagerException):
     """PrestagePreCheckFailedException
 
-    Extended to include 'orch_skip' property, indicating that
-    the subcloud can be skipped during orchestrated prestage
-    operations.
+    Extended to include 'orch_skip' property, indicating that the subcloud
+    can be skipped during orchestrated prestage operations.
     """
 
     def __init__(self, subcloud, details, orch_skip=False):
@@ -285,11 +292,9 @@ class PrestagePreCheckFailedException(DCManagerException):
         if subcloud is None:
            self.message = _("Prestage failed: %s" % details)
         elif orch_skip:
-            self.message = _("Prestage skipped '%s': %s"
-                             % (subcloud, details))
+            self.message = _("Prestage skipped '%s': %s" % (subcloud, details))
         else:
-            self.message = _("Prestage failed '%s': %s"
-                             % (subcloud, details))
+            self.message = _("Prestage failed '%s': %s" % (subcloud, details))
         super(PrestagePreCheckFailedException, self).__init__()
 
 
@@ -298,8 +303,7 @@ class VaultLoadMissingError(DCManagerException):
 
 
 class StrategyStepNotFound(NotFound):
-    message = _("StrategyStep with subcloud_id %(subcloud_id)s "
-                "doesn't exist.")
+    message = _("StrategyStep with subcloud_id %(subcloud_id)s doesn't exist.")
 
 
 class StrategyStepNameNotFound(NotFound):
@@ -1,4 +1,4 @@
-# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
+# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,7 +16,7 @@
 
 import oslo_i18n
 
-_translators = oslo_i18n.TranslatorFactory(domain='dcmanager')
+_translators = oslo_i18n.TranslatorFactory(domain="dcmanager")
 
 # The primary translation function using the well-known name "_"
 _ = _translators.primary
 
@@ -58,7 +58,7 @@ class PeriodicTasks(periodic_task.PeriodicTasks):
 
 class Manager(PeriodicTasks):
 
-    def __init__(self, host=None, service_name='undefined'):
+    def __init__(self, host=None, service_name="undefined"):
         if not host:
             host = cfg.CONF.host
         self.host = host
@@ -1,4 +1,4 @@
-# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
+# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -63,11 +63,12 @@ def setup(url=None, optional=False):
     eventlet.monkey_patch(time=True)
 
     if not TRANSPORT:
-        oslo_messaging.set_transport_defaults('dcmanager')
-        exmods = ['dcmanager.common.exception']
+        oslo_messaging.set_transport_defaults("dcmanager")
+        exmods = ["dcmanager.common.exception"]
         try:
             TRANSPORT = oslo_messaging.get_transport(
-                cfg.CONF, url, allowed_remote_exmods=exmods)
+                cfg.CONF, url, allowed_remote_exmods=exmods
+            )
         except oslo_messaging.InvalidTransportURL as e:
             TRANSPORT = None
             if not optional or e.url:
@@ -89,9 +90,9 @@ def cleanup():
 def get_rpc_server(target, endpoint):
     """Return a configured oslo_messaging rpc server."""
     serializer = RequestContextSerializer(JsonPayloadSerializer())
-    return oslo_messaging.get_rpc_server(TRANSPORT, target, [endpoint],
-                                         executor='eventlet',
-                                         serializer=serializer)
+    return oslo_messaging.get_rpc_server(
+        TRANSPORT, target, [endpoint], executor="eventlet", serializer=serializer
+    )
 
 
 def get_rpc_client(timeout, **msg_target_kwargs):
@@ -99,8 +100,9 @@ def get_rpc_client(timeout, **msg_target_kwargs):
     target = oslo_messaging.Target(**msg_target_kwargs)
     serializer = RequestContextSerializer(JsonPayloadSerializer())
     # With timeout == None the default value will be 60 seconds
-    return oslo_messaging.RPCClient(TRANSPORT, target, timeout=timeout,
-                                    serializer=serializer)
+    return oslo_messaging.RPCClient(
+        TRANSPORT, target, timeout=timeout, serializer=serializer
+    )
 
 
 def get_transport():
File diff suppressed because it is too large
@@ -46,31 +46,36 @@ LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
 
 DEPLOY_BASE_DIR = dccommon_consts.DEPLOY_DIR
-ANSIBLE_PRESTAGE_SUBCLOUD_PACKAGES_PLAYBOOK = \
+ANSIBLE_PRESTAGE_SUBCLOUD_PACKAGES_PLAYBOOK = (
     "/usr/share/ansible/stx-ansible/playbooks/prestage_sw_packages.yml"
-ANSIBLE_PRESTAGE_SUBCLOUD_IMAGES_PLAYBOOK = \
+)
+ANSIBLE_PRESTAGE_SUBCLOUD_IMAGES_PLAYBOOK = (
     "/usr/share/ansible/stx-ansible/playbooks/prestage_images.yml"
-ANSIBLE_PRESTAGE_INVENTORY_SUFFIX = '_prestage_inventory.yml'
-PRINT_PRESTAGE_VERSIONS_TASK = \
-    'prestage\/prestage-versions : Print prestage versions'
-PRESTAGE_VERSIONS_KEY_STR = 'prestage_versions:'
+)
+ANSIBLE_PRESTAGE_INVENTORY_SUFFIX = "_prestage_inventory.yml"
+PRINT_PRESTAGE_VERSIONS_TASK = r"prestage\/prestage-versions : Print prestage versions"
+PRESTAGE_VERSIONS_KEY_STR = "prestage_versions:"
 
 
 def _get_system_controller_upgrades():
     # get a cached keystone client (and token)
     try:
         os_client = OpenStackDriver(
-            region_name=dccommon_consts.SYSTEM_CONTROLLER_NAME,
-            region_clients=None)
+            region_name=dccommon_consts.SYSTEM_CONTROLLER_NAME, region_clients=None
+        )
     except Exception:
-        LOG.exception("Failed to get keystone client for %s",
-                      dccommon_consts.SYSTEM_CONTROLLER_NAME)
+        LOG.exception(
+            "Failed to get keystone client for %s",
+            dccommon_consts.SYSTEM_CONTROLLER_NAME,
+        )
         raise
 
     ks_client = os_client.keystone_client
     sysinv_client = SysinvClient(
-        dccommon_consts.SYSTEM_CONTROLLER_NAME, ks_client.session,
-        endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
+        dccommon_consts.SYSTEM_CONTROLLER_NAME,
+        ks_client.session,
+        endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
+    )
 
     return sysinv_client.get_upgrades()
@@ -85,28 +90,36 @@ def global_prestage_validate(payload):
|
||||
if is_system_controller_upgrading():
|
||||
raise exceptions.PrestagePreCheckFailedException(
|
||||
subcloud=dccommon_consts.SYSTEM_CONTROLLER_NAME,
|
||||
details='Prestage operations are not allowed while system'
|
||||
' controller upgrade is in progress.')
|
||||
details=(
|
||||
"Prestage operations are not allowed while system "
|
||||
"controller upgrade is in progress."
|
||||
),
|
||||
)
|
||||
|
||||
if ('sysadmin_password' not in payload
|
||||
or payload['sysadmin_password'] is None
|
||||
or payload['sysadmin_password'] == ''):
|
||||
if (
|
||||
"sysadmin_password" not in payload
|
||||
or payload["sysadmin_password"] is None
|
||||
or payload["sysadmin_password"] == ""
|
||||
):
|
||||
raise exceptions.PrestagePreCheckFailedException(
|
||||
subcloud=None,
|
||||
orch_skip=False,
|
||||
details="Missing required parameter 'sysadmin_password'")
|
||||
details="Missing required parameter 'sysadmin_password'",
|
||||
)
|
||||
|
||||
# Ensure we can decode the sysadmin_password
|
||||
# (we decode again when running ansible)
|
||||
try:
|
||||
base64.b64decode(payload['sysadmin_password']).decode('utf-8')
|
||||
base64.b64decode(payload["sysadmin_password"]).decode("utf-8")
|
||||
except Exception as ex:
|
||||
raise exceptions.PrestagePreCheckFailedException(
|
||||
subcloud=None,
|
||||
orch_skip=False,
|
||||
details="Failed to decode subcloud sysadmin_password,"
|
||||
" verify the password is base64 encoded."
|
||||
" Details: %s" % ex)
|
||||
details=(
|
||||
"Failed to decode subcloud sysadmin_password, verify the password "
|
||||
"is base64 encoded. Details: %s" % ex
|
||||
),
|
||||
)
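
Note the contract this check enforces: callers must supply sysadmin_password
already base64-encoded, since validation round-trips it through
base64.b64decode(...).decode("utf-8"). A minimal sketch of a payload that
would pass (the password value is illustrative):

    import base64

    payload = {
        "sysadmin_password": base64.b64encode(b"My-sysadmin-pw").decode("utf-8"),
    }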


def initial_subcloud_validate(
@@ -123,53 +136,64 @@ def initial_subcloud_validate(

    if subcloud.availability_status != dccommon_consts.AVAILABILITY_ONLINE:
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Subcloud is offline.")
            subcloud=subcloud.name, orch_skip=True, details="Subcloud is offline."
        )

    if subcloud.management_state != dccommon_consts.MANAGEMENT_MANAGED:
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Subcloud is not managed.")
            subcloud=subcloud.name, orch_skip=True, details="Subcloud is not managed."
        )

    if subcloud.backup_status in consts.STATES_FOR_ONGOING_BACKUP:
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Prestage operation is not allowed while"
                    " backup is in progress.")
            details="Prestage operation is not allowed while backup is in progress.",
        )

    if subcloud.deploy_status != consts.DEPLOY_STATE_DONE:
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Prestage operation is not allowed when"
                    " subcloud deploy is not completed.")
            details=(
                "Prestage operation is not allowed when subcloud deploy is in progress."
            ),
        )

    allowed_prestage_states = [consts.PRESTAGE_STATE_FAILED,
                               consts.PRESTAGE_STATE_COMPLETE]
    if (subcloud.prestage_status and
            (subcloud.prestage_status not in allowed_prestage_states)):
    allowed_prestage_states = [
        consts.PRESTAGE_STATE_FAILED,
        consts.PRESTAGE_STATE_COMPLETE,
    ]
    if subcloud.prestage_status and (
        subcloud.prestage_status not in allowed_prestage_states
    ):
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Prestage operation is only allowed while"
                    " subcloud prestage status is one of: %s."
                    " The current prestage status is %s."
                    % (', '.join(allowed_prestage_states), subcloud.prestage_status))
            details=(
                "Prestage operation is only allowed while subcloud prestage "
                "status is one of: %s. The current prestage status is %s."
            )
            % (", ".join(allowed_prestage_states), subcloud.prestage_status),
        )

    # The request software version must be either the same as the software version
    # of the subcloud or any available/deployed release on the system controller
    # (can be checked with "software list" command).
    if not for_sw_deploy and software_major_release and \
            software_major_release != subcloud.software_version and \
            software_major_release not in installed_releases:
    if (
        not for_sw_deploy
        and software_major_release
        and software_major_release != subcloud.software_version
        and software_major_release not in installed_releases
    ):
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Specified release is not supported. "
                    f"{software_major_release} version must first be imported")
            details=(
                f"Specified release is not supported. {software_major_release} "
                "version must first be imported"
            ),
        )


def validate_prestage(subcloud, payload):
@@ -205,46 +229,52 @@ def validate_prestage(subcloud, payload):
        for_sw_deploy,
    )

    subcloud_type, system_health, oam_floating_ip = \
        _get_prestage_subcloud_info(subcloud)
    subcloud_type, system_health, oam_floating_ip = _get_prestage_subcloud_info(
        subcloud
    )

    if subcloud_type != consts.SYSTEM_MODE_SIMPLEX:
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=True,
            details="Prestage operation is only accepted for a simplex"
                    " subcloud.")
            details="Prestage operation is only accepted for a simplex subcloud.",
        )

    if (not payload['force']
            and not utils.pre_check_management_affected_alarm(system_health)):
    if not payload["force"] and not utils.pre_check_management_affected_alarm(
        system_health
    ):
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            orch_skip=False,
            details="Subcloud has management affecting alarm(s)."
                    " Please resolve the alarm condition(s)"
                    " or use --force option and try again.")
            details=(
                "Subcloud has management affecting alarm(s). Please resolve the alarm "
                "condition(s) or use --force option and try again."
            ),
        )

    return oam_floating_ip


def prestage_start(context, subcloud_id):
    subcloud = db_api.subcloud_update(
        context, subcloud_id,
        prestage_status=consts.PRESTAGE_STATE_PACKAGES)
        context, subcloud_id, prestage_status=consts.PRESTAGE_STATE_PACKAGES
    )
    return subcloud


def prestage_complete(context, subcloud_id, prestage_versions):
    db_api.subcloud_update(
        context, subcloud_id,
        context,
        subcloud_id,
        prestage_status=consts.PRESTAGE_STATE_COMPLETE,
        prestage_versions=prestage_versions)
        prestage_versions=prestage_versions,
    )


def prestage_fail(context, subcloud_id):
    db_api.subcloud_update(
        context, subcloud_id,
        prestage_status=consts.PRESTAGE_STATE_FAILED)
        context, subcloud_id, prestage_status=consts.PRESTAGE_STATE_FAILED
    )
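
Taken together, these helpers encode the prestage status lifecycle that
initial_subcloud_validate() checks against; a sketch of the transitions (the
state constants are the ones shown above):

    # prestage_start()    -> consts.PRESTAGE_STATE_PACKAGES
    # prestage_complete() -> consts.PRESTAGE_STATE_COMPLETE (records prestage_versions)
    # prestage_fail()     -> consts.PRESTAGE_STATE_FAILED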


def is_local(subcloud_version, specified_version):
@@ -272,7 +302,7 @@ def prestage_subcloud(context, payload):
    3. Images prestaging
       - run prestage_images.yml ansible playbook
    """
    subcloud_name = payload['subcloud_name']
    subcloud_name = payload["subcloud_name"]
    for_sw_deploy = is_prestage_for_sw_deploy(payload)
    LOG.info(
        f"Prestaging subcloud: {subcloud_name}, "
@@ -281,17 +311,19 @@ def prestage_subcloud(context, payload):
    try:
        subcloud = db_api.subcloud_get_by_name(context, subcloud_name)
    except exceptions.SubcloudNameNotFound:
        LOG.info("Prestage validation failure: "
                 "subcloud '%s' does not exist", subcloud_name)
        LOG.info(
            "Prestage validation failure: subcloud '%s' does not exist",
            subcloud_name,
        )
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud_name,
            details="Subcloud does not exist")
            subcloud=subcloud_name, details="Subcloud does not exist"
        )

    subcloud = prestage_start(context, subcloud.id)
    try:
        apply_thread = threading.Thread(
            target=_prestage_standalone_thread,
            args=(context, subcloud, payload))
            target=_prestage_standalone_thread, args=(context, subcloud, payload)
        )

        apply_thread.start()

@@ -311,7 +343,8 @@ def _prestage_standalone_thread(context, subcloud, payload):
    # Get the prestage versions from the logs generated by
    # the prestage packages playbook
    prestage_versions = utils.get_msg_output_info(
        log_file, PRINT_PRESTAGE_VERSIONS_TASK, PRESTAGE_VERSIONS_KEY_STR)
        log_file, PRINT_PRESTAGE_VERSIONS_TASK, PRESTAGE_VERSIONS_KEY_STR
    )

    # TODO(kmacleod) need to invoke this for retagging images
    if not for_sw_deploy:
@@ -341,10 +374,10 @@ def _get_prestage_subcloud_info(subcloud):
            fetch_subcloud_ips=utils.fetch_subcloud_mgmt_ips,
        )
        keystone_client = os_client.keystone_client
        endpoint = keystone_client.endpoint_cache.get_endpoint('sysinv')
        sysinv_client = SysinvClient(subcloud.region_name,
                                     keystone_client.session,
                                     endpoint=endpoint)
        endpoint = keystone_client.endpoint_cache.get_endpoint("sysinv")
        sysinv_client = SysinvClient(
            subcloud.region_name, keystone_client.session, endpoint=endpoint
        )
        mode = sysinv_client.get_system().system_mode
        health = sysinv_client.get_system_health()
        oam_floating_ip = sysinv_client.get_oam_addresses().oam_floating_ip
@@ -354,94 +387,107 @@ def _get_prestage_subcloud_info(subcloud):
        LOG.exception(e)
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=subcloud.name,
            details="Failed to retrieve subcloud system mode and system health.")
            details="Failed to retrieve subcloud system mode and system health.",
        )


def _run_ansible(context, prestage_command, phase,
                 subcloud, prestage_status,
                 sysadmin_password, oam_floating_ip,
                 software_version,
                 ansible_subcloud_inventory_file,
                 timeout_seconds=None):
def _run_ansible(
    context,
    prestage_command,
    phase,
    subcloud,
    prestage_status,
    sysadmin_password,
    oam_floating_ip,
    software_version,
    ansible_subcloud_inventory_file,
    timeout_seconds=None,
):
    if not timeout_seconds:
        # We always want to set a timeout in prestaging operations:
        timeout_seconds = CONF.playbook_timeout

    LOG.info("Prestaging %s for subcloud: %s, version: %s, timeout: %ss",
             phase, subcloud.name, software_version, timeout_seconds)
    LOG.info(
        "Prestaging %s for subcloud: %s, version: %s, timeout: %ss",
        phase,
        subcloud.name,
        software_version,
        timeout_seconds,
    )

    db_api.subcloud_update(context,
                           subcloud.id,
                           prestage_status=prestage_status)
    db_api.subcloud_update(context, subcloud.id, prestage_status=prestage_status)

    # Create the ansible inventory for the new subcloud
    utils.create_subcloud_inventory_with_admin_creds(
        subcloud.name,
        ansible_subcloud_inventory_file,
        oam_floating_ip,
        ansible_pass=utils.decode_and_normalize_passwd(sysadmin_password))
        ansible_pass=utils.decode_and_normalize_passwd(sysadmin_password),
    )

    log_file = utils.get_subcloud_ansible_log_file(subcloud.name)

    try:
        ansible = AnsiblePlaybook(subcloud.name)
        ansible.run_playbook(log_file, prestage_command, timeout=timeout_seconds,
                             register_cleanup=True)
        ansible.run_playbook(
            log_file, prestage_command, timeout=timeout_seconds, register_cleanup=True
        )
    except PlaybookExecutionFailed as ex:
        timeout_msg = ''
        timeout_msg = ""
        if isinstance(ex, PlaybookExecutionTimeout):
            timeout_msg = ' (TIMEOUT)'
        msg = ("Prestaging %s failed%s for subcloud %s,"
               " check individual log at %s for detailed output."
               % (phase, timeout_msg, subcloud.name, log_file))
            timeout_msg = " (TIMEOUT)"
        msg = (
            "Prestaging %s failed%s for subcloud %s, "
            "check individual log at %s for detailed output."
            % (phase, timeout_msg, subcloud.name, log_file)
        )
        LOG.exception("%s: %s", msg, ex)
        raise Exception(msg)
    finally:
        utils.delete_subcloud_inventory(ansible_subcloud_inventory_file)

    LOG.info("Prestage %s successful for subcloud %s",
             phase, subcloud.name)
    LOG.info("Prestage %s successful for subcloud %s", phase, subcloud.name)


def prestage_packages(context, subcloud, payload):
    """Run the prestage packages ansible script."""

    # Ansible inventory filename for the specified subcloud
    ansible_subcloud_inventory_file = \
        utils.get_ansible_filename(subcloud.name,
                                   ANSIBLE_PRESTAGE_INVENTORY_SUFFIX)

    prestage_software_version = payload.get(
        consts.PRESTAGE_REQUEST_RELEASE, SW_VERSION
    ansible_subcloud_inventory_file = utils.get_ansible_filename(
        subcloud.name, ANSIBLE_PRESTAGE_INVENTORY_SUFFIX
    )

    prestage_software_version = payload.get(consts.PRESTAGE_REQUEST_RELEASE, SW_VERSION)
    prestage_major_release = utils.get_major_release(prestage_software_version)
    extra_vars_str = f"software_version={prestage_software_version} "
    extra_vars_str += f"software_major_release={prestage_major_release}"

    if is_prestage_for_sw_deploy(payload):
        extra_vars_str += (
            f" prestage_install={consts.PRESTAGE_FOR_SW_DEPLOY}"
        )
        extra_vars_str += f" prestage_install={consts.PRESTAGE_FOR_SW_DEPLOY}"
    else:
        # default
        extra_vars_str += (
            f" prestage_install={consts.PRESTAGE_FOR_INSTALL}"
        )
        extra_vars_str += f" prestage_install={consts.PRESTAGE_FOR_INSTALL}"

    ostree_mount.validate_ostree_iso_mount(prestage_major_release)

    _run_ansible(context,
                 ["ansible-playbook",
                  ANSIBLE_PRESTAGE_SUBCLOUD_PACKAGES_PLAYBOOK,
                  "--inventory", ansible_subcloud_inventory_file,
                  "--extra-vars", extra_vars_str],
                 "packages",
                 subcloud,
                 consts.PRESTAGE_STATE_PACKAGES,
                 payload['sysadmin_password'],
                 payload['oam_floating_ip'],
                 prestage_software_version,
                 ansible_subcloud_inventory_file)
    _run_ansible(
        context,
        [
            "ansible-playbook",
            ANSIBLE_PRESTAGE_SUBCLOUD_PACKAGES_PLAYBOOK,
            "--inventory",
            ansible_subcloud_inventory_file,
            "--extra-vars",
            extra_vars_str,
        ],
        "packages",
        subcloud,
        consts.PRESTAGE_STATE_PACKAGES,
        payload["sysadmin_password"],
        payload["oam_floating_ip"],
        prestage_software_version,
        ansible_subcloud_inventory_file,
    )
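
For context, the argument list passed here is eventually executed by
AnsiblePlaybook.run_playbook() inside _run_ansible(); joined into a command
line it corresponds roughly to the following (the angle-bracket placeholders
stand for the variables shown above):

    ansible-playbook /usr/share/ansible/stx-ansible/playbooks/prestage_sw_packages.yml \
        --inventory <subcloud>_prestage_inventory.yml \
        --extra-vars "software_version=<version> software_major_release=<release> prestage_install=<mode>"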


def prestage_images(context, subcloud, payload):
@@ -458,11 +504,8 @@ def prestage_images(context, subcloud, payload):
    regardless of whether prestage_images.yml playbook is executed or skipped.

    """
    prestage_software_version = payload.get(
        consts.PRESTAGE_REQUEST_RELEASE, SW_VERSION)
    prestage_major_release = utils.get_major_release(
        prestage_software_version
    )
    prestage_software_version = payload.get(consts.PRESTAGE_REQUEST_RELEASE, SW_VERSION)
    prestage_major_release = utils.get_major_release(prestage_software_version)
    extra_vars_str = f"software_version={prestage_software_version} "
    extra_vars_str += f"software_major_release={prestage_major_release}"

@@ -476,39 +519,47 @@ def prestage_images(context, subcloud, payload):
    image_list_filename = None
    deploy_dir = os.path.join(DEPLOY_BASE_DIR, prestage_major_release)
    if os.path.isdir(deploy_dir):
        image_list_filename = utils.get_filename_by_prefix(deploy_dir,
                                                           'prestage_images')
        image_list_filename = utils.get_filename_by_prefix(
            deploy_dir, "prestage_images"
        )
    if image_list_filename:
        image_list_file = os.path.join(deploy_dir, image_list_filename)
        # include this file in the ansible args:
        extra_vars_str += (" image_list_file=%s" % image_list_file)
        extra_vars_str += " image_list_file=%s" % image_list_file
        LOG.debug("prestage images list file: %s", image_list_file)
    else:
        LOG.debug("prestage images list file does not exist")
        if prestage_major_release != subcloud.software_version:
            # Prestage source is remote but there is no images list file so
            # skip the images prestage.
            LOG.info("Images prestage is skipped for %s as the prestage images "
                     "list for release %s has not been uploaded and the "
                     "subcloud is running a different load than %s."
                     % (subcloud.name, prestage_major_release,
                        prestage_major_release))
            LOG.info(
                "Images prestage is skipped for %s as the prestage images "
                "list for release %s has not been uploaded and the "
                "subcloud is running a different load than %s."
                % (subcloud.name, prestage_major_release, prestage_major_release)
            )
            return

    # Ansible inventory filename for the specified subcloud
    ansible_subcloud_inventory_file = \
        utils.get_ansible_filename(subcloud.name,
                                   ANSIBLE_PRESTAGE_INVENTORY_SUFFIX)
    _run_ansible(context,
                 ["ansible-playbook",
                  ANSIBLE_PRESTAGE_SUBCLOUD_IMAGES_PLAYBOOK,
                  "--inventory", ansible_subcloud_inventory_file,
                  "--extra-vars", extra_vars_str],
                 "images",
                 subcloud,
                 consts.PRESTAGE_STATE_IMAGES,
                 payload['sysadmin_password'],
                 payload['oam_floating_ip'],
                 prestage_software_version,
                 ansible_subcloud_inventory_file,
                 timeout_seconds=CONF.playbook_timeout * 2)
    ansible_subcloud_inventory_file = utils.get_ansible_filename(
        subcloud.name, ANSIBLE_PRESTAGE_INVENTORY_SUFFIX
    )
    _run_ansible(
        context,
        [
            "ansible-playbook",
            ANSIBLE_PRESTAGE_SUBCLOUD_IMAGES_PLAYBOOK,
            "--inventory",
            ansible_subcloud_inventory_file,
            "--extra-vars",
            extra_vars_str,
        ],
        "images",
        subcloud,
        consts.PRESTAGE_STATE_IMAGES,
        payload["sysadmin_password"],
        payload["oam_floating_ip"],
        prestage_software_version,
        ansible_subcloud_inventory_file,
        timeout_seconds=CONF.playbook_timeout * 2,
    )

@@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -75,8 +75,7 @@ def reschedule(action, sleep_time=1):
    """

    if sleep_time is not None:
        LOG.debug('Action %s sleep for %s seconds' % (
            action.id, sleep_time))
        LOG.debug("Action %s sleep for %s seconds" % (action.id, sleep_time))
        eventlet.sleep(sleep_time)



@@ -28,9 +28,11 @@ class Mapping(object):
            self.reverse_mapping[value] = key


_SINGLETON_MAPPING = Mapping({
    ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
})
_SINGLETON_MAPPING = Mapping(
    {
        ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
    }
)


class DCManagerSerializer(oslo_messaging.Serializer):

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
# Copyright 2011 OpenStack Foundation
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -19,7 +19,7 @@ DCMANAGER_VENDOR = "Wind River Systems"
DCMANAGER_PRODUCT = "Distributed Cloud Manager"
DCMANAGER_PACKAGE = None  # OS distro package version suffix

version_info = pbr.version.VersionInfo('distributedcloud')
version_info = pbr.version.VersionInfo("distributedcloud")
version_string = version_info.version_string



@@ -709,9 +709,9 @@ class TestPhasedSubcloudDeployPatchInstall(BaseTestPhasedSubcloudDeployPatch):
        response = self._send_request()

        self._assert_pecan_and_response(
            response, http.client.BAD_REQUEST, f'Failed to get {SW_VERSION} load '
            'image. Provide active/inactive load image via "system --os-region-name '
            'SystemController load-import --active/--inactive"'
            response, http.client.BAD_REQUEST, f"Failed to get {SW_VERSION} load "
            "image. Provide active/inactive load image via 'system --os-region-name "
            "SystemController load-import --active/--inactive'"
        )
        self.mock_rpc_client().subcloud_deploy_install.assert_not_called()


@@ -930,9 +930,9 @@ class TestSubcloudsPostInstallData(BaseTestSubcloudsPost):
        response = self._send_request()

        self._assert_pecan_and_response(
            response, http.client.BAD_REQUEST, 'Failed to get TEST.SW.VERSION load '
            'image. Provide active/inactive load image via "system --os-region-name '
            'SystemController load-import --active/--inactive"'
            response, http.client.BAD_REQUEST, "Failed to get TEST.SW.VERSION load "
            "image. Provide active/inactive load image via 'system --os-region-name "
            "SystemController load-import --active/--inactive'"
        )

    @mock.patch.object(os.path, "isfile", return_value=True)
@@ -2299,8 +2299,8 @@ class TestSubcloudsPatchRedeploy(BaseTestSubcloudsPatch):

        if key == "name":
            error_msg = (
                f'The bootstrap-values "{key}" value (None) must match the '
                f'current subcloud name ({self.subcloud[key]})'
                f"The bootstrap-values '{key}' value (None) must match the "
                f"current subcloud name ({self.subcloud[key]})"
            )
        elif key == "sysadmin_password":
            error_msg = f"subcloud {key} required"
@@ -2474,7 +2474,7 @@ class TestSubcloudsPatchPrestage(BaseTestSubcloudsPatch):
        self._assert_pecan_and_response(
            response, http.client.BAD_REQUEST, "Prestage skipped "
            f"'{self.subcloud.name}': Prestage operation is not allowed when "
            "subcloud deploy is not completed."
            "subcloud deploy is in progress."
        )

    def test_patch_prestage_fails_with_duplex_subcloud(self):

@@ -845,7 +845,7 @@ class TestSubcloudManager(BaseTestSubcloudManager):
        self.mock_log.error.assert_called_once_with(
            'FAILED reconfiguring-network playbook of (subcloud1).'
            '\ncheck individual log at /var/log/dcmanager/ansible'
            '/subcloud1_playbook_output.log for detailed output ')
            '/subcloud1_playbook_output.log for detailed output')
        updated_subcloud = db_api.subcloud_get_by_name(self.ctx, self.subcloud.name)
        self.assertEqual(consts.DEPLOY_STATE_RECONFIGURING_NETWORK_FAILED,
                         updated_subcloud.deploy_status)
@@ -1141,9 +1141,9 @@ class TestSubcloudDeploy(BaseTestSubcloudManager):
        # Verify the subcloud rehomed flag is False after bootstrapped
        self.assertFalse(updated_subcloud.rehomed)
        self.mock_log.error.assert_called_once_with(
            'FAILED bootstrapping playbook of (fake subcloud1).\ncheck'
            ' individual log at /var/log/dcmanager/ansible/fake'
            ' subcloud1_playbook_output.log for detailed output ')
            'FAILED bootstrapping playbook of (fake subcloud1).\n'
            'check individual log at /var/log/dcmanager/ansible/fake '
            'subcloud1_playbook_output.log for detailed output')

    @mock.patch.object(subcloud_manager.SubcloudManager,
                       '_deploy_bootstrap_prep')
@@ -1417,9 +1417,8 @@ class TestSubcloudDeploy(BaseTestSubcloudManager):
        self.mock_log.error.assert_called_once_with(
            'Enroll failed for subcloud fake subcloud1: '
            'FAILED enrolling playbook of (fake subcloud1).'
            '\ncheck individual log at '
            '/var/log/dcmanager/ansible/fake '
            'subcloud1_playbook_output.log for detailed output ')
            '\ncheck individual log at /var/log/dcmanager/ansible/fake '
            'subcloud1_playbook_output.log for detailed output')


class TestSubcloudAdd(BaseTestSubcloudManager):
@@ -3059,8 +3058,8 @@ class TestSubcloudBackup(BaseTestSubcloudManager):
                         updated_subcloud.backup_status)
        self.mock_log.error.assert_called_once_with(
            f'FAILED backing-up playbook of ({self.subcloud.name}).'
            '\ncheck individual log at subcloud1_fake_file.yml_playbook_output.log'
            ' for detailed output ')
            '\ncheck individual log at subcloud1_fake_file.yml_playbook_output.log '
            'for detailed output')

    def test_backup_create_managed_online_backup_state_in_progess(self):
        self.backup_values['local_only'] = False
@@ -3362,13 +3361,13 @@ class TestSubcloudBackup(BaseTestSubcloudManager):
        )
        Calls = [
            mock.call(
                'Failed to delete backup for subcloud subcloud1, check individual'
                ' log at /var/log/dcmanager/ansible/subcloud1_playbook_output.log'
                ' for detailed output.'),
                'Failed to delete backup for subcloud subcloud1, check individual '
                'log at /var/log/dcmanager/ansible/subcloud1_playbook_output.log '
                'for detailed output.'),
            mock.call(
                'FAILED failed playbook of (subcloud1).\ncheck individual'
                ' log at /var/log/dcmanager/ansible/subcloud1_playbook_output.log'
                ' for detailed output ')]
                'FAILED failed playbook of (subcloud1).\ncheck individual '
                'log at /var/log/dcmanager/ansible/subcloud1_playbook_output.log '
                'for detailed output')]
        self.mock_log.error.assert_has_calls(Calls)
        mock_create_backup_overrides_file.assert_called_once()
        mock_compose_backup_delete_command.assert_called_once()

@@ -26,6 +26,7 @@ formatted_modules = [
    "dcagent",
    "dcmanager/api",
    "dcmanager/audit",
    "dcmanager/common",
]