Implement dcmanager kube-rootca-update orchestration

Orchestration stages are:
 - create kube rootca update vim strategy
 - apply kube rootca update vim strategy

Audit for subclouds does not examine region one.
It looks for particular alarms in the subcloud.

--force allows a subcloud to be orchestrated even if it
is in-sync.  It also allows a group to be specified.
This behaviour is for kube rootca update only.

A future submission will allow:
 - expiry_date and subject support
 - upload-cert support

Story: 2008675
Task: 42900
Depends-On: https://review.opendev.org/c/starlingx/nfv/+/797555
Change-Id: Ib1fa846ef813f79a41ca0fe5f8b845041b9825c4
Signed-off-by: albailey <Al.Bailey@windriver.com>
This commit is contained in:
albailey
2021-06-29 15:31:01 -05:00
parent 43f138ddc2
commit 1692480b8e
32 changed files with 876 additions and 225 deletions

View File

@@ -11,7 +11,7 @@
# under the License.
#
#
# Copyright (c) 2018-2020 Wind River Systems, Inc.
# Copyright (c) 2018-2021 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
@@ -57,6 +57,38 @@ class FmClient(base.DriverBase):
raise e
return alarms
def get_alarms_by_id(self, alarm_id):
    """Return the alarms raised in this region that match one alarm_id.

    :param alarm_id: the FM alarm id string to filter on
    :return: list of alarm objects (suppressed alarms included)
    """
    try:
        LOG.debug("get_alarms_by_id %s, region %s" % (alarm_id,
                                                      self.region_name))
        query_filter = fmclient.common.options.cli_to_array(
            'alarm_id=' + alarm_id)
        return self.fm.alarm.list(q=query_filter, include_suppress=True)
    except Exception as e:
        LOG.error("get_alarms_by_id exception={}".format(e))
        raise e
def get_alarms_by_ids(self, alarm_id_list):
    """Return the alarms raised in this region matching any given alarm id.

    :param alarm_id_list: list of FM alarm id strings to filter on
    :return: merged list of alarm objects (suppressed alarms included)
    """
    try:
        LOG.debug("get_alarms_by_ids %s, region %s" % (alarm_id_list,
                                                       self.region_name))
        # The FM API cannot query multiple alarm IDs in one call, so
        # issue one request per ID and merge the results.
        combined_alarms = []
        for aid in alarm_id_list:
            combined_alarms.extend(self.fm.alarm.list(
                q=fmclient.common.options.cli_to_array('alarm_id=' + aid),
                include_suppress=True))
    except Exception as e:
        LOG.error("get_alarms_by_ids exception={}".format(e))
        raise e
    return combined_alarms
def get_alarms(self):
"""Get this region alarms"""

View File

@@ -77,6 +77,31 @@ KUBE_HOST_UPGRADING_CONTROL_PLANE_FAILED = 'upgrading-control-plane-failed'
KUBE_HOST_UPGRADING_KUBELET = 'upgrading-kubelet'
KUBE_HOST_UPGRADING_KUBELET_FAILED = 'upgrading-kubelet-failed'
# Kubernetes rootca update states
KUBE_ROOTCA_UPDATE_STARTED = 'update-started'
KUBE_ROOTCA_UPDATE_CERT_UPLOADED = 'update-new-rootca-cert-uploaded'
KUBE_ROOTCA_UPDATE_CERT_GENERATED = 'update-new-rootca-cert-generated'
KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS = 'updating-pods-trust-both-cas'
KUBE_ROOTCA_UPDATED_PODS_TRUSTBOTHCAS = 'updated-pods-trust-both-cas'
KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS_FAILED = 'updating-pods-trust-both-cas-failed'
KUBE_ROOTCA_UPDATING_PODS_TRUSTNEWCA = 'updating-pods-trust-new-ca'
KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA = 'updated-pods-trust-new-ca'
KUBE_ROOTCA_UPDATING_PODS_TRUSTNEWCA_FAILED = 'updating-pods-trust-new-ca-failed'
KUBE_ROOTCA_UPDATE_COMPLETED = 'update-completed'
KUBE_ROOTCA_UPDATE_ABORTED = 'update-aborted'
# Kubernetes rootca host update states
KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS = 'updating-host-trust-both-cas'
KUBE_ROOTCA_UPDATED_HOST_TRUSTBOTHCAS = 'updated-host-trust-both-cas'
KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS_FAILED = 'updating-host-trust-both-cas-failed'
KUBE_ROOTCA_UPDATING_HOST_UPDATECERTS = 'updating-host-update-certs'
KUBE_ROOTCA_UPDATED_HOST_UPDATECERTS = 'updated-host-update-certs'
KUBE_ROOTCA_UPDATING_HOST_UPDATECERTS_FAILED = 'updating-host-update-certs-failed'
KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA = 'updating-host-trust-new-ca'
KUBE_ROOTCA_UPDATED_HOST_TRUSTNEWCA = 'updated-host-trust-new-ca'
KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA_FAILED = 'updating-host-trust-new-ca-failed'
# The following is the name of the host filesystem 'scratch' which is used
# by dcmanager upgrade orchestration for the load import operations.
HOST_FS_NAME_SCRATCH = 'scratch'
@@ -681,6 +706,18 @@ class SysinvClient(base.DriverBase):
"""Get a list of device image states."""
return self.sysinv_client.device_image_state.list()
def get_kube_rootca_update(self, update_uuid):
    """Fetch the details of one kubernetes rootca update.

    :param update_uuid: kube rootca update uuid
    :return: the update details, or None if the update is not found
    """
    rootca_api = self.sysinv_client.kube_rootca_update
    return rootca_api.get(update_uuid)
def get_kube_rootca_updates(self):
    """Return the list of kubernetes rootca updates (possibly empty)."""
    rootca_api = self.sysinv_client.kube_rootca_update
    return rootca_api.list()
def get_kube_upgrade(self, kube_upgrade_uuid):
"""Retrieve the details of a given kubernetes upgrade

View File

@@ -31,6 +31,7 @@ from dccommon import exceptions
LOG = log.getLogger(__name__)
STRATEGY_NAME_FW_UPDATE = 'fw-update'
STRATEGY_NAME_KUBE_ROOTCA_UPDATE = 'kube-rootca-update'
STRATEGY_NAME_KUBE_UPGRADE = 'kube-upgrade'
STRATEGY_NAME_SW_PATCH = 'sw-patch'
STRATEGY_NAME_SW_UPGRADE = 'sw-upgrade'

View File

@@ -62,4 +62,10 @@ class NotificationsController(object):
# the next audit cycle.
context = restcomm.extract_context_from_environ()
self.audit_rpc_client.trigger_kubernetes_audit(context)
if 'kube-rootca-update-completed' in events:
# We're being notified that a kube rootca update has completed, so
# we want to trigger a kube rootca update audit of all subclouds on
# the next audit cycle.
context = restcomm.extract_context_from_environ()
self.audit_rpc_client.trigger_kube_rootca_update_audit(context)
return

View File

@@ -41,6 +41,7 @@ LOG = logging.getLogger(__name__)
SUPPORTED_STRATEGY_TYPES = [
consts.SW_UPDATE_TYPE_FIRMWARE,
consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE,
consts.SW_UPDATE_TYPE_KUBERNETES,
consts.SW_UPDATE_TYPE_PATCH,
consts.SW_UPDATE_TYPE_UPGRADE
@@ -166,10 +167,13 @@ class SwUpdateStrategyController(object):
if force_flag is not None:
if force_flag not in ["true", "false"]:
pecan.abort(400, _('force invalid'))
elif payload.get('cloud_name') is None:
pecan.abort(400, _('The --force option can only be applied for '
'a single subcloud. Please specify '
'the subcloud name.'))
elif strategy_type != consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
# kube rootca update allows force for all subclouds
if payload.get('cloud_name') is None:
pecan.abort(400,
_('The --force option can only be applied '
'for a single subcloud. Please specify '
'the subcloud name.'))
subcloud_group = payload.get('subcloud_group')
# prevents passing both cloud_name and subcloud_group options

View File

@@ -0,0 +1,50 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import abc
import six
from dcmanager.common import consts
class Auditor(object, metaclass=abc.ABCMeta):
    """Abstract base class for one type of subcloud audit.

    Concrete auditors supply the RegionOne reference data and the
    per-subcloud comparison; this base class centralizes pushing the
    resulting endpoint sync status to dcmanager over RPC.

    Note: the previous implementation invoked ``six.add_metaclass`` as a
    bare statement inside the class body, which is a no-op (it is a
    decorator factory), so the abstract methods were never enforced.
    Since only python3 is supported, the metaclass is applied directly.
    """

    def __init__(self, context, dcmanager_rpc_client, endpoint_type):
        self.context = context
        self.dcmanager_rpc_client = dcmanager_rpc_client
        self.endpoint_type = endpoint_type

    def _set_subcloud_sync_status(self, sc_name, sc_sync_status):
        """Update the sync status for this auditor's endpoint type."""
        self.dcmanager_rpc_client.update_subcloud_endpoint_status(
            self.context,
            subcloud_name=sc_name,
            endpoint_type=self.endpoint_type,
            sync_status=sc_sync_status)

    def set_subcloud_endpoint_in_sync(self, sc_name):
        """Set the endpoint sync status of this subcloud to be in sync"""
        self._set_subcloud_sync_status(sc_name, consts.SYNC_STATUS_IN_SYNC)

    def set_subcloud_endpoint_out_of_sync(self, sc_name):
        """Set the endpoint sync status of this subcloud to be out of sync"""
        self._set_subcloud_sync_status(sc_name,
                                       consts.SYNC_STATUS_OUT_OF_SYNC)

    @abc.abstractmethod
    def get_regionone_audit_data(self):
        """Query RegionOne for audit information to compare against."""

    @abc.abstractmethod
    def subcloud_audit(self, subcloud_name, region_one_audit_data):
        """Query subcloud audit information and compare with RegionOne data.

        This method is responsible for calling:
        - set_subcloud_endpoint_in_sync
        - set_subcloud_endpoint_out_of_sync
        """

View File

@@ -0,0 +1,79 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from fm_api.constants import FM_ALARM_ID_CERT_EXPIRED
from fm_api.constants import FM_ALARM_ID_CERT_EXPIRING_SOON
from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dcorch.common import consts as dcorch_consts
from dcmanager.audit.auditor import Auditor
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
KUBE_ROOTCA_ALARM_LIST = [FM_ALARM_ID_CERT_EXPIRED,
FM_ALARM_ID_CERT_EXPIRING_SOON]
class KubeRootcaUpdateAudit(Auditor):
    """Audit the kube rootca update state of each subcloud.

    A subcloud is judged out-of-sync purely on the presence of
    certificate expiry/expiring alarms raised in that subcloud; no
    RegionOne data is consulted.
    """

    def __init__(self, context, dcmanager_rpc_client):
        super(KubeRootcaUpdateAudit, self).__init__(
            context,
            dcmanager_rpc_client,
            dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA
        )
        self.audit_type = "kube rootca update"
        LOG.debug("%s audit initialized" % self.audit_type)

    def get_regionone_audit_data(self):
        """Query RegionOne to determine kube rootca update information.

        Kubernetes Root CA updates are considered out of sync based on
        alarms in the subcloud, and not based on region one data.

        :return: An empty list
        """
        return []

    def subcloud_audit(self, subcloud_name, region_one_audit_data):
        """Perform an audit of kube root CA update info in a subcloud.

        :param subcloud_name: the name of the subcloud
        :param region_one_audit_data: ignored. Always an empty list
        """
        LOG.info("Triggered %s audit for subcloud:%s" % (self.audit_type,
                                                         subcloud_name))
        # Build an FM client against the subcloud; a subcloud whose
        # endpoint cannot be reached is simply skipped this cycle.
        try:
            keystone_session = OpenStackDriver(
                region_name=subcloud_name,
                region_clients=None).keystone_client.session
            fm_client = FmClient(subcloud_name, keystone_session)
        except (keystone_exceptions.EndpointNotFound,
                keystone_exceptions.ConnectFailure,
                keystone_exceptions.ConnectTimeout,
                IndexError):
            LOG.exception("Endpoint for online subcloud:(%s) not found, skip "
                          "%s audit." % (subcloud_name, self.audit_type))
            return
        if fm_client.get_alarms_by_ids(KUBE_ROOTCA_ALARM_LIST):
            # todo(abailey): determine if the same alarm id is being shared
            # for other certificates, and examine the list for the appropriate
            # alarm if necessary
            self.set_subcloud_endpoint_out_of_sync(subcloud_name)
        else:
            self.set_subcloud_endpoint_in_sync(subcloud_name)
        LOG.info("%s audit completed for:(%s)" % (self.audit_type,
                                                  subcloud_name))

View File

@@ -62,6 +62,10 @@ class ManagerAuditClient(object):
def trigger_firmware_audit(self, ctxt):
return self.cast(ctxt, self.make_msg('trigger_firmware_audit'))
def trigger_kube_rootca_update_audit(self, ctxt):
    """Ask the audit manager to run a kube rootca update audit (async cast)."""
    message = self.make_msg('trigger_kube_rootca_update_audit')
    return self.cast(ctxt, message)
def trigger_kubernetes_audit(self, ctxt):
return self.cast(ctxt, self.make_msg('trigger_kubernetes_audit'))
@@ -83,6 +87,7 @@ class ManagerAuditWorkerClient(object):
1.0 - Initial version
"""
# todo(abailey): Does the RPC version need to increment
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
@@ -116,15 +121,17 @@ class ManagerAuditWorkerClient(object):
patch_audit_data=None,
firmware_audit_data=None,
kubernetes_audit_data=None,
do_openstack_audit=False):
do_openstack_audit=False,
kube_rootca_update_data=None):
"""Tell audit-worker to perform audit on the subclouds with these
subcloud IDs.
"""
return self.cast(
ctxt, self.make_msg('audit_subclouds',
subcloud_ids=subcloud_ids,
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
do_openstack_audit=do_openstack_audit))
return self.cast(ctxt, self.make_msg(
'audit_subclouds',
subcloud_ids=subcloud_ids,
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
do_openstack_audit=do_openstack_audit,
kube_rootca_update_audit_data=kube_rootca_update_data))

View File

@@ -116,6 +116,14 @@ class DCManagerAuditService(service.Service):
LOG.info("Trigger firmware audit.")
return self.subcloud_audit_manager.trigger_firmware_audit(context)
@request_context
def trigger_kube_rootca_update_audit(self, context):
    """Used to force a kube rootca update audit on the next interval"""
    LOG.info("Trigger kube rootca update audit.")
    audit_manager = self.subcloud_audit_manager
    return audit_manager.trigger_kube_rootca_update_audit(context)
@request_context
def trigger_kubernetes_audit(self, context):
"""Used to force a kubernetes audit on the next interval"""
@@ -205,7 +213,8 @@ class DCManagerAuditWorkerService(service.Service):
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit):
do_openstack_audit,
kube_rootca_update_audit_data):
"""Used to trigger audits of the specified subcloud(s)"""
self.subcloud_audit_worker_manager.audit_subclouds(
context,
@@ -213,4 +222,5 @@ class DCManagerAuditWorkerService(service.Service):
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit)
do_openstack_audit,
kube_rootca_update_audit_data)

View File

@@ -33,6 +33,7 @@ from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import sysinv_v1
from dcmanager.audit import firmware_audit
from dcmanager.audit import kube_rootca_update_audit
from dcmanager.audit import kubernetes_audit
from dcmanager.audit import patch_audit
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
@@ -61,6 +62,9 @@ HELM_APP_OPENSTACK = 'stx-openstack'
# Every 4 audits triggers a kubernetes audit
KUBERNETES_AUDIT_RATE = 4
# Every 4 audits triggers a kube rootca update audit
KUBE_ROOTCA_UPDATE_AUDIT_RATE = 4
class SubcloudAuditManager(manager.Manager):
"""Manages tasks related to audits."""
@@ -71,6 +75,9 @@ class SubcloudAuditManager(manager.Manager):
# Used to force firmware audit on the next interval
force_firmware_audit = False
# Used to force kube rootca update audit on the next interval
force_kube_rootca_update_audit = False
# Used to force kubernetes audit on the next interval
force_kubernetes_audit = False
@@ -93,10 +100,13 @@ class SubcloudAuditManager(manager.Manager):
self.context, None)
self.kubernetes_audit = kubernetes_audit.KubernetesAudit(
self.context, None)
self.kube_rootca_update_audit = \
kube_rootca_update_audit.KubeRootcaUpdateAudit(self.context, None)
def _add_missing_endpoints(self):
# Update this flag file based on the most recent new endpoint
file_path = os.path.join(CONFIG_PATH, '.kube_endpoint_added')
file_path = os.path.join(CONFIG_PATH,
'.kube_rootca_update_endpoint_added')
# If file exists on the controller, all the endpoints have been
# added to DB since last time an endpoint was added
if not os.path.isfile(file_path):
@@ -147,6 +157,18 @@ class SubcloudAuditManager(manager.Manager):
def reset_force_kubernetes_audit(cls):
cls.force_kubernetes_audit = False
@classmethod
def trigger_kube_rootca_update_audit(cls, context):
    """Request a kubernetes rootca update audit at the next interval.

    This can be called from outside the dcmanager audit
    """
    cls.force_kube_rootca_update_audit = True
@classmethod
def reset_force_kube_rootca_update_audit(cls):
    """Clear the forced kube rootca audit flag once the audit has fired."""
    cls.force_kube_rootca_update_audit = False
@classmethod
def trigger_patch_audit(cls, context):
"""Trigger patch audit at next interval.
@@ -171,6 +193,7 @@ class SubcloudAuditManager(manager.Manager):
'firmware_audit_requested': True,
'load_audit_requested': True,
'kubernetes_audit_requested': True,
'kube_rootca_update_audit_requested': True,
}
db_api.subcloud_audits_update(context, subcloud_id, values)
@@ -204,6 +227,7 @@ class SubcloudAuditManager(manager.Manager):
audit_load = False
audit_firmware = False
audit_kubernetes = False
audit_kube_rootca_updates = False
current_time = time.time()
# Determine whether to trigger a patch audit of each subcloud
if (SubcloudAuditManager.force_patch_audit or
@@ -228,6 +252,11 @@ class SubcloudAuditManager(manager.Manager):
audit_kubernetes = True
# Reset force_kubernetes_audit only when kubernetes audit has been fired
SubcloudAuditManager.reset_force_kubernetes_audit()
if (self.patch_audit_count % KUBE_ROOTCA_UPDATE_AUDIT_RATE == 1):
    LOG.info("Trigger kube rootca update audit")
    audit_kube_rootca_updates = True
    # Reset force_kube_rootca_update_audit only if audit is fired.
    # Bug fix: this previously reset force_kubernetes_audit, which both
    # left the rootca force flag set forever and wrongly cleared the
    # kubernetes force flag.
    SubcloudAuditManager.reset_force_kube_rootca_update_audit()
# the force_patch_audit flag is also used to evaluate audit_load
# so reset it here, even if it is not set
SubcloudAuditManager.reset_force_patch_audit()
@@ -244,13 +273,25 @@ class SubcloudAuditManager(manager.Manager):
audit_kubernetes = True
SubcloudAuditManager.reset_force_kubernetes_audit()
return audit_patch, audit_load, audit_firmware, audit_kubernetes
# Trigger a kube rootca update audit as it is changed through proxy
if SubcloudAuditManager.force_kube_rootca_update_audit:
LOG.info("Trigger kube rootca update audit")
audit_kube_rootca_updates = True
SubcloudAuditManager.reset_force_kube_rootca_update_audit()
def _get_audit_data(self, audit_patch, audit_firmware, audit_kubernetes):
return (audit_patch, audit_load, audit_firmware,
audit_kubernetes, audit_kube_rootca_updates)
def _get_audit_data(self,
audit_patch,
audit_firmware,
audit_kubernetes,
audit_kube_rootca_updates):
"""Return the patch / firmware / kubernetes audit data as needed."""
patch_audit_data = None
firmware_audit_data = None
kubernetes_audit_data = None
kube_rootca_update_audit_data = None
if audit_patch:
# Query RegionOne patches and software version
patch_audit_data = self.patch_audit.get_regionone_audit_data()
@@ -260,8 +301,12 @@ class SubcloudAuditManager(manager.Manager):
if audit_kubernetes:
# Query RegionOne kubernetes version info
kubernetes_audit_data = self.kubernetes_audit.get_regionone_audit_data()
return patch_audit_data, firmware_audit_data, kubernetes_audit_data
if audit_kube_rootca_updates:
# Query RegionOne kube rootca update info
kube_rootca_update_audit_data = \
self.kube_rootca_update_audit.get_regionone_audit_data()
return (patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data)
def _periodic_subcloud_audit_loop(self):
"""Audit availability of subclouds loop."""
@@ -278,13 +323,14 @@ class SubcloudAuditManager(manager.Manager):
update_subcloud_state = False
# Determine whether we want to trigger specialty audits.
audit_patch, audit_load, audit_firmware, audit_kubernetes = \
self._get_audits_needed()
(audit_patch, audit_load, audit_firmware,
audit_kubernetes,
audit_kube_rootca_update) = self._get_audits_needed()
# Set desired audit flags for all subclouds.
audit_utils.request_subcloud_audits(
self.context, update_subcloud_state, audit_patch, audit_load,
audit_firmware, audit_kubernetes)
audit_firmware, audit_kubernetes, audit_kube_rootca_update)
do_openstack_audit = False
@@ -350,15 +396,30 @@ class SubcloudAuditManager(manager.Manager):
LOG.debug("DB says kubernetes audit needed")
audit_kubernetes = True
break
LOG.info("Triggered subcloud audit: patch=(%s) firmware=(%s) kube=(%s)"
% (audit_patch, audit_firmware, audit_kubernetes))
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
self._get_audit_data(audit_patch, audit_firmware, audit_kubernetes)
if not audit_kube_rootca_update:
    # The scheduled rate did not fire; honour any per-subcloud audit
    # request recorded in the database.
    for audit in subcloud_audits:
        if audit.kube_rootca_update_audit_requested:
            # fixed typo: "kub" -> "kube"
            LOG.debug("DB says kube rootca update audit needed")
            audit_kube_rootca_update = True
            break
LOG.info("Triggered subcloud audit: patch=(%s) firmware=(%s) "
"kube=(%s) kube-rootca=(%s)"
% (audit_patch, audit_firmware,
audit_kubernetes, audit_kube_rootca_update))
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
self._get_audit_data(audit_patch,
audit_firmware,
audit_kubernetes,
audit_kube_rootca_update)
LOG.debug("patch_audit_data: %s, "
"firmware_audit_data: %s, "
"kubernetes_audit_data: %s, " % (patch_audit_data,
firmware_audit_data,
kubernetes_audit_data))
"kubernetes_audit_data: %s, "
"kube_rootca_update_audit_data: : %s, "
% (patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data))
# We want a chunksize of at least 1 so add the number of workers.
chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) / CONF.audit_worker_workers
@@ -367,17 +428,25 @@ class SubcloudAuditManager(manager.Manager):
if len(subcloud_ids) == chunksize:
# We've gathered a batch of subclouds, send it for processing.
self.audit_worker_rpc_client.audit_subclouds(
self.context, subcloud_ids, patch_audit_data,
firmware_audit_data, kubernetes_audit_data,
do_openstack_audit)
self.context,
subcloud_ids,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data)
LOG.debug('Sent subcloud audit request message for subclouds: %s' % subcloud_ids)
subcloud_ids = []
if len(subcloud_ids) > 0:
# We've got a partial batch...send it off for processing.
self.audit_worker_rpc_client.audit_subclouds(
self.context, subcloud_ids, patch_audit_data,
firmware_audit_data, kubernetes_audit_data,
do_openstack_audit)
self.context,
subcloud_ids,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data)
LOG.debug('Sent final subcloud audit request message for subclouds: %s' % subcloud_ids)
else:
LOG.debug('Done sending audit request messages.')

View File

@@ -31,6 +31,7 @@ from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dcmanager.audit import alarm_aggregation
from dcmanager.audit import firmware_audit
from dcmanager.audit import kube_rootca_update_audit
from dcmanager.audit import kubernetes_audit
from dcmanager.audit import patch_audit
from dcmanager.audit.subcloud_audit_manager import HELM_APP_OPENSTACK
@@ -68,17 +69,27 @@ class SubcloudAuditWorkerManager(manager.Manager):
# Track workers created for each subcloud.
self.subcloud_workers = dict()
self.alarm_aggr = alarm_aggregation.AlarmAggregation(self.context)
# todo(abailey): refactor the design pattern for adding new audits
self.patch_audit = patch_audit.PatchAudit(
self.context, self.dcmanager_rpc_client)
self.firmware_audit = firmware_audit.FirmwareAudit(
self.context, self.dcmanager_rpc_client)
self.kubernetes_audit = kubernetes_audit.KubernetesAudit(
self.context, self.dcmanager_rpc_client)
self.kube_rootca_update_audit = \
kube_rootca_update_audit.KubeRootcaUpdateAudit(
self.context,
self.dcmanager_rpc_client)
self.pid = os.getpid()
def audit_subclouds(self, context, subcloud_ids, patch_audit_data,
firmware_audit_data, kubernetes_audit_data,
do_openstack_audit):
def audit_subclouds(self,
context,
subcloud_ids,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
do_openstack_audit,
kube_rootca_update_audit_data):
"""Run audits of the specified subcloud(s)"""
LOG.debug('PID: %s, subclouds to audit: %s, do_openstack_audit: %s' %
@@ -130,6 +141,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
do_load_audit)
do_firmware_audit = subcloud_audits.firmware_audit_requested
do_kubernetes_audit = subcloud_audits.kubernetes_audit_requested
do_kube_rootca_update_audit = \
subcloud_audits.kube_rootca_update_audit_requested
update_subcloud_state = subcloud_audits.state_update_requested
# Create a new greenthread for each subcloud to allow the audits
@@ -143,10 +156,12 @@ class SubcloudAuditWorkerManager(manager.Manager):
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_update_audit)
def _update_subcloud_audit_fail_count(self, subcloud,
audit_fail_count):
@@ -268,18 +283,28 @@ class SubcloudAuditWorkerManager(manager.Manager):
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
do_kubernetes_audit):
do_kubernetes_audit,
do_kube_rootca_update_audit):
audits_done = []
# Do the actual subcloud audit.
try:
audits_done = self._audit_subcloud(
subcloud, update_subcloud_state, do_audit_openstack,
patch_audit_data, firmware_audit_data, kubernetes_audit_data,
subcloud,
update_subcloud_state,
do_audit_openstack,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit, do_firmware_audit, do_kubernetes_audit)
do_load_audit,
do_firmware_audit,
do_kubernetes_audit,
do_kube_rootca_update_audit)
except Exception:
LOG.exception("Got exception auditing subcloud: %s" % subcloud.name)
@@ -292,11 +317,19 @@ class SubcloudAuditWorkerManager(manager.Manager):
LOG.debug("PID: %s, done auditing subcloud: %s." %
(self.pid, subcloud.name))
def _audit_subcloud(self, subcloud, update_subcloud_state,
do_audit_openstack, patch_audit_data,
firmware_audit_data, kubernetes_audit_data,
do_patch_audit, do_load_audit, do_firmware_audit,
do_kubernetes_audit):
def _audit_subcloud(self,
subcloud,
update_subcloud_state,
do_audit_openstack,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
do_kubernetes_audit,
do_kube_rootca_update_audit):
"""Audit a single subcloud."""
avail_status_current = subcloud.availability_status
@@ -420,6 +453,12 @@ class SubcloudAuditWorkerManager(manager.Manager):
subcloud_name,
kubernetes_audit_data)
audits_done.append('kubernetes')
# Perform kube rootca update audit
if do_kube_rootca_update_audit:
self.kube_rootca_update_audit.subcloud_audit(
subcloud_name,
kube_rootca_update_audit_data)
audits_done.append('kube-rootca-update')
# Audit openstack application in the subcloud
if do_audit_openstack and sysinv_client:
self._audit_subcloud_openstack_app(

View File

@@ -19,9 +19,13 @@
from dcmanager.db import api as db_api
def request_subcloud_audits(context, update_subcloud_state=False,
audit_patch=False, audit_load=False,
audit_firmware=False, audit_kubernetes=False):
def request_subcloud_audits(context,
update_subcloud_state=False,
audit_patch=False,
audit_load=False,
audit_firmware=False,
audit_kubernetes=False,
audit_kube_rootca=False):
values = {}
if update_subcloud_state:
values['state_update_requested'] = True
@@ -33,4 +37,6 @@ def request_subcloud_audits(context, update_subcloud_state=False,
values['firmware_audit_requested'] = True
if audit_kubernetes:
values['kubernetes_audit_requested'] = True
if audit_kube_rootca:
values['kube_rootca_update_audit_requested'] = True
db_api.subcloud_audits_update_all(context, values)

View File

@@ -140,6 +140,9 @@ scheduler_opts = [
cfg.IntOpt('subcloud_audit_interval',
default=30,
help='periodic time interval for subcloud audit'),
cfg.IntOpt('kube_rootca_update_audit_expiry_days',
default=90,
help='Num days remaining for a kube rootca to be out-of-sync'),
cfg.IntOpt('patch_audit_interval',
default=900,
help='default time interval for patch audit')

View File

@@ -84,6 +84,7 @@ AVAIL_FAIL_COUNT_MAX = 9999
# Software update strategy types
SW_UPDATE_TYPE_FIRMWARE = "firmware"
SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE = "kube-rootca-update"
SW_UPDATE_TYPE_KUBERNETES = "kubernetes"
SW_UPDATE_TYPE_PATCH = "patch"
SW_UPDATE_TYPE_UPGRADE = "upgrade"
@@ -165,6 +166,13 @@ STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY = \
STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY = \
"kube applying vim kube upgrade strategy"
# Kube Root CA Update orchestration states
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY = \
"creating vim kube rootca update strategy"
STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY = \
"applying vim kube rootca update strategy"
# Subcloud deploy status states
DEPLOY_STATE_NONE = 'not-deployed'
DEPLOY_STATE_PRE_DEPLOY = 'pre-deploy'

View File

@@ -205,6 +205,7 @@ def subcloud_audits_get_all_need_audit(context, last_audit_threshold):
(models.SubcloudAudits.patch_audit_requested == true()) |
(models.SubcloudAudits.firmware_audit_requested == true()) |
(models.SubcloudAudits.load_audit_requested == true()) |
(models.SubcloudAudits.kube_rootca_update_audit_requested == true()) |
(models.SubcloudAudits.kubernetes_audit_requested == true())).\
all()
return result
@@ -234,6 +235,8 @@ def subcloud_audits_end_audit(context, subcloud_id, audits_done):
subcloud_audits_ref.firmware_audit_requested = False
if 'load' in audits_done:
subcloud_audits_ref.load_audit_requested = False
# Bug fix: the audit worker appends 'kube-rootca-update' to audits_done,
# so the check must use that exact token; checking 'kube_rootca' never
# matched and the requested flag was never cleared.
if 'kube-rootca-update' in audits_done:
    subcloud_audits_ref.kube_rootca_update_audit_requested = False
if 'kubernetes' in audits_done:
subcloud_audits_ref.kubernetes_audit_requested = False
subcloud_audits_ref.save(session)
@@ -256,6 +259,7 @@ def subcloud_audits_fix_expired_audits(context, last_audit_threshold,
values['firmware_audit_requested'] = True
values['load_audit_requested'] = True
values['kubernetes_audit_requested'] = True
values['kube_rootca_update_audit_requested'] = True
with write_session() as session:
result = session.query(models.SubcloudAudits).\
options(load_only("deleted", "audit_started_at",

View File

@@ -0,0 +1,28 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
    """Add the kube_rootca_update_audit_requested flag to subcloud_audits.

    :param migrate_engine: SQLAlchemy engine bound to the dcmanager DB
    """
    meta = MetaData()
    meta.bind = migrate_engine

    audits_table = Table('subcloud_audits', meta, autoload=True)
    # New rows (and existing rows, via the server default) start with the
    # audit not requested.
    new_column = Column('kube_rootca_update_audit_requested',
                        Boolean,
                        nullable=False,
                        default=False,
                        server_default='0')
    audits_table.create_column(new_column)
    return True
def downgrade(migrate_engine):
    """Reject downgrade: schema downgrades are not supported."""
    raise NotImplementedError('Database downgrade is unsupported.')

View File

@@ -135,6 +135,7 @@ class SubcloudAudits(BASE, DCManagerBase):
load_audit_requested = Column(Boolean, nullable=False, default=False)
firmware_audit_requested = Column(Boolean, nullable=False, default=False)
kubernetes_audit_requested = Column(Boolean, nullable=False, default=False)
kube_rootca_update_audit_requested = Column(Boolean, nullable=False, default=False)
spare_audit_requested = Column(Boolean, nullable=False, default=False)
spare2_audit_requested = Column(Boolean, nullable=False, default=False)
reserved = Column(Text)

View File

@@ -0,0 +1,35 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.orch_thread import OrchThread
from dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy \
import ApplyingVIMKubeRootcaUpdateStrategyState
from dcmanager.orchestrator.states.kube_rootca.creating_vim_strategy \
import CreatingVIMKubeRootcaUpdateStrategyState
class KubeRootcaUpdateOrchThread(OrchThread):
    """Kube RootCA Update Orchestration Thread"""

    # Maps each dcmanager orchestration state to the state class that
    # performs it; consumed by the OrchThread base class state machine.
    STATE_OPERATORS = {
        consts.STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY:
            CreatingVIMKubeRootcaUpdateStrategyState,
        consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY:
            ApplyingVIMKubeRootcaUpdateStrategyState,
    }

    def __init__(self, strategy_lock, audit_rpc_client):
        # The orchestration starts in the 'creating vim strategy' state.
        super(KubeRootcaUpdateOrchThread, self).__init__(
            strategy_lock,
            audit_rpc_client,
            consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE,
            vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE,
            consts.STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY)

    def trigger_audit(self):
        """Trigger an audit for kube rootca update"""
        self.audit_rpc_client.trigger_kube_rootca_update_audit(self.context)

View File

@@ -0,0 +1,19 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.applying_vim_strategy \
import ApplyingVIMStrategyState
class ApplyingVIMKubeRootcaUpdateStrategyState(ApplyingVIMStrategyState):
    """State for applying the VIM kube rootca update strategy."""

    def __init__(self, region_name):
        # Applying is the final orchestration step: on success the
        # overall strategy transitions directly to complete.
        super(ApplyingVIMKubeRootcaUpdateStrategyState, self).__init__(
            next_state=consts.STRATEGY_STATE_COMPLETE,
            region_name=region_name,
            strategy_name=vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE)

View File

@@ -0,0 +1,47 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.common import utils as dcmanager_utils
from dcmanager.orchestrator.states.creating_vim_strategy \
import CreatingVIMStrategyState
class CreatingVIMKubeRootcaUpdateStrategyState(CreatingVIMStrategyState):
    """State for creating the VIM Kube Root CA Update strategy."""

    def __init__(self, region_name):
        """Initialize the create state for the given subcloud region.

        :param region_name: region (subcloud) this state operates on
        """
        # Once the VIM strategy finishes building, the orchestration moves
        # on to the apply stage.
        super(CreatingVIMKubeRootcaUpdateStrategyState, self).__init__(
            next_state=(
                consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY),
            region_name=region_name,
            strategy_name=vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE)

    def _create_vim_strategy(self, strategy_step, region):
        """Build the kube rootca update strategy in the subcloud VIM.

        :param strategy_step: the dcmanager strategy step being processed
        :param region: region used to look up the VIM client
        :returns: the newly created VIM strategy (in 'building' state)
        :raises Exception: if the returned strategy is not building
        """
        self.info_log(strategy_step,
                      "Creating (%s) VIM strategy" % self.strategy_name)
        # Fetch the sw-update options configured for this subcloud.
        update_opts = dcmanager_utils.get_sw_update_opts(
            self.context,
            for_sw_update=True,
            subcloud_id=strategy_step.subcloud_id)
        # Ask the subcloud VIM to build the strategy using those options.
        vim_strategy = self.get_vim_client(region).create_strategy(
            self.strategy_name,
            update_opts['storage-apply-type'],
            update_opts['worker-apply-type'],
            update_opts['max-parallel-workers'],
            update_opts['default-instance-action'],
            update_opts['alarm-restriction-type'])
        # a successful API call to create MUST set the state be 'building'
        if vim_strategy.state != vim.STATE_BUILDING:
            raise Exception("Unexpected VIM strategy build state: %s"
                            % vim_strategy.state)
        return vim_strategy

View File

@@ -30,6 +30,8 @@ from dcmanager.common import manager
from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.orchestrator.fw_update_orch_thread import FwUpdateOrchThread
from dcmanager.orchestrator.kube_rootca_update_orch_thread \
import KubeRootcaUpdateOrchThread
from dcmanager.orchestrator.kube_upgrade_orch_thread \
import KubeUpgradeOrchThread
from dcmanager.orchestrator.patch_orch_thread import PatchOrchThread
@@ -53,6 +55,7 @@ class SwUpdateManager(manager.Manager):
# Used to notify dcmanager-audit
self.audit_rpc_client = dcmanager_audit_rpc_client.ManagerAuditClient()
# todo(abailey): refactor/decouple orch threads into a list
# Start worker threads
# - patch orchestration thread
self.patch_orch_thread = PatchOrchThread(self.strategy_lock,
@@ -71,6 +74,12 @@ class SwUpdateManager(manager.Manager):
KubeUpgradeOrchThread(self.strategy_lock, self.audit_rpc_client)
self.kube_upgrade_orch_thread.start()
# - kube rootca update orchestration thread
self.kube_rootca_update_orch_thread = \
KubeRootcaUpdateOrchThread(self.strategy_lock,
self.audit_rpc_client)
self.kube_rootca_update_orch_thread.start()
def stop(self):
# Stop (and join) the worker threads
# - patch orchestration thread
@@ -85,9 +94,13 @@ class SwUpdateManager(manager.Manager):
# - kube upgrade orchestration thread
self.kube_upgrade_orch_thread.stop()
self.kube_upgrade_orch_thread.join()
# - kube rootca update orchestration thread
self.kube_rootca_update_orch_thread.stop()
self.kube_rootca_update_orch_thread.join()
def _validate_subcloud_status_sync(self, strategy_type,
subcloud_status, force):
subcloud_status, force,
availability_status):
"""Check the appropriate subcloud_status fields for the strategy_type
Returns: True if out of sync.
@@ -98,7 +111,8 @@ class SwUpdateManager(manager.Manager):
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
if force:
# force option only has an effect in offline case for upgrade
if force and (availability_status != consts.AVAILABILITY_ONLINE):
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_LOAD and
subcloud_status.sync_status !=
@@ -118,6 +132,18 @@ class SwUpdateManager(manager.Manager):
dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
if force:
# run for in-sync and out-of-sync (but not unknown)
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
subcloud_status.sync_status !=
consts.SYNC_STATUS_UNKNOWN)
else:
return (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
subcloud_status.sync_status ==
consts.SYNC_STATUS_OUT_OF_SYNC)
# Unimplemented strategy_type status check. Log an error
LOG.error("_validate_subcloud_status_sync for %s not implemented" %
strategy_type)
@@ -175,6 +201,7 @@ class SwUpdateManager(manager.Manager):
else:
max_parallel_subclouds = int(max_parallel_subclouds_str)
# todo(abailey): refactor common code in stop-on-failure and force
stop_on_failure_str = payload.get('stop-on-failure')
if not stop_on_failure_str:
@@ -195,6 +222,7 @@ class SwUpdateManager(manager.Manager):
force = False
# Has the user specified a specific subcloud?
# todo(abailey): refactor this code to use classes
cloud_name = payload.get('cloud_name')
if cloud_name and cloud_name != consts.SYSTEM_CONTROLLER_NAME:
# Make sure subcloud exists
@@ -229,6 +257,19 @@ class SwUpdateManager(manager.Manager):
resource='strategy',
msg='Subcloud %s does not require kubernetes update'
% cloud_name)
elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
if force:
# force means we do not care about the status
pass
else:
subcloud_status = db_api.subcloud_status_get(
context, subcloud.id,
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA)
if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
raise exceptions.BadRequest(
resource='strategy',
msg='Subcloud %s does not require kube rootca update'
% cloud_name)
elif strategy_type == consts.SW_UPDATE_TYPE_PATCH:
# Make sure subcloud requires patching
subcloud_status = db_api.subcloud_status_get(
@@ -301,6 +342,17 @@ class SwUpdateManager(manager.Manager):
resource='strategy',
msg='Kubernetes sync status is unknown for one or more '
'subclouds')
elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
if subcloud.availability_status != consts.AVAILABILITY_ONLINE:
continue
elif (subcloud_status.endpoint_type ==
dcorch_consts.ENDPOINT_TYPE_KUBE_ROOTCA and
subcloud_status.sync_status ==
consts.SYNC_STATUS_UNKNOWN):
raise exceptions.BadRequest(
resource='strategy',
msg='Kube rootca update sync status is unknown for '
'one or more subclouds')
# Create the strategy
strategy = db_api.sw_update_strategy_create(
@@ -311,9 +363,9 @@ class SwUpdateManager(manager.Manager):
stop_on_failure,
consts.SW_UPDATE_STATE_INITIAL)
# For 'upgrade' do not create a strategy step for the system controller
# For 'firmware' do not create a strategy step for system controller
# For 'patch', always create a strategy step for the system controller
# A strategy step for the system controller is not added for:
# 'upgrade', 'firmware', 'kube upgrade', 'kube rootca update'
if strategy_type == consts.SW_UPDATE_TYPE_PATCH:
db_api.strategy_step_create(
context,
@@ -324,6 +376,9 @@ class SwUpdateManager(manager.Manager):
# Create a strategy step for each subcloud that is managed, online and
# out of sync
# special cases:
# - kube rootca update: the 'force' option allows in-sync subclouds
# todo(abailey): fix the current_stage numbering
current_stage = 2
stage_size = 0
stage_updated = False
@@ -355,14 +410,13 @@ class SwUpdateManager(manager.Manager):
else:
continue
# force option only has an effect in offline case
forced_validate = force and (subcloud.availability_status !=
consts.AVAILABILITY_ONLINE)
subcloud_status = db_api.subcloud_status_get_all(context, subcloud.id)
subcloud_status = db_api.subcloud_status_get_all(context,
subcloud.id)
for status in subcloud_status:
if self._validate_subcloud_status_sync(strategy_type,
status, forced_validate):
status,
force,
subcloud.availability_status):
LOG.debug("Created for %s" % subcloud.id)
db_api.strategy_step_create(
context,

View File

@@ -398,6 +398,14 @@ class TestFirmwareAudit(base.DCManagerTestCase):
dict_results.append(result.to_dict())
return dict_results
def get_fw_audit_data(self, am):
    """Run a full audit data gather and return only the firmware portion.

    :param am: a SubcloudAuditManager instance
    :returns: firmware audit data converted to dicts, as it would arrive
        over RPC; the patch, kubernetes, and kube rootca payloads are
        discarded
    """
    patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_root = \
        am._get_audit_data(True, True, True, True)
    # Convert to dict like what would happen calling via RPC
    firmware_audit_data = self._rpc_convert(firmware_audit_data)
    return firmware_audit_data
def test_init(self):
fm = firmware_audit.FirmwareAudit(self.ctxt,
self.fake_dcmanager_api)
@@ -425,12 +433,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -462,12 +465,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -498,12 +496,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -534,12 +527,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -570,12 +558,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -606,12 +589,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -642,12 +620,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)
@@ -678,12 +651,7 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.firmware_audit = fm
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
firmware_audit_data = self.get_fw_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
fm.subcloud_firmware_audit(name, firmware_audit_data)

View File

@@ -147,6 +147,13 @@ class TestKubernetesAudit(base.DCManagerTestCase):
dict_results.append(result.to_dict())
return dict_results
def get_kube_audit_data(self, am):
    """Run a full audit data gather and return only the kubernetes portion.

    :param am: a SubcloudAuditManager instance
    :returns: kubernetes audit data converted to dicts, as it would arrive
        over RPC; the patch, firmware, and kube rootca payloads are
        discarded
    """
    patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_rootca = \
        am._get_audit_data(True, True, True, True)
    # Convert to dict like what would happen calling via RPC
    kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
    return kubernetes_audit_data
def test_init(self):
audit = kubernetes_audit.KubernetesAudit(self.ctxt,
self.fake_dcmanager_api)
@@ -162,11 +169,7 @@ class TestKubernetesAudit(base.DCManagerTestCase):
self.fake_dcmanager_api)
am = subcloud_audit_manager.SubcloudAuditManager()
am.kubernetes_audit = audit
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
kubernetes_audit_data = self.get_kube_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
audit.subcloud_kubernetes_audit(name, kubernetes_audit_data)
@@ -190,11 +193,7 @@ class TestKubernetesAudit(base.DCManagerTestCase):
self.kube_sysinv_client.get_kube_versions.return_value = [
FakeKubeVersion(version=UPGRADED_KUBE_VERSION),
]
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
kubernetes_audit_data = self.get_kube_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
# return different kube versions in the subclouds
@@ -222,10 +221,7 @@ class TestKubernetesAudit(base.DCManagerTestCase):
self.kube_sysinv_client.get_kube_versions.return_value = [
FakeKubeVersion(version=PREVIOUS_KUBE_VERSION),
]
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
kubernetes_audit_data = self.get_kube_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
# return different kube versions in the subclouds
@@ -254,10 +250,7 @@ class TestKubernetesAudit(base.DCManagerTestCase):
self.kube_sysinv_client.get_kube_versions.return_value = [
FakeKubeVersion(version=UPGRADED_KUBE_VERSION),
]
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
kubernetes_audit_data = self.get_kube_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
# return same kube versions in the subclouds
@@ -292,10 +285,7 @@ class TestKubernetesAudit(base.DCManagerTestCase):
self.kube_sysinv_client.get_kube_versions.return_value = [
FakeKubeVersion(version=UPGRADED_KUBE_VERSION),
]
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
kubernetes_audit_data = self.get_kube_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
# return same kube versions in the subclouds

View File

@@ -272,6 +272,14 @@ class TestPatchAudit(base.DCManagerTestCase):
self.mock_audit_worker_api.return_value = self.fake_audit_worker_api
self.addCleanup(p.stop)
def get_patch_audit_data(self, am):
    """Run a full audit data gather and return only the patch portion.

    :param am: a SubcloudAuditManager instance
    :returns: patch audit data converted to a dict, as it would arrive
        over RPC; the firmware, kubernetes, and kube rootca payloads are
        discarded
    """
    (patch_audit_data, firmware_audit_data,
     kubernetes_audit_data, kube_rootca_data) = \
        am._get_audit_data(True, True, True, True)
    # Convert to dict like what would happen calling via RPC
    patch_audit_data = patch_audit_data.to_dict()
    return patch_audit_data
def test_init(self):
pm = patch_audit.PatchAudit(self.ctxt,
self.fake_dcmanager_api)
@@ -297,10 +305,7 @@ class TestPatchAudit(base.DCManagerTestCase):
am.patch_audit = pm
do_load_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
patch_audit_data = self.get_patch_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
pm.subcloud_patch_audit(name, patch_audit_data, do_load_audit)
@@ -334,10 +339,7 @@ class TestPatchAudit(base.DCManagerTestCase):
mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
do_load_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
patch_audit_data = self.get_patch_audit_data(am)
for name in ['subcloud1', 'subcloud2', 'subcloud3', 'subcloud4']:
pm.subcloud_patch_audit(name, patch_audit_data, do_load_audit)
@@ -398,10 +400,7 @@ class TestPatchAudit(base.DCManagerTestCase):
mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
do_load_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
patch_audit_data = self.get_patch_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
pm.subcloud_patch_audit(name, patch_audit_data, do_load_audit)
@@ -435,10 +434,7 @@ class TestPatchAudit(base.DCManagerTestCase):
mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUnmatchedSoftwareVersion
do_load_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
patch_audit_data = self.get_patch_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
pm.subcloud_patch_audit(name, patch_audit_data, do_load_audit)
@@ -482,10 +478,7 @@ class TestPatchAudit(base.DCManagerTestCase):
mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUpgradeInProgress
do_load_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
am._get_audit_data(True, True, True)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
patch_audit_data = self.get_patch_audit_data(am)
for name in ['subcloud1', 'subcloud2']:
pm.subcloud_patch_audit(name, patch_audit_data, do_load_audit)

View File

@@ -52,6 +52,12 @@ class FakeKubernetesAudit(object):
self.get_regionone_audit_data = mock.MagicMock()
class FakeKubeRootcaUpdateAudit(object):
    """Test double standing in for the kube rootca update audit object."""

    def __init__(self):
        # Only the region-one data gather is exercised by these tests.
        self.get_regionone_audit_data = mock.MagicMock()
class FakeServiceGroup(object):
def __init__(self, status, desired_state, service_group_name, uuid,
node_name, state, condition, name):
@@ -248,6 +254,15 @@ class TestAuditManager(base.DCManagerTestCase):
self.fake_kubernetes_audit
self.addCleanup(p.stop)
# Mock kube rootca update audit
self.fake_kube_rootca_update_audit = FakeKubeRootcaUpdateAudit()
p = mock.patch.object(subcloud_audit_manager,
'kube_rootca_update_audit')
self.mock_kube_rootca_update_audit = p.start()
self.mock_kubernetes_audit.KubeRootcaUpdateAudit.return_value = \
self.fake_kube_rootca_update_audit
self.addCleanup(p.stop)
@staticmethod
def create_subcloud_static(ctxt, **kwargs):
values = {
@@ -288,6 +303,7 @@ class TestAuditManager(base.DCManagerTestCase):
self.assertEqual(result['firmware_audit_requested'], True)
self.assertEqual(result['load_audit_requested'], True)
self.assertEqual(result['kubernetes_audit_requested'], True)
self.assertEqual(result['kube_rootca_update_audit_requested'], True)
def test_trigger_load_audit(self):
subcloud = self.create_subcloud_static(self.ctx)

View File

@@ -74,6 +74,13 @@ class FakeKubernetesAudit(object):
self.get_regionone_audit_data = mock.MagicMock()
class FakeKubeRootcaUpdateAudit(object):
    """Test double standing in for the kube rootca update audit object."""

    def __init__(self):
        # Per-subcloud audit entry point asserted on by the worker tests.
        self.subcloud_audit = mock.MagicMock()
        # Region-one data gather used by the audit manager.
        self.get_regionone_audit_data = mock.MagicMock()
class FakeServiceGroup(object):
def __init__(self, status, desired_state, service_group_name, uuid,
node_name, state, condition, name):
@@ -324,6 +331,23 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
self.fake_kubernetes_audit2
self.addCleanup(p.stop)
# Mock kube rootca update audit in Audit Worker and Audit Manager
self.fake_kube_rootca_update_audit = FakeKubeRootcaUpdateAudit()
p = mock.patch.object(subcloud_audit_worker_manager,
'kube_rootca_update_audit')
self.mock_kube_rootca_update_audit = p.start()
self.mock_kube_rootca_update_audit.KubeRootcaUpdateAudit.return_value = \
self.fake_kube_rootca_update_audit
self.addCleanup(p.stop)
self.fake_kube_rootca_update_audit2 = FakeKubeRootcaUpdateAudit()
p = mock.patch.object(subcloud_audit_manager,
'kube_rootca_update_audit')
self.mock_kube_rootca_update_audit2 = p.start()
self.mock_kube_rootca_update_audit2.KubeRootcaUpdateAudit.return_value = \
self.fake_kube_rootca_update_audit2
self.addCleanup(p.stop)
@staticmethod
def create_subcloud_static(ctxt, **kwargs):
values = {
@@ -364,25 +388,34 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
wm = subcloud_audit_worker_manager.SubcloudAuditWorkerManager()
# Audit the subcloud
update_subcloud_state = False
do_audit_openstack = False
do_patch_audit = True
do_load_audit = True
do_firmware_audit = True
do_kubernetes_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Convert to dict like what would happen calling via RPC
# Note: the other data should also be converted...
patch_audit_data = patch_audit_data.to_dict()
wm._audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=False,
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
wm._audit_subcloud(subcloud,
update_subcloud_state,
do_audit_openstack,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Verify the subcloud was set to online
self.fake_dcmanager_api.update_subcloud_availability.assert_called_with(
@@ -414,6 +447,10 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_called_with(
subcloud.name, kubernetes_audit_data)
# Verify kube rootca update audit is called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_called_with(
subcloud.name, kube_rootca_update_audit_data)
def test_audit_subcloud_online_unmanaged(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
@@ -423,25 +460,34 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
wm = subcloud_audit_worker_manager.SubcloudAuditWorkerManager()
# Audit the subcloud
update_subcloud_state = False
do_audit_openstack = False
do_patch_audit = True
do_load_audit = True
do_firmware_audit = True
do_kubernetes_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Convert to dict like what would happen calling via RPC
# Note: the other data should also be converted...
patch_audit_data = patch_audit_data.to_dict()
wm._audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=False,
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
wm._audit_subcloud(subcloud,
update_subcloud_state,
do_audit_openstack,
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Verify the subcloud was set to online
self.fake_dcmanager_api.update_subcloud_availability.assert_called_with(
@@ -469,6 +515,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
def test_audit_subcloud_online_no_change(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
@@ -482,15 +531,16 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
availability_status=consts.AVAILABILITY_ONLINE)
# Audit the subcloud
wm._audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=False, patch_audit_data=None,
firmware_audit_data=None,
kubernetes_audit_data=None,
kube_rootca_update_audit_data=None,
do_patch_audit=False,
do_load_audit=False,
do_firmware_audit=False,
do_kubernetes_audit=False)
do_kubernetes_audit=False,
do_kube_rootca_update_audit=False)
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
@@ -528,10 +578,12 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_audit_openstack=False, patch_audit_data=None,
firmware_audit_data=None,
kubernetes_audit_data=None,
kube_rootca_update_audit_data=None,
do_patch_audit=False,
do_load_audit=False,
do_firmware_audit=False,
do_kubernetes_audit=False)
do_kubernetes_audit=False,
do_kube_rootca_update_audit=False)
# Verify the subcloud state was updated even though no change
self.fake_dcmanager_api.update_subcloud_availability.assert_called_with(
@@ -559,6 +611,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
def test_audit_subcloud_go_offline(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
@@ -584,10 +639,13 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_load_audit = True
do_firmware_audit = True
do_kubernetes_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
wm._audit_subcloud(subcloud, update_subcloud_state=False,
@@ -595,10 +653,12 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
do_kubernetes_audit=do_kubernetes_audit,
do_kube_rootca_update_audit=do_kube_rootca_update_audit)
# Verify the audit fail count was updated in db
audit_fail_count = 1
@@ -620,10 +680,12 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
do_kubernetes_audit=do_kubernetes_audit,
do_kube_rootca_update_audit=do_kube_rootca_update_audit)
audit_fail_count = audit_fail_count + 1
@@ -642,14 +704,18 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
self.fake_patch_audit.subcloud_patch_audit.assert_called_once_with(
subcloud.name, mock.ANY, True)
# Verify firmware audit is called
# Verify firmware audit is only called once
self.fake_firmware_audit.subcloud_firmware_audit.assert_called_once_with(
subcloud.name, mock.ANY)
# Verify firmware audit is called
# Verify kubernetes audit is only called once
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_called_once_with(
subcloud.name, mock.ANY)
# Verify kube rootca update audit is only called once
self.fake_kube_rootca_update_audit.subcloud_audit.assert_called_once_with(
subcloud.name, mock.ANY)
def test_audit_subcloud_offline_no_change(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
@@ -671,10 +737,13 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_load_audit = True
do_firmware_audit = True
do_kubernetes_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
wm._audit_subcloud(subcloud, update_subcloud_state=False,
@@ -682,10 +751,12 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
do_kubernetes_audit=do_kubernetes_audit,
do_kube_rootca_update_audit=do_kube_rootca_update_audit)
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
@@ -712,6 +783,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
def test_audit_subcloud_offline_update_audit_fail_count_only(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
@@ -735,10 +809,13 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_load_audit = True
do_firmware_audit = True
do_kubernetes_audit = True
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
do_kube_rootca_update_audit = True
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_update_audit)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
wm._audit_subcloud(subcloud, update_subcloud_state=False,
@@ -746,10 +823,12 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
kube_rootca_update_audit_data=kube_rootca_update_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
do_kubernetes_audit=do_kubernetes_audit,
do_kube_rootca_update_audit=do_kube_rootca_update_audit)
# Verify the audit fail count was updated in the DB.
subcloud = db_api.subcloud_get(self.ctx, subcloud.id)
@@ -776,6 +855,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
@mock.patch.object(subcloud_audit_worker_manager, 'LOG')
def test_update_subcloud_audit_fail_count_subcloud_deleted(self, mock_logging):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
@@ -807,13 +889,18 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
availability_status=consts.AVAILABILITY_ONLINE)
# Audit the subcloud
wm._audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=True, patch_audit_data=None,
firmware_audit_data=None,
kubernetes_audit_data=None,
do_patch_audit=False,
do_load_audit=False, do_firmware_audit=False,
do_kubernetes_audit=False)
wm._audit_subcloud(subcloud,
False, # update_subcloud_state
True, # do_audit_openstack
None, # patch_audit_data
None, # firmware_audit_data
None, # kubernetes_audit_data
None, # kube_rootca_update_audit_data
False, # do_patch_audit
False, # do_load_audit
False, # do_firmware_audit
False, # do_kubernetes_audit
                           False)  # do_kube_rootca_update_audit
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
@@ -843,6 +930,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
def test_audit_subcloud_online_with_openstack_removed(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
@@ -861,13 +951,18 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
FAKE_APPLICATIONS.pop(1)
# Audit the subcloud
wm._audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=True, patch_audit_data=None,
firmware_audit_data=None,
kubernetes_audit_data=None,
do_patch_audit=False,
do_load_audit=False, do_firmware_audit=False,
do_kubernetes_audit=False)
wm._audit_subcloud(subcloud,
False, # update_subcloud_state
True, # do_audit_openstack
None, # patch_audit_data
None, # firmware_audit_data
None, # kubernetes_audit_data
                           None,  # kube_rootca_update_audit_data
False, # do_patch_audit
                           False,  # do_load_audit
False, # do_firmware_audit
False, # do_kubernetes_audit
False) # do_kube_rootca_update_audit
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
@@ -896,6 +991,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
def test_audit_subcloud_online_with_openstack_inactive(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
@@ -914,12 +1012,18 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
FAKE_APPLICATIONS[1].active = False
# Audit the subcloud
wm._audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=True, patch_audit_data=None,
firmware_audit_data=None,
kubernetes_audit_data=None, do_patch_audit=False,
do_load_audit=False, do_firmware_audit=False,
do_kubernetes_audit=False)
wm._audit_subcloud(subcloud,
False, # update_subcloud_state
True, # do_audit_openstack
None, # patch_audit_data
None, # firmware_audit_data
None, # kubernetes_audit_data
None, # kube_rootca_update_audit_data
False, # do_patch_audit
False, # do_load_audit
False, # do_firmware_audit
False, # do_kubernetes_audit
False) # do_kube_rootca_update_audit
# Verify the subcloud state was not updated
self.fake_dcmanager_api.update_subcloud_availability.\
@@ -948,6 +1052,9 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
def test_audit_subcloud_partial_subaudits(self):
subcloud = self.create_subcloud_static(self.ctx, name='subcloud1')
self.assertIsNotNone(subcloud)
@@ -965,10 +1072,13 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
do_load_audit = False
do_firmware_audit = False
do_kubernetes_audit = False
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
do_kube_rootca_audit = False
(patch_audit_data, firmware_audit_data,
kubernetes_audit_data, kube_rootca_update_audit_data) = \
am._get_audit_data(do_patch_audit,
do_firmware_audit,
do_kubernetes_audit)
do_kubernetes_audit,
do_kube_rootca_audit)
# Convert to dict like what would happen calling via RPC
patch_audit_data = patch_audit_data.to_dict()
@@ -983,15 +1093,18 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
self.assertEqual(audits.firmware_audit_requested, True)
# Do the actual audit
wm._do_audit_subcloud(subcloud, update_subcloud_state=False,
do_audit_openstack=False,
patch_audit_data=patch_audit_data,
firmware_audit_data=firmware_audit_data,
kubernetes_audit_data=kubernetes_audit_data,
do_patch_audit=do_patch_audit,
do_load_audit=do_load_audit,
do_firmware_audit=do_firmware_audit,
do_kubernetes_audit=do_kubernetes_audit)
wm._do_audit_subcloud(subcloud,
False, # update_subcloud_state
False, # do_audit_openstack
patch_audit_data,
firmware_audit_data,
kubernetes_audit_data,
kube_rootca_update_audit_data,
do_patch_audit,
do_load_audit,
do_firmware_audit,
do_kubernetes_audit,
do_kube_rootca_audit)
# Verify patch audit is called
self.fake_patch_audit.subcloud_patch_audit.assert_called_with(
@@ -1008,9 +1121,13 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
# Verify kubernetes audit is not called
self.fake_kubernetes_audit.subcloud_kubernetes_audit.assert_not_called()
# Verify kube rootca update audit is not called
self.fake_kube_rootca_update_audit.subcloud_audit.assert_not_called()
# Ensure the subaudits that didn't run are still requested
audits = db_api.subcloud_audits_get(self.ctx, subcloud.id)
self.assertEqual(audits.patch_audit_requested, False)
self.assertEqual(audits.load_audit_requested, True)
self.assertEqual(audits.firmware_audit_requested, True)
self.assertEqual(audits.kubernetes_audit_requested, True)
self.assertEqual(audits.kube_rootca_update_audit_requested, True)

View File

@@ -93,6 +93,7 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase):
self.assertEqual(result['load_audit_requested'], False)
self.assertEqual(result['firmware_audit_requested'], False)
self.assertEqual(result['kubernetes_audit_requested'], False)
self.assertEqual(result['kube_rootca_update_audit_requested'], False)
self.assertEqual(result['spare_audit_requested'], False)
self.assertEqual(result['spare2_audit_requested'], False)
self.assertEqual(result['reserved'], None)
@@ -236,9 +237,11 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase):
self.assertEqual(result['firmware_audit_requested'], True)
self.assertEqual(result['load_audit_requested'], True)
self.assertEqual(result['kubernetes_audit_requested'], True)
self.assertEqual(result['kube_rootca_update_audit_requested'], True)
# For the not-fixed-up audits, subaudits should not be requested.
result = db_api.subcloud_audits_get(self.ctx, 2)
self.assertEqual(result['patch_audit_requested'], False)
self.assertEqual(result['firmware_audit_requested'], False)
self.assertEqual(result['load_audit_requested'], False)
self.assertEqual(result['kubernetes_audit_requested'], False)
self.assertEqual(result['kube_rootca_update_audit_requested'], False)

View File

@@ -96,7 +96,7 @@ class TestSwUpdate(base.DCManagerTestCase):
worker = None
mock_strategy_lock = mock.Mock()
mock_dcmanager_audit_api = mock.Mock()
# There are 3 orch threads. Only one needs to be setup based on type
# There are many orch threads. Only one needs to be setup based on type
if strategy_type == consts.SW_UPDATE_TYPE_UPGRADE:
sw_update_manager.SwUpgradeOrchThread.stopped = lambda x: False
worker = \
@@ -131,7 +131,7 @@ class TestSwUpdate(base.DCManagerTestCase):
sw_update_manager.FwUpdateOrchThread(mock_strategy_lock,
mock_dcmanager_audit_api)
else:
# mock the patch orch thread
# mock the firmware orch thread
self.fake_fw_update_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'FwUpdateOrchThread')
self.mock_fw_update_orch_thread = p.start()
@@ -145,7 +145,7 @@ class TestSwUpdate(base.DCManagerTestCase):
mock_strategy_lock,
mock_dcmanager_audit_api)
else:
# mock the patch orch thread
# mock the kube upgrade orch thread
self.fake_kube_upgrade_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'KubeUpgradeOrchThread')
self.mock_kube_upgrade_orch_thread = p.start()
@@ -153,6 +153,22 @@ class TestSwUpdate(base.DCManagerTestCase):
self.fake_kube_upgrade_orch_thread
self.addCleanup(p.stop)
if strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
sw_update_manager.KubeRootcaUpdateOrchThread.stopped = \
lambda x: False
worker = sw_update_manager.KubeRootcaUpdateOrchThread(
mock_strategy_lock,
mock_dcmanager_audit_api)
else:
# mock the kube rootca update orch thread
self.fake_kube_rootca_update_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager,
'KubeRootcaUpdateOrchThread')
self.mock_kube_rootca_update_orch_thread = p.start()
self.mock_kube_rootca_update_orch_thread.return_value = \
self.fake_kube_rootca_update_orch_thread
self.addCleanup(p.stop)
return worker
def setup_subcloud(self):

View File

@@ -453,6 +453,13 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.fake_kube_upgrade_orch_thread
self.addCleanup(p.stop)
self.fake_kube_rootca_update_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'KubeRootcaUpdateOrchThread')
self.mock_kube_rootca_update_orch_thread = p.start()
self.mock_kube_rootca_update_orch_thread.return_value = \
self.fake_kube_rootca_update_orch_thread
self.addCleanup(p.stop)
# Mock the dcmanager audit API
self.fake_dcmanager_audit_api = FakeDCManagerAuditAPI()
p = mock.patch('dcmanager.audit.rpcapi.ManagerAuditClient')

View File

@@ -130,6 +130,7 @@ ENDPOINT_TYPE_LOAD = "load"
ENDPOINT_TYPE_DC_CERT = 'dc-cert'
ENDPOINT_TYPE_FIRMWARE = 'firmware'
ENDPOINT_TYPE_KUBERNETES = 'kubernetes'
ENDPOINT_TYPE_KUBE_ROOTCA = 'kube-rootca'
# All endpoint types
ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
@@ -138,7 +139,8 @@ ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_LOAD,
ENDPOINT_TYPE_DC_CERT,
ENDPOINT_TYPE_FIRMWARE,
ENDPOINT_TYPE_KUBERNETES]
ENDPOINT_TYPE_KUBERNETES,
ENDPOINT_TYPE_KUBE_ROOTCA]
# Dcorch sync endpoint types
SYNC_ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,

View File

@@ -77,7 +77,7 @@ output-format=text
files-output=no
# Tells whether to display a full report or only the messages
reports=yes
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which