Create a software audit

Today, the software audit is conducted alongside the patch audit
(load/patching). However, because the software audit uses a different
REST API, any failure in it also causes the patch audit to fail. This
commit creates a separate software audit, decoupled from the patch
audit. Note that the patch audit will be removed later on.

TEST PLAN:
PASS: Manage a subcloud and get all audits in-sync.
      - Software, load, and patching audits working properly.
PASS: Test with the software REST API disabled/broken.
      - Load and patching audits working properly.
      - Software audit in unknown status.

Story: 2010676
Task: 49598

Change-Id: I38c00bfdf4d86d56e1e656892f7de32206755865
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
parent 23e2cf4a85
commit 01d689f0f9
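Reviewer note: a minimal sketch of how the new audit can be forced from another dcmanager component, using the trigger_software_audit RPC added in this change. The client wiring and admin context below are illustrative assumptions for local testing, not part of the change itself.

# Hypothetical usage sketch: force a software audit on the next interval.
# Assumes the dcmanager audit RPC client and an admin request context are
# available to the caller; method names follow this change.
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
from dcmanager.common import context as dcmanager_context

ctxt = dcmanager_context.get_admin_context()
audit_rpc_client = dcmanager_audit_rpc_client.ManagerAuditClient()

# Fire-and-forget cast to the audit service; the manager then sets
# force_software_audit and the audit runs on the next periodic interval.
audit_rpc_client.trigger_software_audit(ctxt)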
@@ -107,12 +107,16 @@ ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
                        ENDPOINT_TYPE_SOFTWARE]

 # All endpoint audit requests
+# TODO(nicodemos): The ENDPOINT_TYPE_SOFTWARE will use the 'spare_audit_requested'
+# temporarily until the USM feature is fully complete. Afterward, the software audit
+# will replace the patch audit.
 ENDPOINT_AUDIT_REQUESTS = {
     ENDPOINT_TYPE_FIRMWARE: 'firmware_audit_requested',
     ENDPOINT_TYPE_KUBERNETES: 'kubernetes_audit_requested',
     ENDPOINT_TYPE_KUBE_ROOTCA: 'kube_rootca_update_audit_requested',
     ENDPOINT_TYPE_LOAD: 'load_audit_requested',
     ENDPOINT_TYPE_PATCHING: 'patch_audit_requested',
+    ENDPOINT_TYPE_SOFTWARE: 'spare_audit_requested',
 }

 # Well known region names
@@ -21,8 +21,6 @@ from dccommon import consts as dccommon_consts
 from dccommon.drivers.openstack import patching_v1
 from dccommon.drivers.openstack.patching_v1 import PatchingClient
 from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
-from dccommon.drivers.openstack import software_v1
-from dccommon.drivers.openstack.software_v1 import SoftwareClient
 from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
 from dcmanager.common import utils

@@ -53,27 +51,6 @@ class PatchAuditData(object):
         return cls(**values)


-class SoftwareAuditData(object):
-    def __init__(self, releases, deployed_release_ids,
-                 committed_release_ids):
-        self.releases = releases
-        self.deployed_release_ids = deployed_release_ids
-        self.committed_release_ids = committed_release_ids
-
-    def to_dict(self):
-        return {
-            'releases': self.releases,
-            'deployed_release_ids': self.deployed_release_ids,
-            'committed_release_ids': self.committed_release_ids,
-        }
-
-    @classmethod
-    def from_dict(cls, values):
-        if values is None:
-            return None
-        return cls(**values)
-
-
 class PatchAudit(object):
     """Manages tasks related to patch audits."""

@@ -102,45 +79,6 @@ class PatchAudit(object):
                           sysinv_client.region_name)
         return upgrades

-    def get_software_regionone_audit_data(self):
-        """Query RegionOne to determine what releases should be deployed
-
-        to the system as well as the current software version
-
-        :return: A new SoftwareAuditData object
-        """
-        try:
-            m_os_ks_client = OpenStackDriver(
-                region_name=dccommon_consts.DEFAULT_REGION_NAME,
-                region_clients=None).keystone_client
-            software_endpoint = m_os_ks_client.endpoint_cache.get_endpoint(
-                dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
-            software_client = SoftwareClient(dccommon_consts.DEFAULT_REGION_NAME,
-                                             m_os_ks_client.session,
-                                             endpoint=software_endpoint)
-        except Exception:
-            LOG.exception('Failure initializing OS Client, skip software audit.')
-            return None
-        # First query RegionOne to determine what releases should be deployed
-        # to the system.
-        regionone_releases = software_client.query()
-        LOG.debug("regionone_releases: %s" % regionone_releases)
-        # Build lists of releases that should be deployed or committed in all
-        # subclouds, based on their state in RegionOne.
-        deployed_release_ids = list()
-        committed_release_ids = list()
-        for release_id in regionone_releases.keys():
-            if regionone_releases[release_id]['state'] == \
-                    software_v1.DEPLOYED:
-                deployed_release_ids.append(release_id)
-            elif regionone_releases[release_id]['state'] == \
-                    software_v1.COMMITTED:
-                committed_release_ids.append(release_id)
-        LOG.debug("RegionOne deployed_release_ids: %s" % deployed_release_ids)
-        LOG.debug("RegionOne committed_release_ids: %s" % committed_release_ids)
-        return SoftwareAuditData(regionone_releases, deployed_release_ids,
-                                 committed_release_ids)
-
     def get_regionone_audit_data(self):
         """Query RegionOne to determine what patches should be applied

@@ -194,104 +132,6 @@ class PatchAudit(object):
         return PatchAuditData(regionone_patches, applied_patch_ids,
                               committed_patch_ids, regionone_software_version)

-    def subcloud_audit(
-        self, subcloud_name, subcloud_region, audit_data, software_audit_data,
-        do_load_audit
-    ):
-        if software_audit_data:
-            self.subcloud_software_audit(
-                subcloud_name, subcloud_region, software_audit_data
-            )
-        else:
-            self.subcloud_patch_audit(subcloud_name, subcloud_region, audit_data,
-                                      do_load_audit)
-
-    def subcloud_software_audit(self, subcloud_name, subcloud_region, audit_data):
-        LOG.info('Triggered software audit for: %s.' % subcloud_name)
-        try:
-            sc_os_client = OpenStackDriver(region_name=subcloud_region,
-                                           region_clients=None).keystone_client
-            session = sc_os_client.session
-            software_endpoint = sc_os_client.endpoint_cache. \
-                get_endpoint(dccommon_consts.ENDPOINT_TYPE_SOFTWARE)
-            software_client = SoftwareClient(
-                subcloud_region, session,
-                endpoint=software_endpoint)
-        except (keystone_exceptions.EndpointNotFound,
-                keystone_exceptions.ConnectFailure,
-                keystone_exceptions.ConnectTimeout,
-                IndexError):
-            LOG.exception("Endpoint for online subcloud %s not found, skip "
-                          "software audit." % subcloud_name)
-            return
-
-        # Retrieve all the releases that are present in this subcloud.
-        try:
-            subcloud_releases = software_client.query()
-            LOG.debug("Releases for subcloud %s: %s" %
-                      (subcloud_name, subcloud_releases))
-        except Exception:
-            LOG.warn('Cannot retrieve releases for subcloud: %s, skip software '
-                     'audit' % subcloud_name)
-            return
-
-        out_of_sync = False
-
-        # audit_data will be a dict due to passing through RPC so objectify it
-        audit_data = SoftwareAuditData.from_dict(audit_data)
-
-        # Check that all releases in this subcloud are in the correct
-        # state, based on the state of the release in RegionOne. For the
-        # subcloud.
-        for release_id in subcloud_releases.keys():
-            if subcloud_releases[release_id]['state'] == \
-                    software_v1.DEPLOYED:
-                if release_id not in audit_data.deployed_release_ids:
-                    if release_id not in audit_data.committed_release_ids:
-                        LOG.debug("Release %s should not be deployed in %s" %
-                                  (release_id, subcloud_name))
-                    else:
-                        LOG.debug("Release %s should be committed in %s" %
-                                  (release_id, subcloud_name))
-                    out_of_sync = True
-            elif subcloud_releases[release_id]['state'] == \
-                    software_v1.COMMITTED:
-                if (release_id not in audit_data.committed_release_ids and
-                        release_id not in audit_data.deployed_release_ids):
-                    LOG.warn("Release %s should not be committed in %s" %
-                             (release_id, subcloud_name))
-                    out_of_sync = True
-            else:
-                # In steady state, all releases should either be deployed
-                # or committed in each subcloud. Release in other
-                # states mean a sync is required.
-                out_of_sync = True
-
-        # Check that all deployed or committed releases in RegionOne are
-        # present in the subcloud.
-        for release_id in audit_data.deployed_release_ids:
-            if release_id not in subcloud_releases:
-                LOG.debug("Release %s missing from %s" %
-                          (release_id, subcloud_name))
-                out_of_sync = True
-        for release_id in audit_data.committed_release_ids:
-            if release_id not in subcloud_releases:
-                LOG.debug("Release %s missing from %s" %
-                          (release_id, subcloud_name))
-                out_of_sync = True
-
-        if out_of_sync:
-            self._update_subcloud_sync_status(
-                subcloud_name,
-                subcloud_region, dccommon_consts.ENDPOINT_TYPE_SOFTWARE,
-                dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
-        else:
-            self._update_subcloud_sync_status(
-                subcloud_name,
-                subcloud_region, dccommon_consts.ENDPOINT_TYPE_SOFTWARE,
-                dccommon_consts.SYNC_STATUS_IN_SYNC)
-        LOG.info('Software audit completed for: %s.' % subcloud_name)
-
     def subcloud_patch_audit(self, subcloud_name, subcloud_region, audit_data,
                              do_load_audit):
         LOG.info('Triggered patch audit for: %s.' % subcloud_name)
@@ -71,6 +71,9 @@ class ManagerAuditClient(object):
     def trigger_load_audit(self, ctxt):
         return self.cast(ctxt, self.make_msg('trigger_load_audit'))

+    def trigger_software_audit(self, ctxt):
+        return self.cast(ctxt, self.make_msg('trigger_software_audit'))
+
     def trigger_subcloud_audits(self, ctxt, subcloud_id, exclude_endpoints=None):
         return self.cast(ctxt, self.make_msg('trigger_subcloud_audits',
                                              subcloud_id=subcloud_id,
@@ -14,12 +14,12 @@
 #

 import functools
-import six

 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
 from oslo_service import service
+import six

 from dcmanager.audit.subcloud_audit_manager import SubcloudAuditManager
 from dcmanager.audit.subcloud_audit_worker_manager import SubcloudAuditWorkerManager
@@ -67,9 +67,9 @@ class DCManagerAuditService(service.Service):

     def start(self):
         utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
-        target = oslo_messaging.Target(version=self.rpc_api_version,
-                                       server=self.host,
-                                       topic=self.topic)
+        target = oslo_messaging.Target(
+            version=self.rpc_api_version, server=self.host, topic=self.topic
+        )
         self.target = target
         self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
         self._rpc_server.start()
@@ -93,10 +93,9 @@ class DCManagerAuditService(service.Service):
         try:
             self._rpc_server.stop()
             self._rpc_server.wait()
-            LOG.info('Engine service stopped successfully')
+            LOG.info("Engine service stopped successfully")
         except Exception as ex:
-            LOG.error('Failed to stop engine service: %s',
-                      six.text_type(ex))
+            LOG.error("Failed to stop engine service: %s", six.text_type(ex))

     def stop(self):
         self._stop_rpc_server()
@@ -120,8 +119,7 @@ class DCManagerAuditService(service.Service):
         """Used to force a kube rootca update audit on the next interval"""

         LOG.info("Trigger kube rootca update audit.")
-        return self.subcloud_audit_manager.trigger_kube_rootca_update_audit(
-            context)
+        return self.subcloud_audit_manager.trigger_kube_rootca_update_audit(context)

     @request_context
     def trigger_kubernetes_audit(self, context):
@@ -144,27 +142,39 @@ class DCManagerAuditService(service.Service):
         LOG.info("Trigger load audit.")
         return self.subcloud_audit_manager.trigger_load_audit(context)

+    @request_context
+    def trigger_software_audit(self, context):
+        """Used to force a software audit on the next interval"""
+
+        LOG.info("Trigger software audit.")
+        return self.subcloud_audit_manager.trigger_software_audit(context)
+
     @request_context
     def trigger_subcloud_audits(self, context, subcloud_id, exclude_endpoints):
         """Trigger all subcloud audits for one subcloud."""
-        LOG.info("Trigger all audits for subcloud %s except endpoints %s" %
-                 (subcloud_id, exclude_endpoints))
+        LOG.info(
+            "Trigger all audits for subcloud %s except endpoints %s"
+            % (subcloud_id, exclude_endpoints)
+        )
         return self.subcloud_audit_manager.trigger_subcloud_audits(
-            context, subcloud_id, exclude_endpoints)
+            context, subcloud_id, exclude_endpoints
+        )

     @request_context
     def trigger_subcloud_patch_load_audits(self, context, subcloud_id):
         """Trigger patch and load audits for one subcloud."""
         LOG.info("Trigger patch and load audits for subcloud %s", subcloud_id)
         return self.subcloud_audit_manager.trigger_subcloud_patch_load_audits(
-            context, subcloud_id)
+            context, subcloud_id
+        )

     @request_context
     def trigger_subcloud_endpoints_update(self, context, subcloud_name, endpoints):
         """Trigger update endpoints of services for a subcloud region."""
         LOG.info("Trigger update endpoints for subcloud %s", subcloud_name)
         return self.subcloud_audit_manager.trigger_subcloud_endpoints_update(
-            context, subcloud_name, endpoints)
+            context, subcloud_name, endpoints
+        )


 class DCManagerAuditWorkerService(service.Service):
@@ -187,9 +197,9 @@ class DCManagerAuditWorkerService(service.Service):
         utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
         self.init_tgm()
         self.init_audit_managers()
-        target = oslo_messaging.Target(version=self.rpc_api_version,
-                                       server=self.host,
-                                       topic=self.topic)
+        target = oslo_messaging.Target(
+            version=self.rpc_api_version, server=self.host, topic=self.topic
+        )
         self.target = target
         self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
         self._rpc_server.start()
@@ -207,10 +217,11 @@ class DCManagerAuditWorkerService(service.Service):
         try:
             self._rpc_server.stop()
             self._rpc_server.wait()
-            LOG.info('Audit-worker RPC service stopped successfully')
+            LOG.info("Audit-worker RPC service stopped successfully")
         except Exception as ex:
-            LOG.error('Failed to stop audit-worker RPC service: %s',
-                      six.text_type(ex))
+            LOG.error(
+                "Failed to stop audit-worker RPC service: %s", six.text_type(ex)
+            )

     def stop(self):
         self._stop_rpc_server()
@@ -223,7 +234,8 @@ class DCManagerAuditWorkerService(service.Service):
         super(DCManagerAuditWorkerService, self).stop()

     @request_context
-    def audit_subclouds(self,
+    def audit_subclouds(
+        self,
                         context,
                         subcloud_ids,
                         patch_audit_data,
@@ -231,7 +243,8 @@ class DCManagerAuditWorkerService(service.Service):
                         kubernetes_audit_data,
                         do_openstack_audit,
                         kube_rootca_update_audit_data,
-                        software_audit_data):
+                        software_audit_data,
+    ):
         """Used to trigger audits of the specified subcloud(s)"""
         self.subcloud_audit_worker_manager.audit_subclouds(
             context,
@@ -241,13 +254,12 @@ class DCManagerAuditWorkerService(service.Service):
             kubernetes_audit_data,
             do_openstack_audit,
             kube_rootca_update_audit_data,
-            software_audit_data)
+            software_audit_data,
+        )

     @request_context
     def update_subcloud_endpoints(self, context, subcloud_name, endpoints):
         """Update endpoints of services for a subcloud region"""
         self.subcloud_audit_worker_manager.update_subcloud_endpoints(
-            context,
-            subcloud_name,
-            endpoints
+            context, subcloud_name, endpoints
         )
distributedcloud/dcmanager/audit/software_audit.py (new file, 209 lines)
@@ -0,0 +1,209 @@
+#
+# Copyright (c) 2024 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from keystoneauth1 import exceptions as keystone_exceptions
+from oslo_log import log as logging
+
+from dccommon import consts as dccommon_consts
+from dccommon.drivers.openstack import sdk_platform
+from dccommon.drivers.openstack import software_v1
+
+LOG = logging.getLogger(__name__)
+
+
+class SoftwareAuditData(object):
+    def __init__(self, releases, deployed_release_ids, committed_release_ids):
+        self.releases = releases
+        self.deployed_release_ids = deployed_release_ids
+        self.committed_release_ids = committed_release_ids
+
+    def to_dict(self):
+        return {
+            "releases": self.releases,
+            "deployed_release_ids": self.deployed_release_ids,
+            "committed_release_ids": self.committed_release_ids,
+        }
+
+    @classmethod
+    def from_dict(cls, values):
+        if values is None:
+            return None
+        return cls(**values)
+
+
+class SoftwareAudit(object):
+    """Manages tasks related to software audits."""
+
+    def __init__(self, context, dcmanager_state_rpc_client):
+        LOG.debug("SoftwareAudit initialization...")
+        self.context = context
+        self.state_rpc_client = dcmanager_state_rpc_client
+        self.audit_count = 0
+
+    def _update_subcloud_sync_status(
+        self, sc_name, sc_region, sc_endpoint_type, sc_status
+    ):
+        self.state_rpc_client.update_subcloud_endpoint_status(
+            self.context,
+            subcloud_name=sc_name,
+            subcloud_region=sc_region,
+            endpoint_type=sc_endpoint_type,
+            sync_status=sc_status,
+        )
+
+    @staticmethod
+    def _get_upgrades(sysinv_client):
+        upgrades = None
+        try:
+            upgrades = sysinv_client.get_upgrades()
+        except Exception:
+            LOG.exception(
+                "Cannot retrieve upgrade info for "
+                f"subcloud: {sysinv_client.region_name}"
+            )
+        return upgrades
+
+    def get_regionone_audit_data(self):
+        """Query RegionOne to determine what releases should be deployed
+
+        to the system as well as the current software version
+
+        :return: A new SoftwareAuditData object
+        """
+        try:
+            m_os_ks_client = sdk_platform.OpenStackDriver(
+                region_name=dccommon_consts.DEFAULT_REGION_NAME, region_clients=None
+            ).keystone_client
+            software_endpoint = m_os_ks_client.endpoint_cache.get_endpoint(
+                dccommon_consts.ENDPOINT_TYPE_SOFTWARE
+            )
+            software_client = software_v1.SoftwareClient(
+                dccommon_consts.DEFAULT_REGION_NAME,
+                m_os_ks_client.session,
+                endpoint=software_endpoint,
+            )
+        except Exception:
+            LOG.exception("Failure initializing OS Client, skip software audit.")
+            return None
+        # First query RegionOne to determine what releases should be deployed
+        # to the system.
+        regionone_releases = software_client.query()
+        LOG.debug(f"regionone_releases: {regionone_releases}")
+        # Build lists of releases that should be deployed or committed in all
+        # subclouds, based on their state in RegionOne.
+        deployed_release_ids = list()
+        committed_release_ids = list()
+        for release_id in regionone_releases.keys():
+            if regionone_releases[release_id]["state"] == software_v1.DEPLOYED:
+                deployed_release_ids.append(release_id)
+            elif regionone_releases[release_id]["state"] == software_v1.COMMITTED:
+                committed_release_ids.append(release_id)
+        LOG.debug(f"RegionOne deployed_release_ids: {deployed_release_ids}")
+        LOG.debug(f"RegionOne committed_release_ids: {committed_release_ids}")
+        return SoftwareAuditData(
+            regionone_releases, deployed_release_ids, committed_release_ids
+        )
+
+    def subcloud_software_audit(self, subcloud_name, subcloud_region, audit_data):
+        LOG.info(f"Triggered software audit for: {subcloud_name}.")
+        try:
+            sc_os_client = sdk_platform.OpenStackDriver(
+                region_name=subcloud_region, region_clients=None
+            ).keystone_client
+            session = sc_os_client.session
+            software_endpoint = sc_os_client.endpoint_cache.get_endpoint(
+                dccommon_consts.ENDPOINT_TYPE_SOFTWARE
+            )
+            software_client = software_v1.SoftwareClient(
+                subcloud_region, session, endpoint=software_endpoint
+            )
+        except (
+            keystone_exceptions.EndpointNotFound,
+            keystone_exceptions.ConnectFailure,
+            keystone_exceptions.ConnectTimeout,
+            IndexError,
+        ):
+            LOG.exception(
+                f"Endpoint for online subcloud {subcloud_name} not found, skip "
+                "software audit."
+            )
+            return
+
+        # Retrieve all the releases that are present in this subcloud.
+        try:
+            subcloud_releases = software_client.query()
+            LOG.debug(f"Releases for subcloud {subcloud_name}: {subcloud_releases}")
+        except Exception:
+            LOG.warn(
+                f"Cannot retrieve releases for subcloud: {subcloud_name}, "
+                "skip software audit."
+            )
+            return
+
+        out_of_sync = False
+
+        # audit_data will be a dict due to passing through RPC so objectify it
+        audit_data = SoftwareAuditData.from_dict(audit_data)
+
+        # Check that all releases in this subcloud are in the correct
+        # state, based on the state of the release in RegionOne. For the
+        # subcloud.
+        for release_id in subcloud_releases.keys():
+            if subcloud_releases[release_id]["state"] == software_v1.DEPLOYED:
+                if release_id not in audit_data.deployed_release_ids:
+                    if release_id not in audit_data.committed_release_ids:
+                        LOG.debug(
+                            f"Release {release_id} should not be deployed "
+                            f"in {subcloud_name}."
+                        )
+                    else:
+                        LOG.debug(
+                            f"Release {release_id} should be committed "
+                            f"in {subcloud_name}."
+                        )
+                    out_of_sync = True
+            elif subcloud_releases[release_id]["state"] == software_v1.COMMITTED:
+                if (
+                    release_id not in audit_data.committed_release_ids
+                    and release_id not in audit_data.deployed_release_ids
+                ):
+                    LOG.warn(
+                        f"Release {release_id} should not be committed "
+                        f"in {subcloud_name}."
+                    )
+                    out_of_sync = True
+            else:
+                # In steady state, all releases should either be deployed
+                # or committed in each subcloud. Release in other
+                # states mean a sync is required.
+                out_of_sync = True
+
+        # Check that all deployed or committed releases in RegionOne are
+        # present in the subcloud.
+        for release_id in audit_data.deployed_release_ids:
+            if release_id not in subcloud_releases:
+                LOG.debug(f"Release {release_id} missing from {subcloud_name}.")
+                out_of_sync = True
+        for release_id in audit_data.committed_release_ids:
+            if release_id not in subcloud_releases:
+                LOG.debug(f"Release {release_id} missing from {subcloud_name}.")
+                out_of_sync = True
+
+        if out_of_sync:
+            self._update_subcloud_sync_status(
+                subcloud_name,
+                subcloud_region,
+                dccommon_consts.ENDPOINT_TYPE_SOFTWARE,
+                dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
+            )
+        else:
+            self._update_subcloud_sync_status(
+                subcloud_name,
+                subcloud_region,
+                dccommon_consts.ENDPOINT_TYPE_SOFTWARE,
+                dccommon_consts.SYNC_STATUS_IN_SYNC,
+            )
+        LOG.info(f"Software audit completed for: {subcloud_name}.")
|
@ -30,6 +30,7 @@ from dcmanager.audit import kube_rootca_update_audit
|
|||||||
from dcmanager.audit import kubernetes_audit
|
from dcmanager.audit import kubernetes_audit
|
||||||
from dcmanager.audit import patch_audit
|
from dcmanager.audit import patch_audit
|
||||||
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
|
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
|
||||||
|
from dcmanager.audit import software_audit
|
||||||
from dcmanager.audit import utils as audit_utils
|
from dcmanager.audit import utils as audit_utils
|
||||||
from dcmanager.common import context
|
from dcmanager.common import context
|
||||||
from dcmanager.common.i18n import _
|
from dcmanager.common.i18n import _
|
||||||
@ -41,15 +42,16 @@ LOG = logging.getLogger(__name__)
|
|||||||
|
|
||||||
# We will update the state of each subcloud in the dcorch about once per hour.
|
# We will update the state of each subcloud in the dcorch about once per hour.
|
||||||
# Calculate how many iterations that will be.
|
# Calculate how many iterations that will be.
|
||||||
SUBCLOUD_STATE_UPDATE_ITERATIONS = \
|
SUBCLOUD_STATE_UPDATE_ITERATIONS = (
|
||||||
dccommon_consts.SECONDS_IN_HOUR // CONF.scheduler.subcloud_audit_interval
|
dccommon_consts.SECONDS_IN_HOUR // CONF.scheduler.subcloud_audit_interval
|
||||||
|
)
|
||||||
|
|
||||||
# Patch audit normally happens every CONF.scheduler.patch_audit_interval
|
# Patch audit normally happens every CONF.scheduler.patch_audit_interval
|
||||||
# seconds, but can be forced to happen on the next audit interval by calling
|
# seconds, but can be forced to happen on the next audit interval by calling
|
||||||
# trigger_patch_audit.
|
# trigger_patch_audit.
|
||||||
|
|
||||||
# Name of starlingx openstack helm application
|
# Name of starlingx openstack helm application
|
||||||
HELM_APP_OPENSTACK = 'openstack'
|
HELM_APP_OPENSTACK = "openstack"
|
||||||
|
|
||||||
# Every 4 audits triggers a kubernetes audit
|
# Every 4 audits triggers a kubernetes audit
|
||||||
KUBERNETES_AUDIT_RATE = 4
|
KUBERNETES_AUDIT_RATE = 4
|
||||||
@ -73,36 +75,36 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
# Used to force kubernetes audit on the next interval
|
# Used to force kubernetes audit on the next interval
|
||||||
force_kubernetes_audit = False
|
force_kubernetes_audit = False
|
||||||
|
|
||||||
|
# Used to force patch audit on the next interval
|
||||||
|
force_software_audit = False
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
LOG.debug(_('SubcloudAuditManager initialization...'))
|
LOG.debug(_("SubcloudAuditManager initialization..."))
|
||||||
|
|
||||||
super(SubcloudAuditManager, self).__init__(
|
super(SubcloudAuditManager, self).__init__(
|
||||||
service_name="subcloud_audit_manager")
|
service_name="subcloud_audit_manager"
|
||||||
|
)
|
||||||
self.context = context.get_admin_context()
|
self.context = context.get_admin_context()
|
||||||
self.audit_worker_rpc_client = (
|
self.audit_worker_rpc_client = (
|
||||||
dcmanager_audit_rpc_client.ManagerAuditWorkerClient())
|
dcmanager_audit_rpc_client.ManagerAuditWorkerClient()
|
||||||
|
)
|
||||||
# Number of audits since last subcloud state update
|
# Number of audits since last subcloud state update
|
||||||
self.audit_count = SUBCLOUD_STATE_UPDATE_ITERATIONS - 2
|
self.audit_count = SUBCLOUD_STATE_UPDATE_ITERATIONS - 2
|
||||||
|
self.patch_audit = patch_audit.PatchAudit(self.context, None)
|
||||||
# Number of patch audits
|
# Number of patch audits
|
||||||
self.patch_audit_count = 0
|
self.patch_audit_count = 0
|
||||||
self.patch_audit = patch_audit.PatchAudit(
|
|
||||||
self.context, None)
|
|
||||||
# trigger a patch audit on startup
|
# trigger a patch audit on startup
|
||||||
self.patch_audit_time = 0
|
self.patch_audit_time = 0
|
||||||
self.firmware_audit = firmware_audit.FirmwareAudit(
|
self.firmware_audit = firmware_audit.FirmwareAudit(self.context, None)
|
||||||
self.context, None)
|
self.kubernetes_audit = kubernetes_audit.KubernetesAudit(self.context, None)
|
||||||
self.kubernetes_audit = kubernetes_audit.KubernetesAudit(
|
self.kube_rootca_update_audit = (
|
||||||
self.context, None)
|
|
||||||
self.kube_rootca_update_audit = \
|
|
||||||
kube_rootca_update_audit.KubeRootcaUpdateAudit(self.context, None)
|
kube_rootca_update_audit.KubeRootcaUpdateAudit(self.context, None)
|
||||||
|
)
|
||||||
|
self.software_audit = software_audit.SoftwareAudit(self.context, None)
|
||||||
|
|
||||||
def _add_missing_endpoints(self):
|
def _add_missing_endpoints(self):
|
||||||
# Update this flag file based on the most recent new endpoint
|
# Update this flag file based on the most recent new endpoint
|
||||||
file_path_list = [
|
file_path = os.path.join(CONFIG_PATH, ".kube_rootca_update_endpoint_added")
|
||||||
os.path.join(CONFIG_PATH, '.kube_rootca_update_endpoint_added'),
|
|
||||||
os.path.join(CONFIG_PATH, '.usm_endpoint_added')
|
|
||||||
]
|
|
||||||
for file_path in file_path_list:
|
|
||||||
# If file exists on the controller, all the endpoints have been
|
# If file exists on the controller, all the endpoints have been
|
||||||
# added to DB since last time an endpoint was added
|
# added to DB since last time an endpoint was added
|
||||||
if not os.path.isfile(file_path):
|
if not os.path.isfile(file_path):
|
||||||
@ -111,7 +113,8 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
# in endpoint_status table
|
# in endpoint_status table
|
||||||
for subcloud in db_api.subcloud_get_all(self.context):
|
for subcloud in db_api.subcloud_get_all(self.context):
|
||||||
subcloud_statuses = db_api.subcloud_status_get_all(
|
subcloud_statuses = db_api.subcloud_status_get_all(
|
||||||
self.context, subcloud.id)
|
self.context, subcloud.id
|
||||||
|
)
|
||||||
# Use set difference to find missing endpoints
|
# Use set difference to find missing endpoints
|
||||||
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
|
endpoint_type_set = set(dccommon_consts.ENDPOINT_TYPES_LIST)
|
||||||
subcloud_set = set()
|
subcloud_set = set()
|
||||||
@ -121,12 +124,12 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
missing_endpoints = list(endpoint_type_set - subcloud_set)
|
missing_endpoints = list(endpoint_type_set - subcloud_set)
|
||||||
|
|
||||||
for endpoint in missing_endpoints:
|
for endpoint in missing_endpoints:
|
||||||
db_api.subcloud_status_create(self.context,
|
db_api.subcloud_status_create(
|
||||||
subcloud.id,
|
self.context, subcloud.id, endpoint
|
||||||
endpoint)
|
)
|
||||||
# Add a flag on a replicated filesystem to avoid re-running
|
# Add a flag on a replicated filesystem to avoid re-running
|
||||||
# the DB checks for missing subcloud endpoints
|
# the DB checks for missing subcloud endpoints
|
||||||
open(file_path, 'w').close()
|
open(file_path, "w").close()
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def trigger_firmware_audit(cls, context):
|
def trigger_firmware_audit(cls, context):
|
||||||
@ -181,21 +184,35 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
def reset_force_patch_audit(cls):
|
def reset_force_patch_audit(cls):
|
||||||
cls.force_patch_audit = False
|
cls.force_patch_audit = False
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def trigger_software_audit(cls, context):
|
||||||
|
"""Trigger software audit at next interval.
|
||||||
|
|
||||||
|
This can be called from outside the dcmanager audit
|
||||||
|
"""
|
||||||
|
cls.force_software_audit = True
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def reset_software_audit(cls):
|
||||||
|
cls.force_software_audit = False
|
||||||
|
|
||||||
def trigger_subcloud_audits(self, context, subcloud_id, exclude_endpoints):
|
def trigger_subcloud_audits(self, context, subcloud_id, exclude_endpoints):
|
||||||
"""Trigger all subcloud audits for one subcloud."""
|
"""Trigger all subcloud audits for one subcloud."""
|
||||||
values = {
|
values = {
|
||||||
'patch_audit_requested': True,
|
"patch_audit_requested": True,
|
||||||
'firmware_audit_requested': True,
|
"firmware_audit_requested": True,
|
||||||
'load_audit_requested': True,
|
"load_audit_requested": True,
|
||||||
'kubernetes_audit_requested': True,
|
"kubernetes_audit_requested": True,
|
||||||
'kube_rootca_update_audit_requested': True,
|
"kube_rootca_update_audit_requested": True,
|
||||||
|
"spare_audit_requested": True,
|
||||||
}
|
}
|
||||||
# For the endpoints excluded in the audit, set it to False in db
|
# For the endpoints excluded in the audit, set it to False in db
|
||||||
# to disable the audit explicitly.
|
# to disable the audit explicitly.
|
||||||
if exclude_endpoints:
|
if exclude_endpoints:
|
||||||
for exclude_endpoint in exclude_endpoints:
|
for exclude_endpoint in exclude_endpoints:
|
||||||
exclude_request = dccommon_consts.ENDPOINT_AUDIT_REQUESTS.get(
|
exclude_request = dccommon_consts.ENDPOINT_AUDIT_REQUESTS.get(
|
||||||
exclude_endpoint)
|
exclude_endpoint
|
||||||
|
)
|
||||||
if exclude_request:
|
if exclude_request:
|
||||||
values.update({exclude_request: False})
|
values.update({exclude_request: False})
|
||||||
db_api.subcloud_audits_update(context, subcloud_id, values)
|
db_api.subcloud_audits_update(context, subcloud_id, values)
|
||||||
@ -211,7 +228,8 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
def trigger_subcloud_endpoints_update(self, context, subcloud_name, endpoints):
|
def trigger_subcloud_endpoints_update(self, context, subcloud_name, endpoints):
|
||||||
"""Trigger update endpoints of services for a subcloud region."""
|
"""Trigger update endpoints of services for a subcloud region."""
|
||||||
self.audit_worker_rpc_client.update_subcloud_endpoints(
|
self.audit_worker_rpc_client.update_subcloud_endpoints(
|
||||||
context, subcloud_name, endpoints)
|
context, subcloud_name, endpoints
|
||||||
|
)
|
||||||
|
|
||||||
def periodic_subcloud_audit(self):
|
def periodic_subcloud_audit(self):
|
||||||
"""Audit availability of subclouds."""
|
"""Audit availability of subclouds."""
|
||||||
@ -223,13 +241,13 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
# audit them and request all sub-audits.
|
# audit them and request all sub-audits.
|
||||||
# (This is for swact and process restart.)
|
# (This is for swact and process restart.)
|
||||||
db_api.subcloud_audits_fix_expired_audits(
|
db_api.subcloud_audits_fix_expired_audits(
|
||||||
self.context, datetime.datetime.utcnow(), trigger_audits=True)
|
self.context, datetime.datetime.utcnow(), trigger_audits=True
|
||||||
|
)
|
||||||
# Blanket catch all exceptions in the audit so that the audit
|
# Blanket catch all exceptions in the audit so that the audit
|
||||||
# does not die.
|
# does not die.
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
eventlet.greenthread.sleep(
|
eventlet.greenthread.sleep(CONF.scheduler.subcloud_audit_interval)
|
||||||
CONF.scheduler.subcloud_audit_interval)
|
|
||||||
self._periodic_subcloud_audit_loop()
|
self._periodic_subcloud_audit_loop()
|
||||||
except eventlet.greenlet.GreenletExit:
|
except eventlet.greenlet.GreenletExit:
|
||||||
# We have been told to exit
|
# We have been told to exit
|
||||||
@ -244,20 +262,30 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
audit_firmware = False
|
audit_firmware = False
|
||||||
audit_kubernetes = False
|
audit_kubernetes = False
|
||||||
audit_kube_rootca_updates = False
|
audit_kube_rootca_updates = False
|
||||||
|
audit_software = False
|
||||||
current_time = time.time()
|
current_time = time.time()
|
||||||
|
|
||||||
# Determine whether to trigger a patch audit of each subcloud
|
# Determine whether to trigger a patch audit of each subcloud
|
||||||
if (SubcloudAuditManager.force_patch_audit or
|
if SubcloudAuditManager.force_patch_audit or (
|
||||||
(current_time - self.patch_audit_time >=
|
current_time - self.patch_audit_time
|
||||||
CONF.scheduler.patch_audit_interval)):
|
>= CONF.scheduler.patch_audit_interval
|
||||||
|
):
|
||||||
LOG.info("Trigger patch audit")
|
LOG.info("Trigger patch audit")
|
||||||
audit_patch = True
|
audit_patch = True
|
||||||
self.patch_audit_time = current_time
|
self.patch_audit_time = current_time
|
||||||
self.patch_audit_count += 1
|
self.patch_audit_count += 1
|
||||||
# Check subcloud software version every other patch audit cycle
|
# Check subcloud software version every other patch audit cycle
|
||||||
if (self.patch_audit_count % 2 != 0 or
|
if (
|
||||||
SubcloudAuditManager.force_patch_audit):
|
self.patch_audit_count % 2 != 0
|
||||||
|
or SubcloudAuditManager.force_patch_audit
|
||||||
|
):
|
||||||
LOG.info("Trigger load audit")
|
LOG.info("Trigger load audit")
|
||||||
audit_load = True
|
audit_load = True
|
||||||
|
if self.patch_audit_count % 2 != 0:
|
||||||
|
LOG.info("Trigger software audit")
|
||||||
|
audit_software = True
|
||||||
|
# Reset force_software_audit only when software audit has been
|
||||||
|
SubcloudAuditManager.reset_software_audit()
|
||||||
if self.patch_audit_count % 4 == 1:
|
if self.patch_audit_count % 4 == 1:
|
||||||
LOG.info("Trigger firmware audit")
|
LOG.info("Trigger firmware audit")
|
||||||
audit_firmware = True
|
audit_firmware = True
|
||||||
@ -296,31 +324,47 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
audit_kube_rootca_updates = True
|
audit_kube_rootca_updates = True
|
||||||
SubcloudAuditManager.reset_force_kube_rootca_update_audit()
|
SubcloudAuditManager.reset_force_kube_rootca_update_audit()
|
||||||
|
|
||||||
return (audit_patch, audit_load, audit_firmware,
|
# Trigger a software audit as it is changed through proxy
|
||||||
audit_kubernetes, audit_kube_rootca_updates)
|
if SubcloudAuditManager.force_software_audit:
|
||||||
|
LOG.info("Trigger software audit")
|
||||||
|
audit_software = True
|
||||||
|
SubcloudAuditManager.reset_software_audit()
|
||||||
|
|
||||||
def _get_audit_data(self,
|
return (
|
||||||
|
audit_patch,
|
||||||
|
audit_load,
|
||||||
|
audit_firmware,
|
||||||
|
audit_kubernetes,
|
||||||
|
audit_kube_rootca_updates,
|
||||||
|
audit_software,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_audit_data(
|
||||||
|
self,
|
||||||
audit_patch,
|
audit_patch,
|
||||||
audit_firmware,
|
audit_firmware,
|
||||||
audit_kubernetes,
|
audit_kubernetes,
|
||||||
audit_kube_rootca_updates):
|
audit_kube_rootca_updates,
|
||||||
|
audit_software,
|
||||||
|
):
|
||||||
"""Return the patch / firmware / kubernetes audit data as needed."""
|
"""Return the patch / firmware / kubernetes audit data as needed."""
|
||||||
patch_audit_data = None
|
patch_audit_data = None
|
||||||
software_audit_data = None
|
software_audit_data = None
|
||||||
firmware_audit_data = None
|
firmware_audit_data = None
|
||||||
kubernetes_audit_data = None
|
kubernetes_audit_data = None
|
||||||
kube_rootca_update_audit_data = None
|
kube_rootca_update_audit_data = None
|
||||||
|
software_audit_data = None
|
||||||
|
|
||||||
# TODO(nicodemos): After the integration with VIM the patch audit and patch
|
# TODO(nicodemos): After the integration with VIM the patch audit and patch
|
||||||
# orchestration will be removed from the dcmanager. The audit_patch will
|
# orchestration will be removed from the dcmanager. The audit_patch will
|
||||||
# be substituted by the software_audit. The software_audit will be
|
# be substituted by the software_audit. The software_audit will be
|
||||||
# responsible for the patch and load audit.
|
# responsible for the patch and load audit.
|
||||||
if audit_patch:
|
if audit_patch:
|
||||||
# Query RegionOne releases
|
|
||||||
software_audit_data = (
|
|
||||||
self.patch_audit.get_software_regionone_audit_data())
|
|
||||||
# Query RegionOne patches and software version
|
# Query RegionOne patches and software version
|
||||||
patch_audit_data = self.patch_audit.get_regionone_audit_data()
|
patch_audit_data = self.patch_audit.get_regionone_audit_data()
|
||||||
|
if audit_software:
|
||||||
|
# Query RegionOne releases
|
||||||
|
software_audit_data = self.patch_audit.get_regionone_audit_data()
|
||||||
if audit_firmware:
|
if audit_firmware:
|
||||||
# Query RegionOne firmware
|
# Query RegionOne firmware
|
||||||
firmware_audit_data = self.firmware_audit.get_regionone_audit_data()
|
firmware_audit_data = self.firmware_audit.get_regionone_audit_data()
|
||||||
@ -329,17 +373,22 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
kubernetes_audit_data = self.kubernetes_audit.get_regionone_audit_data()
|
kubernetes_audit_data = self.kubernetes_audit.get_regionone_audit_data()
|
||||||
if audit_kube_rootca_updates:
|
if audit_kube_rootca_updates:
|
||||||
# Query RegionOne kube rootca update info
|
# Query RegionOne kube rootca update info
|
||||||
kube_rootca_update_audit_data = \
|
kube_rootca_update_audit_data = (
|
||||||
self.kube_rootca_update_audit.get_regionone_audit_data()
|
self.kube_rootca_update_audit.get_regionone_audit_data()
|
||||||
return (patch_audit_data, firmware_audit_data,
|
)
|
||||||
kubernetes_audit_data, kube_rootca_update_audit_data,
|
return (
|
||||||
software_audit_data)
|
patch_audit_data,
|
||||||
|
firmware_audit_data,
|
||||||
|
kubernetes_audit_data,
|
||||||
|
kube_rootca_update_audit_data,
|
||||||
|
software_audit_data,
|
||||||
|
)
|
||||||
|
|
||||||
def _periodic_subcloud_audit_loop(self):
|
def _periodic_subcloud_audit_loop(self):
|
||||||
"""Audit availability of subclouds loop."""
|
"""Audit availability of subclouds loop."""
|
||||||
|
|
||||||
# We will be running in our own green thread here.
|
# We will be running in our own green thread here.
|
||||||
LOG.debug('Triggered subcloud audit.')
|
LOG.debug("Triggered subcloud audit.")
|
||||||
self.audit_count += 1
|
self.audit_count += 1
|
||||||
|
|
||||||
# Determine whether to trigger a state update to each subcloud.
|
# Determine whether to trigger a state update to each subcloud.
|
||||||
@ -350,14 +399,26 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
update_subcloud_state = False
|
update_subcloud_state = False
|
||||||
|
|
||||||
# Determine whether we want to trigger specialty audits.
|
# Determine whether we want to trigger specialty audits.
|
||||||
(audit_patch, audit_load, audit_firmware,
|
(
|
||||||
|
audit_patch,
|
||||||
|
audit_load,
|
||||||
|
audit_firmware,
|
||||||
audit_kubernetes,
|
audit_kubernetes,
|
||||||
audit_kube_rootca_update) = self._get_audits_needed()
|
audit_kube_rootca_update,
|
||||||
|
audit_software,
|
||||||
|
) = self._get_audits_needed()
|
||||||
|
|
||||||
# Set desired audit flags for all subclouds.
|
# Set desired audit flags for all subclouds.
|
||||||
audit_utils.request_subcloud_audits(
|
audit_utils.request_subcloud_audits(
|
||||||
self.context, update_subcloud_state, audit_patch, audit_load,
|
self.context,
|
||||||
audit_firmware, audit_kubernetes, audit_kube_rootca_update)
|
update_subcloud_state,
|
||||||
|
audit_patch,
|
||||||
|
audit_load,
|
||||||
|
audit_firmware,
|
||||||
|
audit_kubernetes,
|
||||||
|
audit_kube_rootca_update,
|
||||||
|
audit_software,
|
||||||
|
)
|
||||||
|
|
||||||
do_openstack_audit = False
|
do_openstack_audit = False
|
||||||
|
|
||||||
@ -379,28 +440,35 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
|
|
||||||
current_time = datetime.datetime.utcnow()
|
current_time = datetime.datetime.utcnow()
|
||||||
last_audit_threshold = current_time - datetime.timedelta(
|
last_audit_threshold = current_time - datetime.timedelta(
|
||||||
seconds=CONF.scheduler.subcloud_audit_interval)
|
seconds=CONF.scheduler.subcloud_audit_interval
|
||||||
|
)
|
||||||
# The sysinv and patching subcloud REST API timeouts are 600 sec,
|
# The sysinv and patching subcloud REST API timeouts are 600 sec,
|
||||||
# and we need to be greater than that, so lets go with that plus
|
# and we need to be greater than that, so lets go with that plus
|
||||||
# an extra audit interval.
|
# an extra audit interval.
|
||||||
last_audit_fixup_threshold = current_time - datetime.timedelta(
|
last_audit_fixup_threshold = current_time - datetime.timedelta(
|
||||||
seconds=(sysinv_v1.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT +
|
seconds=(
|
||||||
CONF.scheduler.subcloud_audit_interval))
|
sysinv_v1.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT
|
||||||
|
+ CONF.scheduler.subcloud_audit_interval
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
# Fix up any stale audit timestamps for subclouds that started an
|
# Fix up any stale audit timestamps for subclouds that started an
|
||||||
# audit but never finished it.
|
# audit but never finished it.
|
||||||
start = datetime.datetime.utcnow()
|
start = datetime.datetime.utcnow()
|
||||||
num_fixed = db_api.subcloud_audits_fix_expired_audits(
|
num_fixed = db_api.subcloud_audits_fix_expired_audits(
|
||||||
self.context, last_audit_fixup_threshold)
|
self.context, last_audit_fixup_threshold
|
||||||
|
)
|
||||||
end = datetime.datetime.utcnow()
|
end = datetime.datetime.utcnow()
|
||||||
if num_fixed > 0:
|
if num_fixed > 0:
|
||||||
LOG.info(
|
LOG.info(
|
||||||
'Fixed up subcloud audit timestamp for %s subclouds.' % num_fixed)
|
"Fixed up subcloud audit timestamp for %s subclouds." % num_fixed
|
||||||
LOG.info('Fixup took %s seconds' % (end - start))
|
)
|
||||||
|
LOG.info("Fixup took %s seconds" % (end - start))
|
||||||
|
|
||||||
subcloud_ids = []
|
subcloud_ids = []
|
||||||
subcloud_audits = db_api.subcloud_audits_get_all_need_audit(
|
subcloud_audits = db_api.subcloud_audits_get_all_need_audit(
|
||||||
self.context, last_audit_threshold)
|
self.context, last_audit_threshold
|
||||||
|
)
|
||||||
|
|
||||||
# Now check whether any of these subclouds need patch audit or firmware
|
# Now check whether any of these subclouds need patch audit or firmware
|
||||||
# audit data and grab it if needed.
|
# audit data and grab it if needed.
|
||||||
@ -430,29 +498,55 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
LOG.debug("DB says kube-rootca-update audit needed")
|
LOG.debug("DB says kube-rootca-update audit needed")
|
||||||
audit_kube_rootca_update = True
|
audit_kube_rootca_update = True
|
||||||
break
|
break
|
||||||
LOG.info("Triggered subcloud audit: patch=(%s) firmware=(%s) "
|
if not audit_software:
|
||||||
"kube=(%s) kube-rootca=(%s)"
|
for audit in subcloud_audits:
|
||||||
% (audit_patch, audit_firmware,
|
if audit.spare_audit_requested:
|
||||||
audit_kubernetes, audit_kube_rootca_update))
|
LOG.debug("DB says software audit needed")
|
||||||
(patch_audit_data, firmware_audit_data,
|
audit_software = True
|
||||||
kubernetes_audit_data, kube_rootca_update_audit_data,
|
break
|
||||||
software_audit_data) = \
|
LOG.info(
|
||||||
self._get_audit_data(audit_patch,
|
"Triggered subcloud audit: patch=(%s) firmware=(%s) "
|
||||||
|
"kube=(%s) kube-rootca=(%s) software=(%s)"
|
||||||
|
% (
|
||||||
|
audit_patch,
|
||||||
audit_firmware,
|
audit_firmware,
|
||||||
audit_kubernetes,
|
audit_kubernetes,
|
||||||
audit_kube_rootca_update)
|
audit_kube_rootca_update,
|
||||||
LOG.debug("patch_audit_data: %s, "
|
audit_software,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
(
|
||||||
|
patch_audit_data,
|
||||||
|
firmware_audit_data,
|
||||||
|
kubernetes_audit_data,
|
||||||
|
kube_rootca_update_audit_data,
|
||||||
|
software_audit_data,
|
||||||
|
) = self._get_audit_data(
|
||||||
|
audit_patch,
|
||||||
|
audit_firmware,
|
||||||
|
audit_kubernetes,
|
||||||
|
audit_kube_rootca_update,
|
||||||
|
audit_software,
|
||||||
|
)
|
||||||
|
LOG.debug(
|
||||||
|
"patch_audit_data: %s, "
|
||||||
"firmware_audit_data: %s, "
|
"firmware_audit_data: %s, "
|
||||||
"kubernetes_audit_data: %s, "
|
"kubernetes_audit_data: %s, "
|
||||||
"kube_rootca_update_audit_data: : %s, "
|
"kube_rootca_update_audit_data: : %s, "
|
||||||
% (patch_audit_data,
|
"software_audit_data: %s"
|
||||||
|
% (
|
||||||
|
patch_audit_data,
|
||||||
firmware_audit_data,
|
firmware_audit_data,
|
||||||
kubernetes_audit_data,
|
kubernetes_audit_data,
|
||||||
kube_rootca_update_audit_data))
|
kube_rootca_update_audit_data,
|
||||||
|
software_audit_data,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
# We want a chunksize of at least 1 so add the number of workers.
|
# We want a chunksize of at least 1 so add the number of workers.
|
||||||
chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // (
|
chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) // (
|
||||||
CONF.audit_worker_workers)
|
CONF.audit_worker_workers
|
||||||
|
)
|
||||||
for audit in subcloud_audits:
|
for audit in subcloud_audits:
|
||||||
subcloud_ids.append(audit.subcloud_id)
|
subcloud_ids.append(audit.subcloud_id)
|
||||||
if len(subcloud_ids) == chunksize:
|
if len(subcloud_ids) == chunksize:
|
||||||
@ -465,10 +559,11 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
kubernetes_audit_data,
|
kubernetes_audit_data,
|
||||||
do_openstack_audit,
|
do_openstack_audit,
|
||||||
kube_rootca_update_audit_data,
|
kube_rootca_update_audit_data,
|
||||||
software_audit_data)
|
software_audit_data,
|
||||||
|
)
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
'Sent subcloud audit request message for subclouds: %s' %
|
"Sent subcloud audit request message for subclouds: %s"
|
||||||
subcloud_ids
|
% subcloud_ids
|
||||||
)
|
)
|
||||||
subcloud_ids = []
|
subcloud_ids = []
|
||||||
if len(subcloud_ids) > 0:
|
if len(subcloud_ids) > 0:
|
||||||
@ -481,10 +576,11 @@ class SubcloudAuditManager(manager.Manager):
|
|||||||
kubernetes_audit_data,
|
kubernetes_audit_data,
|
||||||
do_openstack_audit,
|
do_openstack_audit,
|
||||||
kube_rootca_update_audit_data,
|
kube_rootca_update_audit_data,
|
||||||
software_audit_data)
|
software_audit_data,
|
||||||
|
)
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
'Sent final subcloud audit request message for subclouds: %s' %
|
"Sent final subcloud audit request message for subclouds: %s"
|
||||||
subcloud_ids
|
% subcloud_ids
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
LOG.debug('Done sending audit request messages.')
|
LOG.debug("Done sending audit request messages.")
|
||||||
|
@@ -27,6 +27,7 @@ from dcmanager.audit import firmware_audit
 from dcmanager.audit import kube_rootca_update_audit
 from dcmanager.audit import kubernetes_audit
 from dcmanager.audit import patch_audit
+from dcmanager.audit import software_audit
 from dcmanager.audit.subcloud_audit_manager import HELM_APP_OPENSTACK
 from dcmanager.common import consts
 from dcmanager.common import context
@@ -70,10 +71,13 @@ class SubcloudAuditWorkerManager(manager.Manager):
             self.context, self.state_rpc_client)
         self.kubernetes_audit = kubernetes_audit.KubernetesAudit(
             self.context, self.state_rpc_client)
-        self.kube_rootca_update_audit = \
-            kube_rootca_update_audit.KubeRootcaUpdateAudit(
-                self.context,
-                self.state_rpc_client)
+        self.kube_rootca_update_audit = (
+            kube_rootca_update_audit.KubeRootcaUpdateAudit(
+                self.context, self.state_rpc_client
+            )
+        )
+        self.software_audit = software_audit.SoftwareAudit(
+            self.context, self.state_rpc_client)
         self.pid = os.getpid()

     def audit_subclouds(self,
@@ -149,6 +153,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
             do_kube_rootca_update_audit = \
                 subcloud_audits.kube_rootca_update_audit_requested
             update_subcloud_state = subcloud_audits.state_update_requested
+            do_software_audit = subcloud_audits.spare_audit_requested

             # Create a new greenthread for each subcloud to allow the audits
             # to be done in parallel. If there are not enough greenthreads
@@ -167,7 +172,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
                 do_load_audit,
                 do_firmware_audit,
                 do_kubernetes_audit,
-                do_kube_rootca_update_audit)
+                do_kube_rootca_update_audit,
+                do_software_audit)

     def update_subcloud_endpoints(self, context, subcloud_name, endpoints):
         try:
@@ -310,7 +316,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
                          do_load_audit,
                          do_firmware_audit,
                          do_kubernetes_audit,
-                         do_kube_rootca_update_audit):
+                         do_kube_rootca_update_audit,
+                         do_software_audit):
         audits_done = list()
         failures = list()
         # Do the actual subcloud audit.
@@ -328,7 +335,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
                 do_load_audit,
                 do_firmware_audit,
                 do_kubernetes_audit,
-                do_kube_rootca_update_audit)
+                do_kube_rootca_update_audit,
+                do_software_audit)
         except Exception:
             LOG.exception("Got exception auditing subcloud: %s" % subcloud.name)

@@ -360,7 +368,8 @@ class SubcloudAuditWorkerManager(manager.Manager):
                         do_load_audit,
                         do_firmware_audit,
                         do_kubernetes_audit,
-                        do_kube_rootca_update_audit):
+                        do_kube_rootca_update_audit,
+                        do_software_audit):
         """Audit a single subcloud."""

         avail_status_current = subcloud.availability_status
@@ -492,12 +501,11 @@ class SubcloudAuditWorkerManager(manager.Manager):
         failmsg = "Audit failure subcloud: %s, endpoint: %s"

         # If we have patch audit data, audit the subcloud
-        if do_patch_audit and (patch_audit_data or software_audit_data):
+        if do_patch_audit and patch_audit_data:
             try:
-                self.patch_audit.subcloud_audit(subcloud_name,
+                self.patch_audit.subcloud_patch_audit(subcloud_name,
                                                 subcloud_region,
                                                 patch_audit_data,
-                                                software_audit_data,
                                                 do_load_audit)
                 audits_done.append('patch')
                 if do_load_audit:
@@ -550,4 +558,13 @@ class SubcloudAuditWorkerManager(manager.Manager):
             except Exception:
                 LOG.exception(failmsg % (subcloud.name, 'openstack'))
                 failures.append('openstack')
+        # Perform software audit
+        if do_software_audit:
+            try:
+                self.software_audit.subcloud_software_audit(
+                    subcloud_name, subcloud_region, software_audit_data)
+                audits_done.append('software')
+            except Exception:
+                LOG.exception(failmsg % (subcloud.name, 'software'))
+                failures.append('software')
         return audits_done, failures
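For orientation, the 'software' entry that the worker now appends to audits_done is what later clears the reused spare flag in the database layer (see subcloud_audits_end_audit further down in this diff). A minimal sketch of that round trip, with the RPC and persistence details stubbed out:

    # Sketch only: the real path goes through RPC and subcloud_audits_end_audit().
    def end_audit(audits_done, subcloud_audits_ref):
        # A completed software audit clears the request flag; if the audit did not
        # run (no 'software' entry), the flag stays set and it is retried later.
        if 'software' in audits_done:
            subcloud_audits_ref.spare_audit_requested = False
        return subcloud_audits_ref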
@@ -26,7 +26,8 @@ def request_subcloud_audits(context,
                             audit_load=False,
                             audit_firmware=False,
                             audit_kubernetes=False,
-                            audit_kube_rootca=False):
+                            audit_kube_rootca=False,
+                            audit_software=False,):
     values = {}
     if update_subcloud_state:
         values['state_update_requested'] = True
@@ -40,4 +41,6 @@ def request_subcloud_audits(context,
         values['kubernetes_audit_requested'] = True
     if audit_kube_rootca:
         values['kube_rootca_update_audit_requested'] = True
+    if audit_software:
+        values['spare_audit_requested'] = True
     db_api.subcloud_audits_update_all(context, values)
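A minimal usage sketch of the extended helper. Only the audit_software keyword and the reused spare_audit_requested column come from this commit; the admin-context call and import path are assumptions:

    from dcmanager.common import context as dcmanager_context   # assumed import path

    ctx = dcmanager_context.get_admin_context()                 # assumed helper
    # Request a software audit for all subclouds; internally this sets the
    # reused 'spare_audit_requested' column for the next audit cycle.
    request_subcloud_audits(ctx, audit_software=True)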
@@ -32,8 +32,7 @@ from oslo_utils import uuidutils
 import sqlalchemy
 from sqlalchemy import desc
 from sqlalchemy import or_
-from sqlalchemy.orm.exc import MultipleResultsFound
-from sqlalchemy.orm.exc import NoResultFound
+from sqlalchemy.orm import exc
 from sqlalchemy.orm import joinedload_all
 from sqlalchemy.orm import load_only
 from sqlalchemy.sql.expression import true
@@ -204,7 +203,8 @@ def subcloud_audits_get_all_need_audit(context, last_audit_threshold):
         (models.SubcloudAudits.load_audit_requested == true()) |
         (models.SubcloudAudits.kube_rootca_update_audit_requested ==
             true()) |
-        (models.SubcloudAudits.kubernetes_audit_requested == true())).\
+        (models.SubcloudAudits.kubernetes_audit_requested == true()) |
+        (models.SubcloudAudits.spare_audit_requested == true())).\
         all()
     return result

@@ -239,6 +239,8 @@ def subcloud_audits_end_audit(context, subcloud_id, audits_done):
         subcloud_audits_ref.kube_rootca_update_audit_requested = False
     if 'kubernetes' in audits_done:
         subcloud_audits_ref.kubernetes_audit_requested = False
+    if 'software' in audits_done:
+        subcloud_audits_ref.spare_audit_requested = False
     subcloud_audits_ref.save(session)
     return subcloud_audits_ref

@@ -260,6 +262,7 @@ def subcloud_audits_fix_expired_audits(context, last_audit_threshold,
     values['load_audit_requested'] = True
     values['kubernetes_audit_requested'] = True
     values['kube_rootca_update_audit_requested'] = True
+    values['spare_audit_requested'] = True
     with write_session() as session:
         result = session.query(models.SubcloudAudits).\
             options(load_only("deleted", "audit_started_at",
@@ -817,9 +820,9 @@ def system_peer_get(context, peer_id):
             filter_by(deleted=0). \
             filter_by(id=peer_id). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SystemPeerNotFound(peer_id=peer_id)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         raise exception.InvalidParameterValue(
             err="Multiple entries found for system peer %s" % peer_id)

@@ -833,9 +836,9 @@ def system_peer_get_by_name(context, name):
             filter_by(deleted=0). \
             filter_by(peer_name=name). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SystemPeerNameNotFound(name=name)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         # This exception should never happen due to the UNIQUE setting for name
         raise exception.InvalidParameterValue(
             err="Multiple entries found for system peer %s" % name)
@@ -850,9 +853,9 @@ def system_peer_get_by_uuid(context, uuid):
             filter_by(deleted=0). \
             filter_by(peer_uuid=uuid). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SystemPeerUUIDNotFound(uuid=uuid)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         # This exception should never happen due to the UNIQUE setting for uuid
         raise exception.InvalidParameterValue(
             err="Multiple entries found for system peer %s" % uuid)
@@ -973,9 +976,9 @@ def subcloud_group_get(context, group_id):
             filter_by(deleted=0). \
             filter_by(id=group_id). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SubcloudGroupNotFound(group_id=group_id)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         raise exception.InvalidParameterValue(
             err="Multiple entries found for subcloud group %s" % group_id)

@@ -989,9 +992,9 @@ def subcloud_group_get_by_name(context, name):
             filter_by(deleted=0). \
             filter_by(name=name). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SubcloudGroupNameNotFound(name=name)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         # This exception should never happen due to the UNIQUE setting for name
         raise exception.InvalidParameterValue(
             err="Multiple entries found for subcloud group %s" % name)
@@ -1109,9 +1112,9 @@ def subcloud_peer_group_get(context, group_id):
             filter_by(deleted=0). \
             filter_by(id=group_id). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SubcloudPeerGroupNotFound(group_id=group_id)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         raise exception.InvalidParameterValue(
             err="Multiple entries found for subcloud peer group %s" % group_id)

@@ -1149,9 +1152,9 @@ def subcloud_peer_group_get_by_name(context, name):
             filter_by(deleted=0). \
             filter_by(peer_group_name=name). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SubcloudPeerGroupNameNotFound(name=name)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         # This exception should never happen due to the UNIQUE setting for name
         raise exception.InvalidParameterValue(
             err="Multiple entries found for subcloud peer group %s" % name)
@@ -1288,10 +1291,10 @@ def peer_group_association_get(context, association_id):
             filter_by(deleted=0). \
             filter_by(id=association_id). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.PeerGroupAssociationNotFound(
             association_id=association_id)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         raise exception.InvalidParameterValue(
             err="Multiple entries found for peer group association %s" %
                 association_id)
@@ -1321,10 +1324,10 @@ def peer_group_association_get_by_peer_group_and_system_peer_id(context,
             filter_by(peer_group_id=peer_group_id). \
             filter_by(system_peer_id=system_peer_id). \
             one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.PeerGroupAssociationCombinationNotFound(
             peer_group_id=peer_group_id, system_peer_id=system_peer_id)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         # This exception should never happen due to the UNIQUE setting for name
         raise exception.InvalidParameterValue(
             err="Multiple entries found for peer group association %s,%s" %
@@ -1485,9 +1488,9 @@ def _subcloud_alarms_get(context, name):

     try:
         return query.one()
-    except NoResultFound:
+    except exc.NoResultFound:
         raise exception.SubcloudNameNotFound(name=name)
-    except MultipleResultsFound:
+    except exc.MultipleResultsFound:
         raise exception.InvalidParameterValue(
             err="Multiple entries found for subcloud %s" % name)
@@ -203,20 +203,23 @@ class SubcloudAudits(BASE, DCManagerBase):
     __tablename__ = 'subcloud_audits'

     id = Column(Integer, primary_key=True, nullable=False)
-    subcloud_id = Column(Integer,
-                         ForeignKey('subclouds.id', ondelete='CASCADE'),
-                         unique=True)
-    audit_started_at = Column(DateTime(timezone=False),
-                              default=datetime.datetime.min)
-    audit_finished_at = Column(DateTime(timezone=False),
-                               default=datetime.datetime.min)
+    subcloud_id = Column(
+        Integer, ForeignKey('subclouds.id', ondelete='CASCADE'), unique=True
+    )
+    audit_started_at = Column(
+        DateTime(timezone=False), default=datetime.datetime.min
+    )
+    audit_finished_at = Column(
+        DateTime(timezone=False), default=datetime.datetime.min
+    )
     state_update_requested = Column(Boolean, nullable=False, default=False)
     patch_audit_requested = Column(Boolean, nullable=False, default=False)
     load_audit_requested = Column(Boolean, nullable=False, default=False)
     firmware_audit_requested = Column(Boolean, nullable=False, default=False)
     kubernetes_audit_requested = Column(Boolean, nullable=False, default=False)
-    kube_rootca_update_audit_requested = Column(Boolean, nullable=False,
-                                                default=False)
+    kube_rootca_update_audit_requested = Column(
+        Boolean, nullable=False, default=False
+    )
     spare_audit_requested = Column(Boolean, nullable=False, default=False)
     spare2_audit_requested = Column(Boolean, nullable=False, default=False)
     reserved = Column(Text)
@@ -28,8 +28,7 @@ from dcmanager.orchestrator.states.patch.finishing_patch_strategy import \
     FinishingPatchStrategyState
 from dcmanager.orchestrator.states.patch.job_data import PatchJobData
 from dcmanager.orchestrator.states.patch.pre_check import PreCheckState
-from dcmanager.orchestrator.states.patch.updating_patches import \
-    UpdatingPatchesState
+from dcmanager.orchestrator.states.patch.updating_patches import UpdatingPatchesState

 LOG = logging.getLogger(__name__)
@@ -7,16 +7,16 @@
 from dccommon.drivers.openstack import vim
 from dcmanager.common import consts
 from dcmanager.orchestrator.orch_thread import OrchThread
-from dcmanager.orchestrator.states.software.apply_vim_software_strategy \
-    import ApplyVIMSoftwareStrategyState
+from dcmanager.orchestrator.states.software.apply_vim_software_strategy import \
+    ApplyVIMSoftwareStrategyState
 from dcmanager.orchestrator.states.software.cache.shared_cache_repository import \
     SharedCacheRepository
-from dcmanager.orchestrator.states.software.create_vim_software_strategy \
-    import CreateVIMSoftwareStrategyState
+from dcmanager.orchestrator.states.software.create_vim_software_strategy import \
+    CreateVIMSoftwareStrategyState
-from dcmanager.orchestrator.states.software.finish_strategy \
-    import FinishStrategyState
+from dcmanager.orchestrator.states.software.finish_strategy import \
+    FinishStrategyState
-from dcmanager.orchestrator.states.software.install_license \
-    import InstallLicenseState
+from dcmanager.orchestrator.states.software.install_license import \
+    InstallLicenseState
 from dcmanager.orchestrator.states.software.pre_check import PreCheckState


@@ -59,8 +59,8 @@ class SoftwareOrchThread(OrchThread):
         self._shared_caches.initialize_caches()

     def trigger_audit(self):
-        """Trigger an audit for upgrade (which is combined with patch audit)"""
-        self.audit_rpc_client.trigger_patch_audit(self.context)
+        """Trigger an audit for software"""
+        self.audit_rpc_client.trigger_software_audit(self.context)

     def pre_apply_setup(self):
         # Restart caches for next strategy
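trigger_audit above now goes through trigger_software_audit on the audit RPC client. The client-side helper itself is not part of this excerpt; a hypothetical sketch of how such a method is usually written in the dcmanager rpcapi modules (the cast/make_msg plumbing is an assumption, only the method name comes from this commit):

    # Hypothetical sketch; the real dcmanager.audit.rpcapi method may differ.
    def trigger_software_audit(self, ctxt):
        return self.cast(ctxt, self.make_msg('trigger_software_audit'))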
@@ -18,15 +18,14 @@
 #

 import functools
-import six

 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
 from oslo_service import service
+import six

 from dccommon import consts as dccommon_consts

 from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
 from dcmanager.common import consts
 from dcmanager.common import context
@@ -82,9 +81,9 @@ class DCManagerStateService(service.Service):
         LOG.info("Starting %s", self.__class__.__name__)
         utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
         self._init_managers()
-        target = oslo_messaging.Target(version=self.rpc_api_version,
-                                       server=self.host,
-                                       topic=self.topic)
+        target = oslo_messaging.Target(
+            version=self.rpc_api_version, server=self.host, topic=self.topic
+        )
         self.target = target
         self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
         self._rpc_server.start()
@@ -99,10 +98,9 @@ class DCManagerStateService(service.Service):
         try:
             self._rpc_server.stop()
             self._rpc_server.wait()
-            LOG.info('Engine service stopped successfully')
+            LOG.info("Engine service stopped successfully")
         except Exception as ex:
-            LOG.error('Failed to stop engine service: %s',
-                      six.text_type(ex))
+            LOG.error("Failed to stop engine service: %s", six.text_type(ex))

     def stop(self):
         LOG.info("Stopping %s", self.__class__.__name__)
@@ -113,57 +111,83 @@ class DCManagerStateService(service.Service):

     @request_context
     def update_subcloud_endpoint_status(
-        self, context, subcloud_name=None, subcloud_region=None, endpoint_type=None,
-        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC, alarmable=True,
-        ignore_endpoints=None
+        self,
+        context,
+        subcloud_name=None,
+        subcloud_region=None,
+        endpoint_type=None,
+        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
+        alarmable=True,
+        ignore_endpoints=None,
     ):
         # Updates subcloud endpoint sync status
-        LOG.info("Handling update_subcloud_endpoint_status request for "
+        LOG.info(
+            "Handling update_subcloud_endpoint_status request for "
             "subcloud: (%s) endpoint: (%s) status:(%s) "
-            % (subcloud_name, endpoint_type, sync_status))
+            % (subcloud_name, endpoint_type, sync_status)
+        )

-        self.subcloud_state_manager. \
-            update_subcloud_endpoint_status(context,
+        self.subcloud_state_manager.update_subcloud_endpoint_status(
+            context,
             subcloud_region,
             endpoint_type,
             sync_status,
             alarmable,
-            ignore_endpoints)
+            ignore_endpoints,
+        )

         # If the patching sync status is being set to unknown, trigger the
         # patching audit so it can update the sync status ASAP.
-        if (endpoint_type == dccommon_consts.ENDPOINT_TYPE_PATCHING
-                or endpoint_type == dccommon_consts.ENDPOINT_TYPE_SOFTWARE) and \
-                sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
+        if (
+            endpoint_type == dccommon_consts.ENDPOINT_TYPE_PATCHING
+            and sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
+        ):
             self.audit_rpc_client.trigger_patch_audit(context)

+        # If the software sync status is being set to unknown, trigger the
+        # software audit so it can update the sync status ASAP.
+        if (
+            endpoint_type == dccommon_consts.ENDPOINT_TYPE_SOFTWARE
+            and sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
+        ):
+            self.audit_rpc_client.trigger_software_audit(context)
+
         # If the firmware sync status is being set to unknown, trigger the
         # firmware audit so it can update the sync status ASAP.
-        if endpoint_type == dccommon_consts.ENDPOINT_TYPE_FIRMWARE and \
-                sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
+        if (
+            endpoint_type == dccommon_consts.ENDPOINT_TYPE_FIRMWARE
+            and sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
+        ):
             self.audit_rpc_client.trigger_firmware_audit(context)

         # If the kubernetes sync status is being set to unknown, trigger the
         # kubernetes audit so it can update the sync status ASAP.
-        if endpoint_type == dccommon_consts.ENDPOINT_TYPE_KUBERNETES and \
-                sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
+        if (
+            endpoint_type == dccommon_consts.ENDPOINT_TYPE_KUBERNETES
+            and sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
+        ):
             self.audit_rpc_client.trigger_kubernetes_audit(context)

         return

     @request_context
-    def update_subcloud_availability(self, context,
+    def update_subcloud_availability(
+        self,
+        context,
         subcloud_name,
         subcloud_region,
         availability_status,
         update_state_only=False,
-        audit_fail_count=None):
+        audit_fail_count=None,
+    ):
         # Updates subcloud availability
-        LOG.info("Handling update_subcloud_availability request for: %s" %
-                 subcloud_name)
+        LOG.info(
+            "Handling update_subcloud_availability request for: %s" % subcloud_name
+        )
         self.subcloud_state_manager.update_subcloud_availability(
             context,
             subcloud_region,
             availability_status,
             update_state_only,
-            audit_fail_count)
+            audit_fail_count,
+        )
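The four if-blocks above all follow the same pattern: when an endpoint's sync status is being set to unknown, the matching audit is triggered so the status gets refreshed quickly; the software endpoint simply gains its own branch instead of riding on the patching one. Read as a dispatch table (illustrative only, not how the commit writes it):

    # Illustrative summary of the trigger logic above; not part of the commit.
    triggers = {
        dccommon_consts.ENDPOINT_TYPE_PATCHING: self.audit_rpc_client.trigger_patch_audit,
        dccommon_consts.ENDPOINT_TYPE_SOFTWARE: self.audit_rpc_client.trigger_software_audit,
        dccommon_consts.ENDPOINT_TYPE_FIRMWARE: self.audit_rpc_client.trigger_firmware_audit,
        dccommon_consts.ENDPOINT_TYPE_KUBERNETES: self.audit_rpc_client.trigger_kubernetes_audit,
    }
    if sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN and endpoint_type in triggers:
        triggers[endpoint_type](context)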
@@ -413,10 +413,6 @@ class TestFirmwareAudit(base.DCManagerTestCase):
         self.mock_audit_worker_api.return_value = self.fake_audit_worker_api
         self.addCleanup(p.stop)

-        p = mock.patch.object(patch_audit, 'SoftwareClient')
-        self.mock_patch_audit_sc = p.start()
-        self.addCleanup(p.stop)
-
     def _rpc_convert(self, object_list):
         # Convert to dict like what would happen calling via RPC
         dict_results = []
@@ -425,8 +421,13 @@ class TestFirmwareAudit(base.DCManagerTestCase):
         return dict_results

     def get_fw_audit_data(self, am):
-        patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_root,\
-            software_audit_data = am._get_audit_data(True, True, True, True)
+        (
+            _,
+            firmware_audit_data,
+            _,
+            _,
+            _
+        ) = am._get_audit_data(True, True, True, True, True)

         # Convert to dict like what would happen calling via RPC
         firmware_audit_data = self._rpc_convert(firmware_audit_data)
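All of the audit test helpers in this and the following test modules get the same update: _get_audit_data now takes a fifth boolean for the software audit and returns a fifth element, so each helper unpacks five values and keeps only the one it exercises. The full shape, for reference:

    # For reference: the five-element result the updated tests unpack.
    (patch_audit_data,
     firmware_audit_data,
     kubernetes_audit_data,
     kube_rootca_update_audit_data,
     software_audit_data) = am._get_audit_data(True, True, True, True, True)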
@@ -110,10 +110,6 @@ class TestKubernetesAudit(base.DCManagerTestCase):
         self.mock_patch_audit_pc.return_value = mock.MagicMock()
         self.addCleanup(p.stop)

-        p = mock.patch.object(patch_audit, 'SoftwareClient')
-        self.mock_patch_audit_sc = p.start()
-        self.addCleanup(p.stop)
-
         p = mock.patch.object(firmware_audit, 'OpenStackDriver')
         self.mock_firmware_audit_driver = p.start()
         self.mock_firmware_audit_driver.return_value = mock.MagicMock()
@@ -147,8 +143,13 @@ class TestKubernetesAudit(base.DCManagerTestCase):
         return dict_results

     def get_kube_audit_data(self, am):
-        patch_audit_data, firmware_audit_data, kubernetes_audit_data, kube_rootca, \
-            software_audit_data = am._get_audit_data(True, True, True, True)
+        (
+            _,
+            _,
+            kubernetes_audit_data,
+            _,
+            _
+        ) = am._get_audit_data(True, True, True, True, True)
         # Convert to dict like what would happen calling via RPC
         kubernetes_audit_data = self._rpc_convert(kubernetes_audit_data)
         return kubernetes_audit_data
@@ -9,7 +9,6 @@ import mock
 from dccommon import consts as dccommon_consts
 from dcmanager.audit import kube_rootca_update_audit
 from dcmanager.audit import subcloud_audit_manager
-
 from dcmanager.tests import base
 from dcmanager.tests import utils

@@ -106,9 +105,13 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
         self.rootca_fm_client.get_alarms_by_ids.return_value = None

     def get_rootca_audit_data(self, am):
-        patch_audit_data, firmware_audit_data, kubernetes_audit_data, \
-            kube_rootca_audit_data, software_audit_data = am._get_audit_data(
-                True, True, True, True)
+        (
+            _,
+            _,
+            _,
+            kube_rootca_audit_data,
+            _
+        ) = am._get_audit_data(True, True, True, True, True)

         return kube_rootca_audit_data
@@ -263,16 +263,14 @@ class TestPatchAudit(base.DCManagerTestCase):
         self.mock_audit_worker_api.return_value = self.fake_audit_worker_api
         self.addCleanup(p.stop)

-        # Mock the Software Client
-        p = mock.patch.object(patch_audit, 'SoftwareClient')
-        self.mock_patch_audit_sc = p.start()
-        self.addCleanup(p.stop)
-
     def get_patch_audit_data(self, am):
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_data,
-         software_audit_data) = \
-            am._get_audit_data(True, True, True, True)
+        (
+            patch_audit_data,
+            _,
+            _,
+            _,
+            _
+        ) = am._get_audit_data(True, True, True, True, True)
         # Convert to dict like what would happen calling via RPC
         patch_audit_data = patch_audit_data.to_dict()
         return patch_audit_data
@@ -18,9 +18,8 @@ import copy
 import random
 import sys

-import mock
-
 from keystoneauth1 import exceptions as keystone_exceptions
+import mock

 from dccommon import consts as dccommon_consts
 from dcmanager.audit import subcloud_audit_manager
@@ -412,13 +411,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         # Note: the other data should also be converted...
         patch_audit_data = patch_audit_data.to_dict()
@@ -434,7 +440,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_load_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit)

         # Verify the subcloud was set to online
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -457,9 +464,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             subcloud.name, self.fake_openstack_client.fm_client)

         # Verify patch audit is called
-        self.fake_patch_audit.subcloud_audit.assert_called_with(
-            subcloud.name, subcloud.region_name, patch_audit_data,
-            software_audit_data, do_load_audit)
+        self.fake_patch_audit.subcloud_patch_audit.assert_called_with(
+            subcloud.name, subcloud.region_name, patch_audit_data, do_load_audit)

         # Verify firmware audit is called
         self.fake_firmware_audit.subcloud_firmware_audit.assert_called_with(
@@ -494,13 +500,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         # Note: the other data should also be converted...
         patch_audit_data = patch_audit_data.to_dict()
@@ -516,7 +529,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_load_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit)

         # Verify the subcloud was set to online
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -566,13 +580,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         # Note: the other data should also be converted...
         patch_audit_data = patch_audit_data.to_dict()
@@ -581,7 +602,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             firmware_audit_data, kubernetes_audit_data,
             kube_rootca_update_audit_data, software_audit_data, do_patch_audit,
             do_load_audit, do_firmware_audit, do_kubernetes_audit,
-            do_kube_rootca_update_audit
+            do_kube_rootca_update_audit, do_software_audit
         )

         # Verify the subcloud was set to online
@@ -639,7 +660,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_load_audit=False,
             do_firmware_audit=False,
             do_kubernetes_audit=False,
-            do_kube_rootca_update_audit=False)
+            do_kube_rootca_update_audit=False,
+            do_software_audit=False)

         # Verify the subcloud state was not updated
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -683,7 +705,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_load_audit=False,
             do_firmware_audit=False,
             do_kubernetes_audit=False,
-            do_kube_rootca_update_audit=False)
+            do_kube_rootca_update_audit=False,
+            do_software_audit=False)

         # Verify the subcloud state was updated even though no change
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -744,13 +767,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         patch_audit_data = patch_audit_data.to_dict()
         wm._audit_subcloud(
@@ -763,15 +793,16 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_patch_audit=do_patch_audit, do_load_audit=do_load_audit,
             do_firmware_audit=do_firmware_audit,
             do_kubernetes_audit=do_kubernetes_audit,
-            do_kube_rootca_update_audit=do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit=do_kube_rootca_update_audit,
+            do_software_audit=do_software_audit)

         # Verify alarm update is called once
         self.fake_alarm_aggr.update_alarm_summary.assert_called_once_with(
             subcloud.name, self.fake_openstack_client.fm_client)

         # Verify patch audit is called once
-        self.fake_patch_audit.subcloud_audit.assert_called_once_with(
-            subcloud.name, subcloud.region_name, mock.ANY, mock.ANY, True)
+        self.fake_patch_audit.subcloud_patch_audit.assert_called_once_with(
+            subcloud.name, subcloud.region_name, mock.ANY, True)

         # Verify firmware audit is called once
         self.fake_firmware_audit.subcloud_firmware_audit.assert_called_once_with(
@@ -811,7 +842,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_patch_audit=do_patch_audit, do_load_audit=do_load_audit,
             do_firmware_audit=do_firmware_audit,
             do_kubernetes_audit=do_kubernetes_audit,
-            do_kube_rootca_update_audit=do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit=do_kube_rootca_update_audit,
+            do_software_audit=do_software_audit)

         audit_fail_count = audit_fail_count + 1

@@ -827,7 +859,7 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         self.fake_alarm_aggr.update_alarm_summary.assert_called_once()

         # Verify patch audit is called only once
-        self.fake_patch_audit.subcloud_audit.assert_called_once()
+        self.fake_patch_audit.subcloud_patch_audit.assert_called_once()

         # Verify firmware audit is only called once
         self.fake_firmware_audit.subcloud_firmware_audit.assert_called_once()
@@ -861,13 +893,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         patch_audit_data = patch_audit_data.to_dict()
         wm._audit_subcloud(
@@ -880,7 +919,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_patch_audit=do_patch_audit, do_load_audit=do_load_audit,
             do_firmware_audit=do_firmware_audit,
             do_kubernetes_audit=do_kubernetes_audit,
-            do_kube_rootca_update_audit=do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit=do_kube_rootca_update_audit,
+            do_software_audit=do_software_audit)

         # Verify the subcloud state was not updated
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -969,13 +1009,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         patch_audit_data = patch_audit_data.to_dict()
         wm._audit_subcloud(
@@ -988,7 +1035,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_patch_audit=do_patch_audit, do_load_audit=do_load_audit,
             do_firmware_audit=do_firmware_audit,
             do_kubernetes_audit=do_kubernetes_audit,
-            do_kube_rootca_update_audit=do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit=do_kube_rootca_update_audit,
+            do_software_audit=do_software_audit)

         # Verify that the subcloud was updated to offline
         audit_fail_count = 2
@@ -1053,14 +1101,20 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = True
         do_kubernetes_audit = True
         do_kube_rootca_update_audit = True
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data
-         ) = \
-            am._get_audit_data(do_patch_audit,
+        do_software_audit = True
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         patch_audit_data = patch_audit_data.to_dict()
         wm._audit_subcloud(
@@ -1073,7 +1127,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_patch_audit=do_patch_audit, do_load_audit=do_load_audit,
             do_firmware_audit=do_firmware_audit,
             do_kubernetes_audit=do_kubernetes_audit,
-            do_kube_rootca_update_audit=do_kube_rootca_update_audit)
+            do_kube_rootca_update_audit=do_kube_rootca_update_audit,
+            do_software_audit=do_software_audit)

         # Verify the audit fail count was updated in the DB.
         subcloud = db_api.subcloud_get(self.ctx, subcloud.id)
@@ -1148,7 +1203,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             False,  # do_load_audit
             False,  # do_firmware_audit
             False,  # do_kubernetes_audit
-            False)  # do_kube_rootca_audit
+            False,  # do_kube_rootca_audit
+            False,)  # do_software_audit

         # Verify the subcloud state was not updated
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -1213,7 +1269,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             False,  # do_load_audit,
             False,  # do_firmware_audit
             False,  # do_kubernetes_audit
-            False)  # do_kube_rootca_update_audit
+            False,  # do_kube_rootca_update_audit
+            False)  # do_software_audit

         # Verify the subcloud state was not updated
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -1277,7 +1334,8 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             False,  # do_load_audit
             False,  # do_firmware_audit
             False,  # do_kubernetes_audit
-            False)  # do_kube_rootca_update_audit
+            False,  # do_kube_rootca_update_audit
+            False)  # do_software_audit

         # Verify the subcloud state was not updated
         self.fake_dcmanager_state_api.update_subcloud_availability.\
@@ -1329,13 +1387,21 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
         do_firmware_audit = False
         do_kubernetes_audit = False
         do_kube_rootca_audit = False
-        (patch_audit_data, firmware_audit_data,
-         kubernetes_audit_data, kube_rootca_update_audit_data,
-         software_audit_data) = \
-            am._get_audit_data(do_patch_audit,
+        do_kube_rootca_update_audit = False
+        do_software_audit = False
+        (
+            patch_audit_data,
+            firmware_audit_data,
+            kubernetes_audit_data,
+            kube_rootca_update_audit_data,
+            software_audit_data,
+        ) = am._get_audit_data(
+            do_patch_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_audit)
+            do_kube_rootca_update_audit,
+            do_software_audit,
+        )
         # Convert to dict like what would happen calling via RPC
         patch_audit_data = patch_audit_data.to_dict()

@@ -1362,12 +1428,12 @@ class TestAuditWorkerManager(base.DCManagerTestCase):
             do_load_audit,
             do_firmware_audit,
             do_kubernetes_audit,
-            do_kube_rootca_audit)
+            do_kube_rootca_audit,
+            do_software_audit)

         # Verify patch audit is called
-        self.fake_patch_audit.subcloud_audit.assert_called_with(
-            subcloud.name, subcloud.region_name, patch_audit_data,
-            software_audit_data, do_load_audit)
+        self.fake_patch_audit.subcloud_patch_audit.assert_called_with(
+            subcloud.name, subcloud.region_name, patch_audit_data, do_load_audit)

         # Verify the _update_subcloud_audit_fail_count is not called
         with mock.patch.object(wm, '_update_subcloud_audit_fail_count') as \
@@ -1,20 +1,18 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-hacking>=1.1.0,<=2.0.0 # Apache-2.0
-cliff>=2.6.0 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
-fixtures>=3.0.0 # Apache-2.0/BSD
+flake8 # MIT
+hacking>=1.1.0,<=2.0.0 # Apache-2.0
 mock>=2.0 # BSD
+oslotest>=1.10.0 # Apache-2.0
+pylint==2.14.1 # GPLv2
 python-subunit>=0.0.18 # Apache-2.0/BSD
 redfish # BSD
 requests-mock>=1.1 # Apache-2.0
 testrepository>=0.0.18 # Apache-2.0/BSD
-testtools>=1.4.0 # MIT
 testresources>=0.2.4 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
 WebTest>=2.0 # MIT
-oslotest>=1.10.0 # Apache-2.0
-pylint==2.14.1;python_version>"3.7" # GPLv2
-PyYAML>=3.1.0
-yamllint<1.26.1;python_version>="3.0" # GPLv2