Remove force option for k8s rootca update complete/abort
Currently, the k8s rootca update complete and abort operations are rejected if the system has alarms that are not ignored internally. The "-f" option can be used to suppress non-mgmt_affecting alarms, but mgmt_affecting alarms still prevent complete and abort. Blocking complete and abort on alarms is not really necessary, since it does not help the user resolve the alarms, and it only confuses end users. This update removes the alarm checking (and the -f/--force option) from the REST API and the system CLI.

Test Plan:
PASS: rootca update complete while system has alarms
PASS: rootca update abort at host-update trust-both-cas while system has alarms
PASS: rootca update abort at host-update update-certs while system has alarms
PASS: rootca update abort at host-update trust-new-ca while system has alarms
PASS: rootca update abort at pods-update trust-both-cas while system has alarms

Closes-Bug: 1949238
Signed-off-by: Andy Ning <andy.ning@windriver.com>
Change-Id: I82b922f39d185990704c591f02781d581822b162
Parent: 2b44504c65
Commit: 2ef6c9524c
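For illustration, a minimal sketch of what a complete request looks like after this change. The JSON PATCH body and the /v1/kube_rootca_update path come from the updated docs and tests; the endpoint host/port, token handling, and the requests-based client are placeholder assumptions, not part of this commit.

import requests

SYSINV_URL = "http://controller:6385/v1"   # placeholder sysinv endpoint
TOKEN = "<keystone-token>"                 # placeholder auth token

# Same JSON PATCH document the CLI builds; use "update-aborted" to abort.
patch = [{"op": "replace",
          "path": "/state",
          "value": "update-completed"}]

# Note: no '?force=' query parameter is needed any more.
resp = requests.patch(
    SYSINV_URL + "/kube_rootca_update",
    json=patch,
    headers={"X-Auth-Token": TOKEN},
)
resp.raise_for_status()
print(resp.json().get("state"))            # expected: "update-completed"

The corresponding system CLI commands (kube-rootca-update-complete and kube-rootca-update-abort, implemented by do_kube_rootca_update_complete/abort in the diff below) likewise no longer accept -f/--force.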
@@ -10955,7 +10955,6 @@ forbidden (403), badMethod (405), overLimit (413)
 :widths: 20, 20, 20, 60

 "state", "plain", "xsd:string", "The new state to be set in kube_rootca_update object"
-"force", "query", "xsd:boolean", "A boolean flag indicating if the API should ignore non-management affecting alarms on eventual health checks (the parameter is passed as part of the URL, ie, /v1/kube_rootca_update/?force=True)."

 ::

@@ -11027,7 +11026,6 @@ forbidden (403), badMethod (405), overLimit (413)
 :widths: 20, 20, 20, 60

 "state", "plain", "xsd:string", "The new state to be set in kube_rootca_update object"
-"force", "query", "xsd:boolean", "A boolean flag indicating if the API should ignore non-management affecting alarms on eventual health checks (the parameter is passed as part of the URL, ie, /v1/kube_rootca_update/?force=True)."

 ::

@@ -11073,7 +11071,7 @@ forbidden (403), badMethod (405), overLimit (413)
 "from_rootca_cert":"d70efa2daaee06f8-70634176318091904949557575469846596498",
 "updated_at":"2021-08-26T18:25:02.759171+00:00",
 "capabilities":{},
-"state":"update-completed",
+"state":"update-aborted",
 "id":27
 }

@@ -30,8 +30,8 @@ class KubeRootCAUpdateTest(test_shell.ShellTest):
         'uuid': '88d31a2d-c82e-429f-a52d-f03f860bb620',
         'personality': 'fake-personality',
         'state': 'None',
-        'target_rootca_cert': 'fake-target_rootca_cert',
-        'effective_rootca_cert': 'fake-effective_rootca_cert',
+        'target_rootca_cert': 'fake-target_cert',
+        'effective_rootca_cert': 'fake-effective_cert',
         'created_at': 'fake-created-time',
         'updated_at': 'fake-updated-time',
     }

@@ -88,12 +88,10 @@ class KubeRootCAUpdateManager(base.Manager):

         return self._list(self._path('hosts'), 'kube_host_updates')

-    def update_complete(self, patch, force):
+    def update_complete(self, patch):
         """Marks the Kubernetes rootca update as complete

         :param patch: a json PATCH document to apply on complete API.
-        :param force: A CLI argument to indicate if the API should ignore
-                      minor alarms on eventual health checks.
         """

-        return self._update(self._path() + '?force=' + str(force), patch)
+        return self._update(self._path(), patch)

@@ -160,10 +160,6 @@ def do_kube_rootca_host_update_list(cc, args):
     utils.print_list(update_status_list, fields, fields)


-@utils.arg('-f', '--force',
-           action='store_true',
-           default=False,
-           help="Ignore non management-affecting alarms")
 def do_kube_rootca_update_complete(cc, args):
     """Marks the rootca update as complete"""

@@ -172,14 +168,10 @@ def do_kube_rootca_update_complete(cc, args):
                   'path': '/state',
                   'value': KUBE_ROOTCA_UPDATE_COMPLETED})

-    update_status = cc.kube_rootca_update.update_complete(patch, args.force)
+    update_status = cc.kube_rootca_update.update_complete(patch)
     _print_kube_rootca_update_show(update_status)


-@utils.arg('-f', '--force',
-           action='store_true',
-           default=False,
-           help="Ignore non management-affecting alarms")
 def do_kube_rootca_update_abort(cc, args):
     """Marks the rootca update as aborted"""

@@ -188,5 +180,5 @@ def do_kube_rootca_update_abort(cc, args):
                   'path': '/state',
                   'value': KUBE_ROOTCA_UPDATE_ABORTED})

-    update_status = cc.kube_rootca_update.update_complete(patch, args.force)
+    update_status = cc.kube_rootca_update.update_complete(patch)
     _print_kube_rootca_update_show(update_status)

@@ -557,12 +557,14 @@ class KubeRootCAUpdateController(rest.RestController):
         rpc_kube_rootca_update = pecan.request.dbapi.kube_rootca_update_get_list()
         return KubeRootCAUpdateCollection.convert_with_links(rpc_kube_rootca_update)

+    # TODO (aning): move DB state update to conductor and synchronize with
+    # state updating resulting from puppet apply status report using a LOCK.
+    # (eg, sync with report_kube_rootca_update_success())
     @cutils.synchronized(LOCK_KUBE_ROOTCA_CONTROLLER)
-    @wsme.validate(wtypes.text, [KubeRootCAUpdatePatchType])
+    @wsme.validate([KubeRootCAUpdatePatchType])
     @wsme_pecan.wsexpose(KubeRootCAUpdate, wtypes.text, body=[KubeRootCAUpdatePatchType])
-    def patch(self, force, patch):
+    def patch(self, patch):
         """Completes the kubernetes rootca, clearing both tables and alarm"""
-        force = force == 'True'
         updates = self._get_updates(patch)
         update_state = updates['state']

@@ -589,15 +591,6 @@ class KubeRootCAUpdateController(rest.RestController):
             raise wsme.exc.ClientSideError(_(
                 "kube-rootca-update-complete rejected: No kubernetes root CA update in progress."))

-        self._check_cluster_health(
-            "kube-rootca-update-complete",
-            alarm_ignore_list=[
-                fm_constants.FM_ALARM_ID_KUBE_ROOTCA_UPDATE_IN_PROGRESS,
-                fm_constants.FM_ALARM_ID_KUBE_ROOTCA_UPDATE_AUTO_APPLY_INPROGRESS,
-                fm_constants.FM_ALARM_ID_CERT_EXPIRING_SOON],
-            force=force,
-        )
-
         rpc_host_update_list = pecan.request.dbapi.kube_rootca_host_update_get_list()
         hostnames = [host.hostname for host in rpc_host_update_list]

@@ -8997,6 +8997,13 @@ class ConductorManager(service.PeriodicService):
         LOG.info("Kube root CA update phase '%s' succeeded on host: %s"
                  % (reported_cfg, host_uuid))

+        # If the update is aborted, don't update anything
+        c_update = self.dbapi.kube_rootca_update_get_one()
+        if c_update.state == kubernetes.KUBE_ROOTCA_UPDATE_ABORTED:
+            LOG.info("Current update has been aborted at host: %s, config: %s"
+                     % (host_uuid, reported_cfg))
+            return
+
         values = {}
         h_update = self.dbapi.kube_rootca_host_update_get_by_host(host_uuid)

@@ -9028,7 +9035,6 @@ class ConductorManager(service.PeriodicService):
                 matching += 1
         if matching == len(h_updates):
             # All hosts are up to date.
-            c_update = self.dbapi.kube_rootca_update_get_one()
             self.dbapi.kube_rootca_update_update(c_update.id, {'state': state})

     def report_kube_rootca_update_failure(self, host_uuid, reported_cfg,
@@ -9039,6 +9045,13 @@ class ConductorManager(service.PeriodicService):
         LOG.info("Kube root CA update phase '%s' failed on host: %s, error: %s"
                  % (reported_cfg, host_uuid, error))

+        # If the update is aborted, don't update anything
+        c_update = self.dbapi.kube_rootca_update_get_one()
+        if c_update.state == kubernetes.KUBE_ROOTCA_UPDATE_ABORTED:
+            LOG.info("Current update has been aborted at host: %s, config: %s"
+                     % (host_uuid, reported_cfg))
+            return
+
         if reported_cfg == puppet_common.REPORT_KUBE_CERT_UPDATE_TRUSTBOTHCAS:
             state = kubernetes.KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS_FAILED
         elif reported_cfg == puppet_common.REPORT_KUBE_CERT_UPDATE_UPDATECERTS:
@@ -9056,7 +9069,6 @@ class ConductorManager(service.PeriodicService):
                 {'state': state})

         # Update cluster 'update state'
-        c_update = self.dbapi.kube_rootca_update_get_one()
         self.dbapi.kube_rootca_update_update(c_update.id, {'state': state})

     def report_kube_rootca_pods_update_success(self, reported_cfg):
@@ -9066,6 +9078,13 @@ class ConductorManager(service.PeriodicService):
         LOG.info("Kube root CA update phase '%s' succeeded for pods"
                  % (reported_cfg))

+        # If the update is aborted, don't update anything
+        c_update = self.dbapi.kube_rootca_update_get_one()
+        if c_update.state == kubernetes.KUBE_ROOTCA_UPDATE_ABORTED:
+            LOG.info("Current update has been aborted at config: %s"
+                     % (reported_cfg))
+            return
+
         if reported_cfg == \
                 puppet_common.REPORT_KUBE_CERT_UPDATE_PODS_TRUSTBOTHCAS:
             state = kubernetes.KUBE_ROOTCA_UPDATED_PODS_TRUSTBOTHCAS
@@ -9078,7 +9097,6 @@ class ConductorManager(service.PeriodicService):
                 "Not supported reported_cfg: %s" % reported_cfg))

         # Update cluster 'update state'
-        c_update = self.dbapi.kube_rootca_update_get_one()
         self.dbapi.kube_rootca_update_update(c_update.id, {'state': state})

     def report_kube_rootca_pods_update_failure(self, reported_cfg, error):
@@ -9088,6 +9106,13 @@ class ConductorManager(service.PeriodicService):
         LOG.info("Kube root CA update phase '%s' failed for pods, error: %s"
                  % (reported_cfg, error))

+        # If the update is aborted, don't update anything
+        c_update = self.dbapi.kube_rootca_update_get_one()
+        if c_update.state == kubernetes.KUBE_ROOTCA_UPDATE_ABORTED:
+            LOG.info("Current update has been aborted at config: %s"
+                     % (reported_cfg))
+            return
+
         if reported_cfg == \
                 puppet_common.REPORT_KUBE_CERT_UPDATE_PODS_TRUSTBOTHCAS:
             state = kubernetes.KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS_FAILED
@@ -9100,7 +9125,6 @@ class ConductorManager(service.PeriodicService):
                 "Not supported reported_cfg: %s" % reported_cfg))

         # Update cluster 'update state'
-        c_update = self.dbapi.kube_rootca_update_get_one()
         self.dbapi.kube_rootca_update_update(c_update.id, {'state': state})

     def create_controller_filesystems(self, context, rootfs_device):

@@ -371,7 +371,7 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         patch = [{'op': 'replace',
                   'path': '/state',
                   'value': 'update-completed'}]
-        result = self.patch_json(self.url + '?force=False', patch)
+        result = self.patch_json(self.url, patch)
         result = result.json

         self.assertEqual(result['state'], kubernetes.KUBE_ROOTCA_UPDATE_COMPLETED)
@@ -388,7 +388,7 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         host_updates = self.dbapi.kube_rootca_host_update_get_list()
         self.assertEqual(len(host_updates), 0)

-    def test_update_complete_force_pods_trustnewca_update_exists(self):
+    def test_update_abort_pods_trustnewca_update_exists(self):
         dbutils.create_test_kube_rootca_update(state=kubernetes.KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA)
         dbutils.create_test_kube_rootca_host_update(host_id=self.host.id,
             state=kubernetes.KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA)
@@ -398,7 +398,7 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         patch = [{'op': 'replace',
                   'path': '/state',
                   'value': 'update-aborted'}]
-        result = self.patch_json(self.url + '?force=False', patch)
+        result = self.patch_json(self.url, patch)
         result = result.json

         self.assertEqual(result['state'], kubernetes.KUBE_ROOTCA_UPDATE_COMPLETED)
@@ -415,7 +415,7 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         host_updates = self.dbapi.kube_rootca_host_update_get_list()
         self.assertEqual(len(host_updates), 0)

-    def test_update_complete_force_hosts_updatecerts_trustnewca_update_exists(self):
+    def test_update_abort_hosts_updatecerts_trustnewca_update_exists(self):
         dbutils.create_test_kube_rootca_update(state=kubernetes.KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA)
         dbutils.create_test_kube_rootca_host_update(host_id=self.host.id,
             state=kubernetes.KUBE_ROOTCA_UPDATED_HOST_UPDATECERTS)
@@ -425,7 +425,7 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         patch = [{'op': 'replace',
                   'path': '/state',
                   'value': 'update-aborted'}]
-        result = self.patch_json(self.url + '?force=False', patch)
+        result = self.patch_json(self.url, patch)
         result = result.json

         self.assertEqual(result['state'], kubernetes.KUBE_ROOTCA_UPDATE_ABORTED)
@@ -441,34 +441,11 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         host_list = self.dbapi.kube_rootca_host_update_get_list()
         self.assertEqual(len(host_list), 0)

-    def test_update_complete_invalid_health(self):
-        dbutils.create_test_kube_rootca_update(state=kubernetes.KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA)
-        dbutils.create_test_kube_rootca_host_update(host_id=self.host.id,
-            state=kubernetes.KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA)
-        dbutils.create_test_kube_rootca_host_update(host_id=self.host2.id,
-            state=kubernetes.KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA)
-        self.fake_fm_client.alarm.list.return_value = [FAKE_MGMT_ALARM]
-
-        patch = [{'op': 'replace',
-                  'path': '/state',
-                  'value': 'update-completed'}]
-
-        result = self.patch_json(self.url + '?force=False', patch, expect_errors=True)
-
-        self.assertEqual(result.status_int, http_client.BAD_REQUEST)
-        self.assertIn("System is not healthy. Run system health-query for more details.",
-                      result.json['error_message'])
-        # Checks that DB is unmodified
-        update_entry = self.dbapi.kube_rootca_update_get_one()
-        self.assertNotEqual(update_entry, None)
-        host_update_list = self.dbapi.kube_rootca_host_update_get_list()
-        self.assertEqual(len(host_update_list), 2)
-
     def test_update_complete_no_update(self):
         patch = [{'op': 'replace',
                   'path': '/state',
                   'value': 'update-completed'}]
-        result = self.patch_json(self.url + '?force=False', patch, expect_errors=True)
+        result = self.patch_json(self.url, patch, expect_errors=True)

         self.assertEqual(result.status_int, http_client.BAD_REQUEST)
         self.assertIn("kube-rootca-update-complete rejected: No kubernetes root CA update in progress.",
@@ -480,7 +457,7 @@ class TestKubeRootCAUpdateComplete(TestKubeRootCAUpdate,
         patch = [{'op': 'replace',
                   'path': '/state',
                   'value': 'update-completed'}]
-        result = self.patch_json(self.url + '?force=False', patch, expect_errors=True)
+        result = self.patch_json(self.url, patch, expect_errors=True)

         self.assertEqual(result.status_int, http_client.BAD_REQUEST)
         self.assertIn("kube-rootca-update-complete rejected: Expect to find cluster update"