Audit local registry secret info when there is user update in keystone
The local registry uses the admin's username and password for authentication,
and the admin's password can be changed via the openstack client command. This
causes the auth info stored in secrets to become obsolete, leading to invalid
authentication against keystone.
To keep secrets info updated, keystone event notification is enabled.
And event notification listener is added in sysinv. So when there is
user password change, a user update event will be sent out by keystone.
And sysinv will call the function audit_local_registry_secrets to check
whether the kubernetes secret info needs to be updated or not.
A periodic task is also added to ensure secrets are always synced, in
case a notification is missed or there is a failure in handling one.
oslo_messaging is added to tox's requirements.txt to avoid tox failure.
The version is based on global-requirements.txt from Openstack Train.
Test:
Pass deployment and secrets could be updated automatically with new auth
info.
Pass host-swact in duplex mode.
We lack information on how LP1853093 was triggered by the user, but this
patch addresses the issue that local registry secrets are not updated
accordingly after the password of "admin" is changed, so this fix should
help technically.
Closes-Bug: 1853017
Closes-Bug: 1853093
Depends-On: https://review.opendev.org/707522
Depends-On: https://review.opendev.org/707523
Change-Id: I959b65288e0834b989aa87e40506e41d0bba0d59
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
(cherry picked from commit 8ab1e2d7c6
)
This commit is contained in:
parent
93235a6667
commit
7e5e887eb3
|
@ -16,6 +16,7 @@ iso8601>=0.1.4
|
||||||
oslo.config>=3.7.0 # Apache-2.0
oslo.concurrency>=3.7.1 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.messaging!=9.0.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0
oslo.serialization>=1.10.0,!=2.19.1 # Apache-2.0
|
||||||
|
|
|
@ -118,14 +118,22 @@ class KubeOperator(object):
|
||||||
"kube_get_namespace %s: %s" % (namespace, e))
|
"kube_get_namespace %s: %s" % (namespace, e))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
def kube_get_namespace_name_list(self):
    """Return the names of all Kubernetes namespaces.

    :returns: list of unique namespace name strings
    :raises: re-raises any exception from the Kubernetes client
    """
    c = self._get_kubernetesclient_core()
    try:
        ns_list = c.list_namespace()
        # set() de-duplicates defensively; metadata.name should
        # already be unique per namespace.
        return list(set(ns.metadata.name for ns in ns_list.items))
    except Exception as e:
        LOG.error("Failed to get Namespace list: %s" % e)
        raise
|
|
||||||
def kube_get_secret(self, name, namespace):
|
def kube_get_secret(self, name, namespace):
|
||||||
c = self._get_kubernetesclient_core()
|
c = self._get_kubernetesclient_core()
|
||||||
try:
|
try:
|
||||||
c.read_namespaced_secret(name, namespace)
|
return c.read_namespaced_secret(name, namespace)
|
||||||
return True
|
|
||||||
except ApiException as e:
|
except ApiException as e:
|
||||||
if e.status == httplib.NOT_FOUND:
|
if e.status == httplib.NOT_FOUND:
|
||||||
return False
|
return None
|
||||||
else:
|
else:
|
||||||
LOG.error("Failed to get Secret %s under "
|
LOG.error("Failed to get Secret %s under "
|
||||||
"Namespace %s: %s" % (name, namespace, e.body))
|
"Namespace %s: %s" % (name, namespace, e.body))
|
||||||
|
@ -154,6 +162,15 @@ class KubeOperator(object):
|
||||||
"%s: %s" % (name, src_namespace, dst_namespace, e))
|
"%s: %s" % (name, src_namespace, dst_namespace, e))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
def kube_patch_secret(self, name, namespace, body):
    """Patch a Kubernetes Secret with the given body.

    :param name: name of the Secret to patch
    :param namespace: namespace containing the Secret
    :param body: Secret object (or dict) carrying the fields to patch
    :raises: re-raises any exception from the Kubernetes client
    """
    c = self._get_kubernetesclient_core()
    try:
        c.patch_namespaced_secret(name, namespace, body)
    except Exception as e:
        LOG.error("Failed to patch Secret %s under Namespace %s: "
                  "%s" % (name, namespace, e))
        raise
|
|
||||||
def kube_delete_persistent_volume_claim(self, namespace, **kwargs):
|
def kube_delete_persistent_volume_claim(self, namespace, **kwargs):
|
||||||
c = self._get_kubernetesclient_core()
|
c = self._get_kubernetesclient_core()
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -0,0 +1,88 @@
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Copyright (C) 2019 Intel Corporation
|
||||||
|
#
|
||||||
|
"""
|
||||||
|
Sysinv Keystone notification listener.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import keyring
|
||||||
|
import oslo_messaging
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
from sysinv.common import constants
|
||||||
|
from sysinv.common import utils
|
||||||
|
from sysinv.db import api as dbapi
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
kube_app = None
|
||||||
|
|
||||||
|
|
||||||
|
class NotificationEndpoint(object):
    """Task which exposes the API for consuming priority based notifications.

    The Oslo notification framework delivers notifications based on priority
    to matching callback APIs as defined in its notification listener
    endpoint list.

    Currently from the Keystone perspective, the `info` API is sufficient as
    Keystone sends notifications at `info` priority ONLY. Other priority
    level APIs (warn, error, critical, audit, debug) are not needed here.
    """

    # Only react to keystone user updates (e.g. admin password change);
    # all other event types are filtered out by oslo.messaging.
    filter_rule = oslo_messaging.NotificationFilter(
        event_type='identity.user.updated')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Receives notification at info level."""
        # kube_app is the module-level AppOperator set by
        # start_keystone_listener() before the listener starts.
        global kube_app
        kube_app.audit_local_registry_secrets()
        return oslo_messaging.NotificationResult.HANDLED
|
|
||||||
|
|
||||||
|
def get_transport_url():
    """Build the rabbitmq transport URL for the keystone listener.

    The broker host is the controller's management IP address (looked up
    in the sysinv DB) and the password comes from the 'amqp'/'rabbit'
    keyring entry.

    :returns: "rabbit://..." URL string, or None when the management IP
              address cannot be retrieved.
    """
    try:
        db_api = dbapi.get_instance()
        address = db_api.address_get_by_name(
            utils.format_address_name(constants.CONTROLLER_HOSTNAME,
                                      constants.NETWORK_TYPE_MGMT)
        )
    except Exception as e:
        LOG.error("Failed to get management IP address: %s" % str(e))
        return None

    auth_password = keyring.get_password('amqp', 'rabbit')
    transport_url = "rabbit://guest:%s@%s:5672" % (auth_password, address.address)
    return transport_url
|
|
||||||
|
|
||||||
|
def start_keystone_listener(app):
    """Start a blocking listener for keystone notifications.

    Subscribes to the keystone 'notifications' topic; on each matching
    'identity.user.updated' event the NotificationEndpoint triggers
    app.audit_local_registry_secrets(). This call blocks in server.wait(),
    so it is expected to run in its own greenthread.

    :param app: AppOperator instance used by the notification endpoint.
    """
    global kube_app
    kube_app = app

    conf = cfg.ConfigOpts()
    conf.transport_url = get_transport_url()
    if conf.transport_url is None:
        # Cannot reach the broker without a management address; give up.
        return

    transport = oslo_messaging.get_rpc_transport(conf)
    targets = [
        oslo_messaging.Target(exchange='keystone', topic='notifications', fanout=True),
    ]
    endpoints = [
        NotificationEndpoint(),
    ]

    # NOTE(review): the pool name presumably lets multiple listener
    # processes share the notification stream rather than each consuming
    # every message — confirm against oslo.messaging docs.
    pool = "sysinv-keystone-listener-workers"
    server = oslo_messaging.get_notification_listener(transport, targets,
                                                      endpoints, pool=pool)
    LOG.info("Sysinv keystone listener started!")
    server.start()
    server.wait()
|
|
@ -960,7 +960,7 @@ class AppOperator(object):
|
||||||
for ns in namespaces:
|
for ns in namespaces:
|
||||||
if (ns in [common.HELM_NS_HELM_TOOLKIT,
|
if (ns in [common.HELM_NS_HELM_TOOLKIT,
|
||||||
common.HELM_NS_STORAGE_PROVISIONER] or
|
common.HELM_NS_STORAGE_PROVISIONER] or
|
||||||
self._kube.kube_get_secret(pool_secret, ns)):
|
self._kube.kube_get_secret(pool_secret, ns) is not None):
|
||||||
# Secret already exist
|
# Secret already exist
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -1026,7 +1026,7 @@ class AppOperator(object):
|
||||||
list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
|
list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
|
||||||
for ns in namespaces:
|
for ns in namespaces:
|
||||||
if (ns == common.HELM_NS_HELM_TOOLKIT or
|
if (ns == common.HELM_NS_HELM_TOOLKIT or
|
||||||
self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns)):
|
self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns) is not None):
|
||||||
# Secret already exist
|
# Secret already exist
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -1077,6 +1077,75 @@ class AppOperator(object):
|
||||||
LOG.error(e)
|
LOG.error(e)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
def audit_local_registry_secrets(self):
    """Sync local registry secrets with the password stored in keyring.

    The local registry uses the admin's username and password for
    authentication. K8s stores the authentication info in secrets in
    order to access the local registry, while the admin's password is
    saved in keyring.

    The admin's password can be changed by an openstack client command
    outside of sysinv and K8s, causing a mismatch between keyring and
    the k8s secrets, which leads to authentication failure.

    Two mechanisms keep the k8s secrets updated with keyring data:
    1. Polling: a periodic task syncs info from keyring to secrets.
    2. Notification: keystone sends a notification on password update
       and the notification receiver performs the sync.

    Both call this function, which audits the password between keyring
    and default-registry-key and, if needed, pushes keyring's password
    out to all local registry secrets.
    """
    # Lock serializes calls arriving from the periodic timer and the
    # keystone notification listener.
    lock_name = "AUDIT_LOCAL_REGISTRY_SECRETS"

    @cutils.synchronized(lock_name, external=False)
    def _sync_audit_local_registry_secrets(self):
        # Step 1: compare the reference secret against keyring.
        try:
            secret = self._kube.kube_get_secret("default-registry-key", "kube-system")
            if secret is None:
                return
            secret_auth_body = base64.b64decode(secret.data['.dockerconfigjson'])
            # Extract the base64 "auth" value from the dockerconfigjson text.
            secret_auth_info = (secret_auth_body.split('auth":')[1]).split('"')[1]
            registry_auth = get_local_docker_registry_auth()
            registry_auth_info = '{0}:{1}'.format(registry_auth['username'],
                                                  registry_auth['password'])
            if secret_auth_info == base64.b64encode(registry_auth_info):
                LOG.debug("Auth info is the same, no update is needed for k8s secret.")
                return
        except Exception as e:
            LOG.error(e)
            return

        # Step 2: update "default-registry-key" secret info under all
        # namespaces that carry it.
        try:
            # Build the new dockerconfigjson payload with the fresh auth.
            token = '{{\"auths\": {{\"{0}\": {{\"auth\": \"{1}\"}}}}}}'.format(
                constants.DOCKER_REGISTRY_SERVER, base64.b64encode(registry_auth_info))
            secret.data['.dockerconfigjson'] = base64.b64encode(token)

            ns_list = self._kube.kube_get_namespace_name_list()
            for ns in ns_list:
                secret = self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns)
                if secret is None:
                    continue

                try:
                    secret_auth_body = base64.b64decode(secret.data['.dockerconfigjson'])
                    # Only touch secrets that point at the local registry.
                    if constants.DOCKER_REGISTRY_SERVER in secret_auth_body:
                        secret.data['.dockerconfigjson'] = base64.b64encode(token)
                        self._kube.kube_patch_secret(DOCKER_REGISTRY_SECRET, ns, secret)
                        LOG.info("Secret %s under Namespace %s is updated"
                                 % (DOCKER_REGISTRY_SECRET, ns))
                except Exception as e:
                    # Best effort per namespace: log and keep going.
                    LOG.error("Failed to update Secret %s under Namespace %s: %s"
                              % (DOCKER_REGISTRY_SECRET, ns, e))
                    continue
        except Exception as e:
            LOG.error(e)
            return

    _sync_audit_local_registry_secrets(self)
|
|
||||||
def _delete_namespace(self, namespace):
|
def _delete_namespace(self, namespace):
|
||||||
loop_timeout = 1
|
loop_timeout = 1
|
||||||
timeout = 300
|
timeout = 300
|
||||||
|
|
|
@ -88,6 +88,7 @@ from sysinv.conductor import ceph as iceph
|
||||||
from sysinv.conductor import kube_app
|
from sysinv.conductor import kube_app
|
||||||
from sysinv.conductor import openstack
|
from sysinv.conductor import openstack
|
||||||
from sysinv.conductor import docker_registry
|
from sysinv.conductor import docker_registry
|
||||||
|
from sysinv.conductor import keystone_listener
|
||||||
from sysinv.db import api as dbapi
|
from sysinv.db import api as dbapi
|
||||||
from sysinv.objects import base as objects_base
|
from sysinv.objects import base as objects_base
|
||||||
from sysinv.objects import kube_app as kubeapp_obj
|
from sysinv.objects import kube_app as kubeapp_obj
|
||||||
|
@ -191,6 +192,10 @@ class ConductorManager(service.PeriodicService):
|
||||||
# initializing conductor manager service
|
# initializing conductor manager service
|
||||||
super(ConductorManager, self).start()
|
super(ConductorManager, self).start()
|
||||||
|
|
||||||
|
# monitor keystone user update event to check whether admin password is
|
||||||
|
# changed or not. If changed, then sync it to kubernetes's secret info.
|
||||||
|
greenthread.spawn(keystone_listener.start_keystone_listener, self._app)
|
||||||
|
|
||||||
def _start(self):
|
def _start(self):
|
||||||
self.dbapi = dbapi.get_instance()
|
self.dbapi = dbapi.get_instance()
|
||||||
self.fm_api = fm_api.FaultAPIs()
|
self.fm_api = fm_api.FaultAPIs()
|
||||||
|
@ -4773,6 +4778,13 @@ class ConductorManager(service.PeriodicService):
|
||||||
'install_state_info':
|
'install_state_info':
|
||||||
host.install_state_info})
|
host.install_state_info})
|
||||||
|
|
||||||
|
@periodic_task.periodic_task(spacing=CONF.conductor.audit_interval)
def _kubernetes_local_secrets_audit(self, context):
    # Periodically audit kubernetes local registry secrets info so they
    # stay synced even if a keystone notification was missed.
    LOG.debug("Sysinv Conductor running periodic audit task for k8s local registry secrets.")
    if self._app:
        self._app.audit_local_registry_secrets()
|
|
||||||
@periodic_task.periodic_task(spacing=CONF.conductor.audit_interval)
|
@periodic_task.periodic_task(spacing=CONF.conductor.audit_interval)
|
||||||
def _conductor_audit(self, context):
|
def _conductor_audit(self, context):
|
||||||
# periodically, perform audit of inventory
|
# periodically, perform audit of inventory
|
||||||
|
|
Loading…
Reference in New Issue