Add job for cluster tests

Remove the test that creates a cluster with zero nodes;
it had been failing for a long time.

Depends-On: https://review.opendev.org/c/openstack/magnum/+/893823
Depends-On: https://review.opendev.org/c/openstack/magnum/+/905357

Change-Id: I6bc2fbb78241765ac5f1c67675e152d0691b8d23
Michal Nasiadka 2023-08-30 09:00:25 +02:00
parent 4320429c89
commit 2fe075ae78
11 changed files with 247 additions and 86 deletions

.zuul.yaml

@@ -10,6 +10,7 @@
- magnum-tempest-plugin-tests-api-2023-1
- magnum-tempest-plugin-tests-api-zed
- magnum-tempest-plugin-tests-api-yoga
- magnum-tempest-plugin-tests-cluster-k8s_fcos_v1
gate:
jobs:
- magnum-tempest-plugin-tests-api
@@ -45,6 +46,10 @@
enforce_scope:
magnum: true
- job:
name: magnum-tempest-plugin-tests-cluster-k8s_fcos_v1
parent: magnum-tempest-plugin-tests-cluster
- job:
name: magnum-tempest-plugin-tests-api
parent: magnum-tempest-plugin-base
@@ -56,15 +61,37 @@
test-config:
$TEMPEST_CONFIG:
magnum:
image_id: fedora-coreos-31.20200323.3.2-openstack.x86_64
image_id: fedora-coreos-38.20230806.3.0-openstack.x86_64
nic_id: public
keypair_id: default
flavor_id: ds2G
master_flavor_id: ds2G
copy_logs: true
- job:
name: magnum-tempest-plugin-tests-cluster
parent: magnum-tempest-plugin-base
nodeset: magnum-nested-virt-ubuntu-jammy
vars:
configure_swap_size: 8192
tox_envlist: all
tempest_test_regex: ^magnum_tempest_plugin.tests.api.v1.test_cluster.ClusterTest.test_create_list_sign_delete_clusters
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
magnum:
image_id: fedora-coreos-38.20230806.3.0-openstack.x86_64
nic_id: public
keypair_id: default
flavor_id: ds2G
master_flavor_id: ds2G
copy_logs: true
copy_logs_success: true
devstack_localrc:
# NOTE: extend default glance limit from 1GB
GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 5000
LIBVIRT_TYPE: kvm
LIBVIRT_CPU_MODE: host-passthrough
zuul_copy_output:
/tmp/magnum-nodes: logs
- job:
name: magnum-tempest-plugin-base
@@ -72,22 +99,23 @@
Magnum functional tests base layer
parent: devstack-tempest
required-projects:
- openstack/magnum
- openstack/python-magnumclient
- openstack/heat
- openstack/barbican
- openstack/heat
- openstack/magnum
- openstack/magnum-tempest-plugin
- openstack/python-magnumclient
vars:
tempest_plugins:
- magnum-tempest-plugin
devstack_localrc:
USE_PYTHON3: true
MAGNUM_GUEST_IMAGE_URL: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200323.3.2/x86_64/fedora-coreos-31.20200323.3.2-openstack.x86_64.qcow2.xz
MAGNUM_IMAGE_NAME: fedora-coreos-31.20200323.3.2-openstack.x86_64
GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 5000
MAGNUM_GUEST_IMAGE_URL: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/38.20230806.3.0/x86_64/fedora-coreos-38.20230806.3.0-openstack.x86_64.qcow2.xz
MAGNUM_IMAGE_NAME: fedora-coreos-38.20230806.3.0-openstack.x86_64
devstack_plugins:
magnum: https://opendev.org/openstack/magnum
heat: https://opendev.org/openstack/heat
barbican: https://opendev.org/openstack/barbican
magnum: https://opendev.org/openstack/magnum
devstack_services:
# Disable swift and dependent c-bak service to support upload of .qcow2.xz image in the gate
s-account: false
@@ -103,3 +131,14 @@
- ^install-guide/.*$
- ^releasenotes/.*$
- ^dockerfiles/.*$
- nodeset:
name: magnum-nested-virt-ubuntu-jammy
nodes:
- name: controller
label: nested-virt-ubuntu-jammy
groups:
- name: tempest
nodes:
- controller
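
The cluster job runs on the nested-virt nodeset above so that LIBVIRT_TYPE=kvm actually has hardware acceleration available. As a quick local sanity check, one could probe for the same capability; a minimal sketch (the helper name is illustrative, not part of this change):

import os

def kvm_available() -> bool:
    # /dev/kvm is exposed when (nested) virtualization is enabled,
    # which is what the nested-virt-ubuntu-jammy label provides.
    return os.path.exists("/dev/kvm")

if __name__ == "__main__":
    print("KVM available:", kvm_available())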

magnum_tempest_plugin/tests/api/base.py

@@ -22,6 +22,7 @@ import magnum_tempest_plugin
CONF = config.CONF
COPY_LOG_HELPER = "magnum_tempest_plugin/tests/contrib/copy_instance_logs.sh"
COPY_PODLOG_HELPER = "magnum_tempest_plugin/tests/contrib/copy_pod_logs.sh"
class BaseMagnumTest(test.BaseTestCase):
@@ -55,11 +56,9 @@ class BaseMagnumTest(test.BaseTestCase):
"""
def int_copy_logs():
try:
cls.LOG.info("Copying logs...")
func_name = "test"
msg = ("Failed to copy logs for cluster")
nodes_addresses = get_nodes_fn()
master_nodes = nodes_addresses[0]
slave_nodes = nodes_addresses[1]
@@ -92,13 +91,33 @@ class BaseMagnumTest(test.BaseTestCase):
"to %(base_path)s%(log_name)s-"
"%(node_address)s" %
{'node_address': node_address,
'base_path': "/opt/stack/logs/cluster-nodes/",
'base_path': "/opt/stack/logs/magnum-nodes/",
'log_name': log_name})
cls.LOG.exception(msg)
raise
do_copy_logs('master', master_nodes)
do_copy_logs('node', slave_nodes)
except Exception:
cls.LOG.exception(msg)
raise
return int_copy_logs
return int_copy_logs()
@classmethod
def copy_pod_logs(cls):
"""Copy pod logs
This method retrieves all pod logs using a bash script; it
expects a kubeconfig file at /tmp/magnum-nodes/kube.conf
"""
base_path = os.path.split(os.path.dirname(
os.path.abspath(magnum_tempest_plugin.__file__)))[0]
full_location = os.path.join(base_path, COPY_PODLOG_HELPER)
try:
cls.LOG.debug("running %s", full_location)
subprocess.check_call([full_location])
except Exception as e:
cls.LOG.exception(e)
raise
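
copy_pod_logs() and the copy_pod_logs.sh script added below share an implicit contract: the kubeconfig must already sit at /tmp/magnum-nodes/kube.conf. A minimal sketch of that handshake (the wrapper function and the kubeconfig string are hypothetical):

import os

from magnum_tempest_plugin.tests.api import base

def save_kubeconfig_and_collect(kubeconfig_yaml: str) -> None:
    # Write the kubeconfig where copy_pod_logs.sh expects it, then
    # let the helper shell out to the script.
    os.makedirs("/tmp/magnum-nodes", exist_ok=True)
    with open("/tmp/magnum-nodes/kube.conf", "w") as f:
        f.write(kubeconfig_yaml)
    base.BaseMagnumTest.copy_pod_logs()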

magnum_tempest_plugin/common/config.py

@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import ast
import warnings
from tempest import config
@@ -137,9 +138,8 @@ class Config(object):
@classmethod
def set_copy_logs(cls, config):
if 'copy_logs' not in CONF.magnum:
cls.copy_logs = True
cls.copy_logs = str(CONF.magnum.copy_logs).lower() == 'true'
cls.copy_logs = CONF.magnum.get('copy_logs', True)
cls.copy_logs_success = CONF.magnum.get('copy_logs_success', True)
@classmethod
def set_coe(cls, config):
@@ -157,6 +157,10 @@ class Config(object):
def set_cluster_creation_timeout(cls, config):
cls.cluster_creation_timeout = CONF.magnum.cluster_creation_timeout
@classmethod
def set_labels(cls, config):
cls.labels = ast.literal_eval(CONF.magnum.labels)
@classmethod
def setUp(cls):
cls.set_admin_creds(config)
@@ -180,3 +184,4 @@ class Config(object):
cls.set_network_driver(config)
cls.set_cluster_template_id(config)
cls.set_cluster_creation_timeout(config)
cls.set_labels(config)
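
Because labels is registered as a StrOpt, set_labels() round-trips it through ast.literal_eval; a minimal sketch of what that parsing does (the label key and value are illustrative):

import ast

# e.g. tempest.conf:  [magnum] labels = {'kube_tag': 'v1.27.4'}
labels_opt = "{'kube_tag': 'v1.27.4'}"
labels = ast.literal_eval(labels_opt)
assert isinstance(labels, dict)
print(labels["kube_tag"])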

magnum_tempest_plugin/common/datagen.py

@@ -270,9 +270,10 @@ def valid_cluster_template(is_public=False):
external_network_id=config.Config.nic_id,
http_proxy=None, https_proxy=None,
no_proxy=None, network_driver=config.Config.network_driver,
volume_driver=None, labels={},
volume_driver=None,
docker_storage_driver=config.Config.docker_storage_driver,
tls_disabled=False)
tls_disabled=False,
labels=config.Config.labels)
def cluster_data(name=data_utils.rand_name('cluster'),

magnum_tempest_plugin/common/templates/cluster.py

@@ -12,15 +12,20 @@
import fixtures
import kubernetes
from kubernetes.client.rest import ApiException
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_utils import uuidutils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
import testtools
from yaml import safe_load
from magnum_tempest_plugin.common import config
from magnum_tempest_plugin.common import datagen
from magnum_tempest_plugin.common import utils
from magnum_tempest_plugin.tests.api import base
@@ -109,17 +114,30 @@ class ClusterTestTemplate(base.BaseTempestTest):
self.assertTrue(uuidutils.is_uuid_like(model.uuid))
self.clusters.append(model.uuid)
self.cluster_uuid = model.uuid
if config.Config.copy_logs:
self.addCleanup(self.copy_logs_handler(
timeout = (config.Config.cluster_creation_timeout + 10) * 60
try:
self.cluster_admin_client.wait_for_created_cluster(
model.uuid, delete_on_error=False, timeout=timeout)
except Exception as e:
self.LOG.debug("Cluster create exception: %s\n" % e)
self.copy_logs_handler(
lambda: list(
[self._get_cluster_by_id(model.uuid)[1].master_addresses,
self._get_cluster_by_id(model.uuid)[1].node_addresses]),
self.cluster_template.coe,
self.keypair))
self.keypair)
raise
if config.Config.copy_logs_success:
self.LOG.debug('Copying logs on success')
self.copy_logs_handler(
lambda: list(
[self._get_cluster_by_id(model.uuid)[1].master_addresses,
self._get_cluster_by_id(model.uuid)[1].node_addresses]),
self.cluster_template.coe,
self.keypair)
timeout = config.Config.cluster_creation_timeout * 60
self.cluster_admin_client.wait_for_created_cluster(
model.uuid, delete_on_error=False, timeout=timeout)
return resp, model
def _delete_cluster(self, cluster_id):
@@ -152,6 +170,8 @@ class ClusterTestTemplate(base.BaseTempestTest):
_, cluster_model = self._create_cluster(gen_model)
self.assertNotIn('status', cluster_model)
_, cluster_model = self._get_cluster_by_id(cluster_model.uuid)
# test cluster list
resp, cluster_list_model = self.cluster_reader_client.list_clusters()
self.assertEqual(200, resp.status)
@@ -161,32 +181,19 @@
for x in cluster_list_model.clusters]))
# test ca show
resp, cert_model = self.cert_reader_client.get_cert(
resp, ca = self.cert_reader_client.get_cert(
cluster_model.uuid, headers=HEADERS)
self.LOG.debug("cert resp: %s", resp)
self.assertEqual(200, resp.status)
self.assertEqual(cert_model.cluster_uuid, cluster_model.uuid)
self.assertIsNotNone(cert_model.pem)
self.assertIn('-----BEGIN CERTIFICATE-----', cert_model.pem)
self.assertIn('-----END CERTIFICATE-----', cert_model.pem)
self.assertEqual(ca.cluster_uuid, cluster_model.uuid)
self.assertIsNotNone(ca.pem)
self.assertIn('-----BEGIN CERTIFICATE-----', ca.pem)
self.assertIn('-----END CERTIFICATE-----', ca.pem)
# test ca sign
csr_sample = """-----BEGIN CERTIFICATE REQUEST-----
MIIByjCCATMCAQAwgYkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlh
MRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMR8w
HQYDVQQLExZJbmZvcm1hdGlvbiBUZWNobm9sb2d5MRcwFQYDVQQDEw53d3cuZ29v
Z2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEApZtYJCHJ4VpVXHfV
IlstQTlO4qC03hjX+ZkPyvdYd1Q4+qbAeTwXmCUKYHThVRd5aXSqlPzyIBwieMZr
WFlRQddZ1IzXAlVRDWwAo60KecqeAXnnUK+5fXoTI/UgWshre8tJ+x/TMHaQKR/J
cIWPhqaQhsJuzZbvAdGA80BLxdMCAwEAAaAAMA0GCSqGSIb3DQEBBQUAA4GBAIhl
4PvFq+e7ipARgI5ZM+GZx6mpCz44DTo0JkwfRDf+BtrsaC0q68eTf2XhYOsq4fkH
Q0uA0aVog3f5iJxCa3Hp5gxbJQ6zV6kJ0TEsuaaOhEko9sdpCoPOnRBm2i/XRD2D
6iNh8f8z0ShGsFqjDgFHyF3o+lUyj+UC6H1QW7bn
-----END CERTIFICATE REQUEST-----
"""
csr_sample = utils.generate_csr_and_key()
cert_data_model = datagen.cert_data(cluster_model.uuid,
csr_data=csr_sample)
csr_data=csr_sample['csr'])
resp, cert_model = self.cert_member_client.post_cert(
cert_data_model, headers=HEADERS)
self.LOG.debug("cert resp: %s", resp)
@@ -196,6 +203,60 @@ Q0uA0aVog3f5iJxCa3Hp5gxbJQ6zV6kJ0TEsuaaOhEko9sdpCoPOnRBm2i/XRD2D
self.assertIn('-----BEGIN CERTIFICATE-----', cert_model.pem)
self.assertIn('-----END CERTIFICATE-----', cert_model.pem)
# test Kubernetes API
kube_cfg = """
---
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {ca}
server: {api_address}
name: {name}
contexts:
- context:
cluster: {name}
user: admin
name: default
current-context: default
kind: Config
preferences: {{}}
users:
- name: admin
user:
client-certificate-data: {cert}
client-key-data: {key}
""".format(name=cluster_model.name,
api_address=cluster_model.api_address,
key=base64.encode_as_text(csr_sample['private_key']),
cert=base64.encode_as_text(cert_model.pem),
ca=base64.encode_as_text(ca.pem))
kube_cfg = safe_load(kube_cfg)
kube_config = kubernetes.config.load_kube_config_from_dict(kube_cfg)
# Get nodes and pods list using kubernetes python client
with kubernetes.client.ApiClient(kube_config) as api_client:
v1 = kubernetes.client.CoreV1Api(api_client)
try:
list_nodes = v1.list_node(pretty="true")
with open("/tmp/magnum-nodes/api-list-nodes", "w") as outfile:
outfile.write(str(list_nodes))
list_pods = v1.list_pod_for_all_namespaces(pretty="true")
with open("/tmp/magnum-nodes/api-list-pods", "w") as outfile:
outfile.write(str(list_pods))
except ApiException as e:
print("Exception when calling CoreV1Api: %s\n" % e)
raise
# Get nodes and pods using kubectl
with open("/tmp/magnum-nodes/kube.conf", "w") as outfile:
outfile.write(str(kube_cfg))
try:
self.copy_pod_logs()
except Exception as e:
self.LOG.debug("kubectl exception: %s\n" % e)
raise
# test cluster delete
self._delete_cluster(cluster_model.uuid)
self.clusters.remove(cluster_model.uuid)
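
The CA-sign assertions above only check PEM markers; a stricter optional check would verify that the returned certificate was actually signed by the cluster CA. A minimal sketch using cryptography, assuming an RSA CA key to match the RSA CSR produced by utils.generate_csr_and_key() (the helper name is illustrative):

from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import padding

def assert_signed_by(ca_pem: str, cert_pem: str) -> None:
    # Raises cryptography.exceptions.InvalidSignature on mismatch.
    ca_cert = x509.load_pem_x509_certificate(ca_pem.encode())
    cert = x509.load_pem_x509_certificate(cert_pem.encode())
    ca_cert.public_key().verify(
        cert.signature,
        cert.tbs_certificate_bytes,
        padding.PKCS1v15(),
        cert.signature_hash_algorithm,
    )

Usage would be e.g. assert_signed_by(ca.pem, cert_model.pem) right after the post_cert assertions.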

magnum_tempest_plugin/common/utils.py

@@ -16,6 +16,11 @@ import inspect
import time
import types
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
def def_method(f, *args, **kwargs):
@functools.wraps(f)
@@ -109,3 +114,34 @@ def memoized(func):
cache[args] = value
return value
return wrapper
def generate_csr_and_key():
"""Return a dict with a new csr, public key and private key."""
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048
)
public_key = private_key.public_key()
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, u"admin"),
x509.NameAttribute(x509.oid.NameOID.ORGANIZATION_NAME,
u"system:masters")
])).sign(private_key, hashes.SHA256())
result = {
'csr': csr.public_bytes(
encoding=serialization.Encoding.PEM).decode("utf-8"),
'private_key': private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()).decode("utf-8"),
'public_key': public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo).decode(
"utf-8"),
}
return result
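
For reference, the CSR returned by generate_csr_and_key() can be parsed back and checked; a minimal sketch whose assertions mirror the subject set above:

from cryptography import x509

from magnum_tempest_plugin.common import utils

result = utils.generate_csr_and_key()
csr = x509.load_pem_x509_csr(result['csr'].encode())
cn = csr.subject.get_attributes_for_oid(
    x509.oid.NameOID.COMMON_NAME)[0].value
assert cn == "admin"
assert csr.is_signature_valid  # self-signature over the request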

magnum_tempest_plugin/config.py

@@ -69,6 +69,9 @@ MagnumGroup = [
cfg.BoolOpt("copy_logs",
default=True,
help="Specify whether to copy nova server logs on failure."),
cfg.BoolOpt("copy_logs_success",
default=True,
help="Specify whether to copy nova server logs on success."),
cfg.StrOpt("coe",
default="kubernetes",
help="Container Orchestration Engine"),
@@ -80,4 +83,7 @@ MagnumGroup = [
default=30,
help="Timeout(in minutes) to wait for the cluster creation "
"finished."),
cfg.StrOpt("labels",
default={},
help="A dict of labels to be defined in cluster template"),
]

magnum_tempest_plugin/tests/api/v1/test_cluster.py

@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnum_tempest_plugin.common import config
from magnum_tempest_plugin.common.templates import cluster
IDEMPOTENT_IDS = {
@@ -72,13 +71,3 @@ class ClusterTest(cluster.ClusterTestTemplate):
cls._setup_cluster_template()
except Exception:
raise
@classmethod
def tearDownClass(cls):
if cls.delete_template:
cls._delete_cluster_template(cls.cluster_template.uuid)
if config.Config.keypair_name:
cls.keypairs_client.delete_keypair(config.Config.keypair_name)
super(ClusterTest, cls).tearDownClass()

magnum_tempest_plugin/tests/contrib/copy_instance_logs.sh

@@ -21,8 +21,9 @@ echo "Magnum's copy_instance_logs.sh was called..."
SSH_IP=$1
COE=${2-kubernetes}
NODE_TYPE=${3-master}
LOG_PATH=/opt/stack/logs/cluster-nodes/${NODE_TYPE}-${SSH_IP}
LOG_PATH=/tmp/magnum-nodes/${NODE_TYPE}-${SSH_IP}
KEYPAIR=${4-default}
PRIVATE_KEY=
echo "If private key is specified, save to temp and use that; else, use default"
@@ -42,39 +43,22 @@ function remote_exec {
mkdir -p $LOG_PATH
cat /proc/cpuinfo > /opt/stack/logs/cpuinfo.log
if [[ "$COE" == "kubernetes" ]]; then
SSH_USER=fedora
SSH_USER=core
remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log
remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log
remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log
remote_exec $SSH_USER "sudo journalctl -u cloud-init-local --no-pager" cloud-init-local.log
remote_exec $SSH_USER "sudo journalctl -u cloud-init --no-pager" cloud-init.log
remote_exec $SSH_USER "sudo cat /var/log/cloud-init-output.log" cloud-init-output.log
remote_exec $SSH_USER "sudo journalctl -u kubelet --no-pager" kubelet.log
remote_exec $SSH_USER "sudo journalctl -u kube-proxy --no-pager" kube-proxy.log
remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" etcd.log
remote_exec $SSH_USER "sudo journalctl -u kube-apiserver --no-pager" kube-apiserver.log
remote_exec $SSH_USER "sudo journalctl -u kube-scheduler --no-pager" kube-scheduler.log
remote_exec $SSH_USER "sudo journalctl -u kube-controller-manager --no-pager" kube-controller-manager.log
remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log
remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log
remote_exec $SSH_USER "sudo systemctl show docker-storage-setup --no-pager" docker-storage-setup.service.show.log
remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage-setup 2>/dev/null" docker-storage-setup.sysconfig.env.log
remote_exec $SSH_USER "sudo journalctl -u docker --no-pager" docker.log
remote_exec $SSH_USER "sudo systemctl status docker -l" docker.service.status.log
remote_exec $SSH_USER "sudo systemctl show docker --no-pager" docker.service.show.log
remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker" docker.sysconfig.env.log
remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-storage" docker-storage.sysconfig.env.log
remote_exec $SSH_USER "sudo cat /etc/sysconfig/docker-network" docker-network.sysconfig.env.log
remote_exec $SSH_USER "sudo timeout 60s docker ps --all=true --no-trunc=true" docker-containers.log
remote_exec $SSH_USER "sudo tar zcvf - /var/lib/docker/containers 2>/dev/null" docker-container-configs.tar.gz
remote_exec $SSH_USER "sudo journalctl -u flanneld --no-pager" flanneld.log
remote_exec $SSH_USER "sudo ip a" ipa.log
remote_exec $SSH_USER "sudo netstat -an" netstat.log
remote_exec $SSH_USER "sudo df -h" dfh.log
remote_exec $SSH_USER "sudo journalctl -u wc-notify --no-pager" wc-notify.log
remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params
remote_exec $SSH_USER "sudo cat /etc/etcd/etcd.conf" etcd.conf
remote_exec $SSH_USER "sudo cat /etc/kubernetes/config" kubernetes-config
@@ -82,19 +66,11 @@ if [[ "$COE" == "kubernetes" ]]; then
remote_exec $SSH_USER "sudo cat /etc/kubernetes/controller-manager" kubernetes-controller-config
remote_exec $SSH_USER "sudo cat /etc/kubernetes/kubelet" kubelet-config
remote_exec $SSH_USER "sudo cat /etc/kubernetes/proxy" kubernetes-proxy-config
remote_exec $SSH_USER "sudo cat /etc/kubernetes/kubeconfig.yaml" kubeconfig.yaml
remote_exec $SSH_USER "sudo tail -n +1 -- /etc/kubernetes/manifests/*" kubernetes-manifests
remote_exec $SSH_USER "sudo tail -n +1 -- /etc/kubernetes/certs/*" kubernetes-certs
remote_exec $SSH_USER "sudo cat /usr/local/bin/wc-notify" bin-wc-notify
remote_exec $SSH_USER "sudo cat /etc/kubernetes/kube_openstack_config" kube_openstack_config
remote_exec $SSH_USER "sudo cat /etc/sysconfig/flanneld" flanneld.sysconfig
remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-config" bin-flannel-config
remote_exec $SSH_USER "sudo cat /etc/sysconfig/flannel-network.json" flannel-network.json.sysconfig
remote_exec $SSH_USER "sudo cat /usr/local/bin/flannel-docker-bridge" bin-flannel-docker-bridge
remote_exec $SSH_USER "sudo cat /etc/systemd/system/docker.service.d/flannel.conf" docker-flannel.conf
remote_exec $SSH_USER "sudo cat /etc/systemd/system/flanneld.service.d/flannel-docker-bridge.conf" flannel-docker-bridge.conf
remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-docker-bridge.service" flannel-docker-bridge.service
remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-config.service" flannel-config.service
remote_exec $SSH_USER "sudo journalctl --no-pager -u heat-container-agent" heat-container-agent.log
remote_exec $SSH_USER "sudo cat /var/log/heat-config/heat-config-script/*kube_cluster*" heat-kube-cluster.log
remote_exec $SSH_USER "sudo cat /var/log/heat-config/heat-config-script/*kube_masters*" heat-kube-masters.log
remote_exec $SSH_USER "sudo cat /var/log/heat-config/heat-config-script/*kube_minions*" heat-kube-minions.log
elif [[ "$COE" == "swarm" || "$COE" == "swarm-mode" ]]; then
SSH_USER=fedora
remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log

magnum_tempest_plugin/tests/contrib/copy_pod_logs.sh

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -o xtrace
LOG_DIR="/tmp/magnum-nodes/kubernetes/"
KUBECTL="/opt/stack/bin/kubectl --kubeconfig /tmp/magnum-nodes/kube.conf"
mkdir -p ${LOG_DIR}
${KUBECTL} get all -A -o wide > ${LOG_DIR}/kubectl_get_all
for ns in $(${KUBECTL} get -o name namespace); do
mkdir -p ${LOG_DIR}/pods/${ns#*/}
for pod in $(${KUBECTL} get -n ${ns#*/} -o name pod); do
${KUBECTL} -n ${ns#*/} logs ${pod#*/} > ${LOG_DIR}/pods/${ns#*/}/${pod#*/}
done
done
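
The kubectl loop above could equally be expressed with the kubernetes Python client already used by the test (and pulled in via requirements.txt below); a rough equivalent sketch, assuming the same /tmp/magnum-nodes/kube.conf:

import os

import kubernetes

LOG_DIR = "/tmp/magnum-nodes/kubernetes/pods"

def dump_pod_logs() -> None:
    # Load the kubeconfig written by the test, then fetch every
    # pod's log per namespace, mirroring the shell loop above.
    kubernetes.config.load_kube_config("/tmp/magnum-nodes/kube.conf")
    v1 = kubernetes.client.CoreV1Api()
    for ns in v1.list_namespace().items:
        ns_name = ns.metadata.name
        os.makedirs(os.path.join(LOG_DIR, ns_name), exist_ok=True)
        for pod in v1.list_namespaced_pod(ns_name).items:
            log = v1.read_namespaced_pod_log(pod.metadata.name, ns_name)
            with open(os.path.join(LOG_DIR, ns_name,
                                   pod.metadata.name), "w") as f:
                f.write(log)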

requirements.txt

@@ -6,3 +6,4 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0
tempest>=17.1.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0
kubernetes>=26.0.0 # Apache-2.0