DNM: WIP Get tests passing on cluster-api
Change-Id: Ib3498bf5d12c08f9e0523372bab6d7e548cf0daa
.zuul.yaml
@@ -9,6 +9,7 @@
        - magnum-tempest-plugin-tests-api-zed
        - magnum-tempest-plugin-tests-api-yoga
        - magnum-tempest-plugin-tests-api-xena
        - magnum-tempest-plugin-tests-functional
    gate:
      jobs:
        - magnum-tempest-plugin-tests-api
@@ -31,23 +32,48 @@
    nodeset: openstack-single-node-focal
    override-checkout: stable/xena

- job:
    name: magnum-tempest-plugin-tests-api-legacy
    parent: magnum-tempest-plugin-tests-api
    vars:
      devstack_localrc:
        MAGNUM_GUEST_IMAGE_URL: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200323.3.2/x86_64/fedora-coreos-31.20200323.3.2-openstack.x86_64.qcow2.xz
        MAGNUM_IMAGE_NAME: fedora-coreos-31.20200323.3.2-openstack.x86_64
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            image_id: fedora-coreos-31.20200323.3.2-openstack.x86_64
            labels:
            network_driver:

- job:
    name: magnum-tempest-plugin-tests-functional
    parent: magnum-tempest-plugin-tests-api
    vars:
      tempest_test_regex: (test_create_list_sign_delete_clusters|test_create_cluster_and_get_kubeconfig)
      tempest_exclude_regex:

- job:
    name: magnum-tempest-plugin-tests-api
    parent: magnum-tempest-plugin-base
    vars:
      tox_envlist: all
      tempest_test_regex: ^magnum_tempest_plugin.tests.api
      tempest_exclude_regex: (test_create_list_sign_delete_clusters|test_create_cluster_with_zero_nodes)
      tempest_exclude_regex: (test_create_list_sign_delete_clusters|test_create_cluster_with_zero_nodes|test_create_cluster_and_get_kubeconfig)
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            magnum:
              image_id: fedora-coreos-31.20200323.3.2-openstack.x86_64
              image_id: ubuntu-2004-kube-v1.25.5
              nic_id: public
              keypair_id: default
              flavor_id: ds2G
              master_flavor_id: ds2G
              flavor_id: ds2G20
              master_flavor_id: ds2G20
              labels: kube_tag:v1.25.5
              network_driver: flannel
              copy_logs: true
            auth:
              tempest_roles: creator, member, load-balancer_admin
      devstack_localrc:
        # NOTE: extend default glance limit from 1GB
        GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 5000
@@ -63,17 +89,21 @@
      - openstack/heat
      - openstack/barbican
      - openstack/magnum-tempest-plugin
      - openstack/octavia
    vars:
      tempest_plugins:
        - magnum-tempest-plugin
      devstack_localrc:
        USE_PYTHON3: true
        MAGNUM_GUEST_IMAGE_URL: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200323.3.2/x86_64/fedora-coreos-31.20200323.3.2-openstack.x86_64.qcow2.xz
        MAGNUM_IMAGE_NAME: fedora-coreos-31.20200323.3.2-openstack.x86_64
        MAGNUM_GUEST_IMAGE_URL: https://minio.services.osism.tech/openstack-k8s-capi-images/ubuntu-2004-kube-v1.25/ubuntu-2004-kube-v1.25.5.qcow2
        MAGNUM_IMAGE_NAME: ubuntu-2004-kube-v1.25.5
        MAGNUM_BRANCH: refs/changes/55/872755/28
      devstack_plugins:
        magnum: https://opendev.org/openstack/magnum
        heat: https://opendev.org/openstack/heat
        neutron: https://opendev.org/openstack/neutron
        barbican: https://opendev.org/openstack/barbican
        octavia: https://opendev.org/openstack/octavia
      devstack_services:
        # Disable swift and dependent c-bak service to support upload of .qcow2.xz image in the gate
        s-account: false
@@ -81,6 +111,7 @@
        s-object: false
        s-proxy: false
        c-bak: false
        octavia: true
    irrelevant-files:
      - ^.*\.rst$
      - ^api-ref/.*$
bin/run-sonobuoy (new executable file)
@@ -0,0 +1,12 @@
#!/bin/bash
set -x
mkdir -p sonobuoy
pushd sonobuoy
echo "$1" > ./KUBECONFIG
wget https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.56.12/sonobuoy_0.56.12_linux_amd64.tar.gz
tar -xf sonobuoy_0.56.12_linux_amd64.tar.gz
./sonobuoy run --mode quick --wait --kubeconfig $(pwd)/KUBECONFIG
exit_code=$?
popd
rm -rf sonobuoy/
exit $exit_code
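For reference, the helper writes its first argument verbatim into a KUBECONFIG file, so callers pass the kubeconfig contents rather than a path. A minimal sketch of such an invocation from Python, assuming the kubeconfig is held as a dict and serialized with yaml.safe_dump (the test further below passes str(cfg) directly; the function name here is illustrative):

    # Sketch only: call the run-sonobuoy helper installed via setup.py scripts=.
    from subprocess import PIPE, STDOUT, Popen

    import yaml


    def run_conformance(kubeconfig_dict):
        # The script expects the kubeconfig text itself as argv[1].
        kubeconfig_text = yaml.safe_dump(kubeconfig_dict)  # assumed serialization
        proc = Popen(["run-sonobuoy", kubeconfig_text], stdout=PIPE, stderr=STDOUT)
        output, _ = proc.communicate()
        return proc.returncode, output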
@@ -157,6 +157,14 @@ class Config(object):
    def set_cluster_creation_timeout(cls, config):
        cls.cluster_creation_timeout = CONF.magnum.cluster_creation_timeout

    @classmethod
    def set_master_lb_enabled(cls, config):
        cls.master_lb_enabled = CONF.magnum.master_lb_enabled

    @classmethod
    def set_labels(cls, config):
        cls.labels = CONF.magnum.labels

    @classmethod
    def setUp(cls):
        cls.set_admin_creds(config)
@@ -180,3 +188,5 @@ class Config(object):
        cls.set_network_driver(config)
        cls.set_cluster_template_id(config)
        cls.set_cluster_creation_timeout(config)
        cls.set_master_lb_enabled(config)
        cls.set_labels(config)
@@ -115,14 +115,15 @@ def cluster_template_data(**kwargs):
        "tls_disabled": False,
        "network_driver": None,
        "volume_driver": None,
        "labels": {},
        "labels": config.Config.labels,
        "public": False,
        "dns_nameserver": "8.8.8.8",
        "flavor_id": data_utils.rand_name('cluster'),
        "master_flavor_id": data_utils.rand_name('cluster'),
        "external_network_id": config.Config.nic_id,
        "keypair_id": data_utils.rand_name('cluster'),
        "image_id": data_utils.rand_name('cluster')
        "image_id": data_utils.rand_name('cluster'),
        "master_lb_enabled": config.Config.master_lb_enabled
    }

    data.update(kwargs)
@@ -270,8 +271,10 @@ def valid_cluster_template(is_public=False):
        external_network_id=config.Config.nic_id,
        http_proxy=None, https_proxy=None,
        no_proxy=None, network_driver=config.Config.network_driver,
        volume_driver=None, labels={},
        volume_driver=None,
        labels=config.Config.labels,
        docker_storage_driver=config.Config.docker_storage_driver,
        master_lb_enabled=config.Config.master_lb_enabled,
        tls_disabled=False)
@@ -322,6 +325,8 @@ def valid_cluster_data(cluster_template_id,
    :returns: ClusterEntity with generated data
    """

    print("CLUSTER ID ", cluster_template_id)

    return cluster_data(cluster_template_id=cluster_template_id, name=name,
                        master_count=master_count, node_count=node_count,
                        create_timeout=create_timeout)
@@ -16,6 +16,13 @@ import inspect
import time
import types

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from cryptography.x509.oid import NameOID


def def_method(f, *args, **kwargs):
    @functools.wraps(f)
@@ -109,3 +116,33 @@ def memoized(func):
            cache[args] = value
            return value
    return wrapper


def generate_csr_and_key():
    """Return a dict with a new csr and key."""
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend())

    csr = x509.CertificateSigningRequestBuilder().subject_name(
        x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, u"admin"),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"system:masters")
        ])).sign(key, hashes.SHA256(), default_backend())

    result = {
        'csr': csr.public_bytes(
            encoding=serialization.Encoding.PEM).decode("utf-8"),
        'key': key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()).decode("utf-8"),
    }

    return result


def log_subprocess_output(pipe, LOG):
    for line in iter(pipe.readline, b''):  # b'\n'-separated lines
        LOG.info('%r', line)
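For reference, the values returned by generate_csr_and_key() are PEM-encoded strings, so they round-trip through the same cryptography primitives used above. A small sketch, illustrative only and not part of the change:

    # Sketch: parse the generated CSR and key back to confirm they are valid PEM.
    from cryptography import x509
    from cryptography.hazmat.primitives import serialization

    from magnum_tempest_plugin.common import utils

    sample = utils.generate_csr_and_key()
    csr = x509.load_pem_x509_csr(sample['csr'].encode("utf-8"))
    key = serialization.load_pem_private_key(sample['key'].encode("utf-8"),
                                             password=None)
    # CN=admin / O=system:masters is what Kubernetes maps to the cluster-admin
    # group once the signed certificate is used in a kubeconfig.
    print(csr.subject.rfc4514_string(), key.key_size)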
@@ -68,4 +68,10 @@ MagnumGroup = [
                default=30,
                help="Timeout(in minutes) to wait for the cluster creation "
                     "finished."),
    cfg.BoolOpt("master-lb-enabled",
                default=True,
                help="Indicates whether created Clusters should have a load \
                      balancer for master nodes or not."),
    cfg.DictOpt("labels",
                default={}),
]
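A rough illustration of how the two new options would surface at runtime; this is an assumption rather than part of the change, and the tempest.conf key spelling and the kube_tag value are only examples:

    # With a tempest.conf along the lines of:
    #
    #   [magnum]
    #   master-lb-enabled = False
    #   labels = kube_tag:v1.25.5
    #
    # oslo.config parses the DictOpt value into a dict, and the Config class
    # above picks the values up through the registered "magnum" group.
    from tempest import config

    CONF = config.CONF
    assert CONF.magnum.master_lb_enabled is False
    assert CONF.magnum.labels == {"kube_tag": "v1.25.5"}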
@@ -23,7 +23,7 @@ class MagnumTempestPlugin(plugins.TempestPlugin):
    def load_tests(self):
        base_path = os.path.split(os.path.dirname(
            os.path.abspath(__file__)))[0]
        test_dir = "magnum_tempest_plugin/tests/api/v1"
        test_dir = "magnum_tempest_plugin/tests"
        full_test_dir = os.path.join(base_path, test_dir)
        return full_test_dir, base_path
@@ -80,6 +80,7 @@ class ClusterClient(client.MagnumClient):
        :returns: response object and ClusterIdEntity object
        """

        print("posting :", model, kwargs)
        resp, body = self.post(
            self.clusters_uri(),
            body=model.to_json(), **kwargs)
@@ -77,6 +77,8 @@ class ClusterTemplateClient(client.MagnumClient):
        :returns: response object and ClusterTemplateEntity object
        """

        # print("MODEL IS:", model)

        resp, body = self.post(
            self.cluster_templates_uri(),
            body=model.to_json(), **kwargs)
@@ -11,16 +11,25 @@
# under the License.

import fixtures
from subprocess import Popen, PIPE, STDOUT
import testtools
import yaml

from kubernetes import client as kube_client
from kubernetes import config as kube_config

from oslo_log import log as logging
from oslo_serialization import base64
from oslo_utils import uuidutils

from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
import testtools

from magnum_tempest_plugin.common import config
from magnum_tempest_plugin.common import datagen
from magnum_tempest_plugin.common import utils
from magnum_tempest_plugin.lib import exceptions as magnum_exceptions
from magnum_tempest_plugin.tests.api import base
@@ -34,6 +43,9 @@ class ClusterTest(base.BaseTempestTest):
    """Tests for cluster CRUD."""

    LOG = logging.getLogger(__name__)

    LOG.setLevel(logging.DEBUG)

    delete_template = False

    def __init__(self, *args, **kwargs):
@@ -70,15 +82,17 @@ class ClusterTest(base.BaseTempestTest):
                    get_cluster_template(config.Config.cluster_template_id)
            else:
                model = datagen.valid_cluster_template()
                _, cls.cluster_template = cls._create_cluster_template(model)
                # _,
                # cls._create_cluster_template(model)
                cls.cluster_template = model
                cls.delete_template = True
        except Exception:
            raise

    @classmethod
    def tearDownClass(cls):
        if cls.delete_template:
            cls._delete_cluster_template(cls.cluster_template.uuid)
        # if cls.delete_template:
        #     cls._delete_cluster_template(cls.cluster_template.uuid)

        if config.Config.keypair_name:
            cls.keypairs_client.delete_keypair(config.Config.keypair_name)
@@ -154,14 +168,31 @@ class ClusterTest(base.BaseTempestTest):
        resp, model = self.cluster_client.get_cluster(cluster_id)
        return resp, model

    # def test_create_delete_cluster(self):

    #     model = datagen.valid_cluster_template()
    #     _, cluster_template = self._create_cluster_template(model)
    #     gen_model = datagen.valid_cluster_data(
    #         cluster_template_id=cluster_template.uuid, node_count=1)
    #     _, cluster_model = self._create_cluster(gen_model)

    #     self._delete_cluster(cluster_model.uuid)
    #     self.clusters.remove(cluster_model.uuid)

    #     self._delete_cluster_template(cluster_template.uuid)

    # (dimtruck) Combining all these tests in one because
    # they time out on the gate (2 hours not enough)
    @testtools.testcase.attr('positive')
    @testtools.testcase.attr('slow')
    @decorators.idempotent_id('44158a8c-a856-11e9-9382-00224d6b7bc1')
    def test_create_list_sign_delete_clusters(self):

        model = datagen.valid_cluster_template()
        _, cluster_template = self._create_cluster_template(model)

        gen_model = datagen.valid_cluster_data(
            cluster_template_id=self.cluster_template.uuid, node_count=1)
            cluster_template_id=cluster_template.uuid, node_count=1)

        # test cluster create
        _, cluster_model = self._create_cluster(gen_model)
@@ -214,6 +245,7 @@ Q0uA0aVog3f5iJxCa3Hp5gxbJQ6zV6kJ0TEsuaaOhEko9sdpCoPOnRBm2i/XRD2D
        # test cluster delete
        self._delete_cluster(cluster_model.uuid)
        self.clusters.remove(cluster_model.uuid)
        self._delete_cluster_template(cluster_template.uuid)

    @testtools.testcase.attr('negative')
    @decorators.idempotent_id('11c293da-a857-11e9-9382-00224d6b7bc1')
@@ -282,3 +314,93 @@ Q0uA0aVog3f5iJxCa3Hp5gxbJQ6zV6kJ0TEsuaaOhEko9sdpCoPOnRBm2i/XRD2D
        self.assertRaises(
            exceptions.NotFound,
            self.cluster_client.delete_cluster, data_utils.rand_uuid())

    @testtools.testcase.attr('positive')
    @decorators.idempotent_id('f4c33092-7eeb-43d7-826e-bba16fd61e28')
    def test_create_cluster_and_get_kubeconfig(self):

        model = datagen.valid_cluster_template()
        _, cluster_template = self._create_cluster_template(model)

        gen_model = datagen.valid_cluster_data(
            cluster_template_id=cluster_template.uuid, node_count=1)

        print("gen_model:", gen_model)
        # test cluster create
        _, cluster_model = self._create_cluster(gen_model)
        self.assertNotIn('status', cluster_model)

        _, cluster_model = self._get_cluster_by_id(cluster_model.uuid)

        # template kubeconfig

        # generate csr and private key
        csr_sample = utils.generate_csr_and_key()

        # get CA cert
        _, ca = self.cert_client.get_cert(cluster_model.uuid,
                                          headers=HEADERS)
        # sign CSR
        cert_data_model = datagen.cert_data(cluster_model.uuid,
                                            csr_data=csr_sample['csr'])

        resp, cert_model = self.cert_client.post_cert(cert_data_model,
                                                      headers=HEADERS)
        cfg = """
---
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {ca}
    server: {api_address}
  name: {name}
contexts:
- context:
    cluster: {name}
    user: admin
  name: default
current-context: default
kind: Config
preferences: {{}}
users:
- name: admin
  user:
    client-certificate-data: {cert}
    client-key-data: {key}
""".format(name=cluster_model.name,
           api_address=cluster_model.api_address,
           key=base64.encode_as_text(csr_sample['key']),
           cert=base64.encode_as_text(cert_model.pem),
           ca=base64.encode_as_text(ca.pem))

        cfg = yaml.safe_load(cfg)

        self.LOG.info("Generated kubeconfig: %s", cfg)

        kube_config.load_kube_config_from_dict(cfg)

        v1 = kube_client.CoreV1Api()

        resp = v1.list_node(pretty="true")

        self.LOG.info("LIST NODES: %s", resp)

        self.LOG.info("Running sonobuoy on created cluster...")

        process = Popen(["run-sonobuoy", str(cfg)], stdout=PIPE, stderr=STDOUT)
        with process.stdout:
            utils.log_subprocess_output(process.stdout, self.LOG)
        exitcode = process.wait()
        if exitcode != 0:
            self.LOG.error("sonobuoy process exited with status %s", exitcode)
            raise magnum_exceptions.SonobuoyException

        else:
            self.LOG.info(
                "sonobuoy process completed with status %s",
                exitcode)

        # test cluster delete
        self._delete_cluster(cluster_model.uuid)
        self.clusters.remove(cluster_model.uuid)
        self._delete_cluster_template(cluster_template.uuid)
@@ -45,7 +45,7 @@ mkdir -p $LOG_PATH
cat /proc/cpuinfo > /opt/stack/logs/cpuinfo.log

if [[ "$COE" == "kubernetes" ]]; then
    SSH_USER=fedora
    SSH_USER=ubuntu
    remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log
    remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log
    remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log
@@ -96,7 +96,7 @@ if [[ "$COE" == "kubernetes" ]]; then
    remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-docker-bridge.service" flannel-docker-bridge.service
    remote_exec $SSH_USER "sudo cat /etc/systemd/system/flannel-config.service" flannel-config.service
elif [[ "$COE" == "swarm" || "$COE" == "swarm-mode" ]]; then
    SSH_USER=fedora
    SSH_USER=ubuntu
    remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log
    remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log
    remote_exec $SSH_USER "sudo journalctl -u cloud-final --no-pager" cloud-final.log
@@ -5,4 +5,5 @@
pbr!=2.1.0,>=2.0.0 # Apache-2.0
tempest>=17.1.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
kubernetes # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0
setup.py
@@ -26,4 +26,5 @@ except ImportError:

setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    scripts=['bin/run-sonobuoy'],
    pbr=True)