ClusterAPI: implement update_cluster_status

This patch means the ClusterAPI driver can now successfully create
and delete clusters.

You can see this being exercised in Zuul by these magnum-tempest-plugin
patches, which extend what we test to include cluster create:
https://review.opendev.org/c/openstack/magnum-tempest-plugin/+/884366
https://review.opendev.org/c/openstack/magnum-tempest-plugin/+/872759

We add checks that the helm chart has actually been applied
successfully, by looking at the Cluster API resources in the CAPI
management cluster, and then update the cluster state in the database
so the API reports to users that the cluster is ready (see the example
below).

Clusters can now be created and deleted end to end using the new driver.
To test this within devstack, we add a Cluster API management cluster
(k3s) on the controller node, which is used to create and manage all of
the Magnum clusters.
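
For example, once devstack is up, you can inspect the management
cluster and the Cluster API resources the driver polls for each Magnum
cluster. This is only an illustrative sketch: the actual resource names
are derived from the Helm release name and a per-project namespace, and
the addon CRD names assume the StackHPC addon provider installed by the
devstack plugin.

  # the devstack plugin copies the k3s kubeconfig to ~/.kube/config
  export KUBECONFIG=$HOME/.kube/config
  kubectl get clusters,kubeadmcontrolplanes,machinedeployments -A
  # addon resources that are watched before the cluster is marked ready
  kubectl get manifests.addons.stackhpc.com,helmreleases.addons.stackhpc.com -A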

To aid developer and reviewer testing we have added a script to bring
up this environment within devstack:
devstack/contrib/new-devstack.sh
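
A minimal sketch of using it (assuming a fresh Ubuntu host or VM, and
that the script is run from a magnum checkout so that checkout is
installed into the devstack services):

  git clone https://opendev.org/openstack/magnum
  cd magnum
  ./devstack/contrib/new-devstack.sh
  # once stacked, the "new_driver" cluster template is registered
  source /opt/stack/openrc admin admin
  openstack coe cluster template list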

Future patches will fix up other APIs such as resizing the cluster
and adding additional node pools.

story: 2009780

Co-Authored-By: Matt Pryor <matt@stackhpc.com>
Co-Authored-By: Tyler Christie <tyler@stackhpc.com>
Co-Authored-By: John Garbutt <johng@stackhpc.com>

Change-Id: If583bec63a2135486e393bacb16402f6821b7ede
John Garbutt 2022-07-25 18:46:56 +01:00 committed by John Garbutt
parent 40c3be03c6
commit 83069e2930
9 changed files with 1893 additions and 9 deletions

devstack/contrib/new-devstack.sh Executable file

@@ -0,0 +1,102 @@
#!/bin/bash
#
# These instructions assume an Ubuntu-based host or VM for running devstack.
# Please note that if you are running this in a VM, it is vitally important
# that the underlying hardware have nested virtualization enabled or you will
# experience very poor amphora performance.
#
# Heavily based on:
# https://opendev.org/openstack/octavia/src/branch/master/devstack/contrib/new-octavia-devstack.sh
set -ex
# Set up the packages we need. Ubuntu package manager is assumed.
sudo apt-get update
sudo apt-get install git vim apparmor apparmor-utils -y
# Clone the devstack repo
sudo mkdir -p /opt/stack
if [ ! -f /opt/stack/stack.sh ]; then
sudo chown -R ${USER}. /opt/stack
git clone https://git.openstack.org/openstack-dev/devstack /opt/stack
fi
cat <<EOF > /opt/stack/local.conf
[[local|localrc]]
enable_plugin barbican https://opendev.org/openstack/barbican
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin neutron https://opendev.org/openstack/neutron
# NOTE: you can reference your gerrit patch here
# enable_plugin magnum https://review.opendev.org/openstack/magnum refs/<etc>
enable_plugin magnum https://opendev.org/openstack/magnum
enable_plugin magnum-ui https://opendev.org/openstack/magnum-ui
enable_plugin octavia https://opendev.org/openstack/octavia
enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard
LIBS_FROM_GIT+=python-octaviaclient
DATABASE_PASSWORD=secretdatabase
RABBIT_PASSWORD=secretrabbit
ADMIN_PASSWORD=secretadmin
HOST_IP=$(hostname -i)
SERVICE_PASSWORD=secretservice
SERVICE_TOKEN=111222333444
# Enable Logging
LOGFILE=/opt/stack/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
# Octavia services
enable_service octavia o-api o-cw o-da o-hk o-hm
enable_service tempest
GLANCE_LIMIT_IMAGE_SIZE_TOTAL=10000
LIBVIRT_TYPE=kvm
[[post-config|/etc/neutron/neutron.conf]]
[DEFAULT]
advertise_mtu = True
EOF
# Fix permissions on current tty so screens can attach
sudo chmod go+rw `tty`
# Stack that stack!
/opt/stack/stack.sh
#
# Install this checkout and restart the Magnum services
#
SELF_PATH="$(realpath "${BASH_SOURCE[0]:-${(%):-%x}}")"
REPO_PATH="$(dirname "$(dirname "$(dirname "$SELF_PATH")")")"
python3 -m pip install -e "$REPO_PATH"
sudo systemctl restart devstack@magnum-api devstack@magnum-cond
source /opt/stack/openrc admin admin
pip install python-magnumclient
# Add a k8s image
curl -O https://object.arcus.openstack.hpc.cam.ac.uk/swift/v1/AUTH_f0dc9cb312144d0aa44037c9149d2513/azimuth-images-prerelease/ubuntu-focal-kube-v1.26.3-230411-1504.qcow2
openstack image create ubuntu-focal-kube-v1.26.3 \
--file ubuntu-focal-kube-v1.26.3-230411-1504.qcow2 \
--disk-format qcow2 \
--container-format bare \
--public
openstack image set ubuntu-focal-kube-v1.26.3 --os-distro ubuntu --os-version 20.04
openstack image set ubuntu-focal-kube-v1.26.3 --property kube_version=v1.26.3
# Register template for cluster api driver
openstack coe cluster template create new_driver \
--coe kubernetes \
--image $(openstack image show ubuntu-focal-kube-v1.26.3 -c id -f value) \
--external-network public \
--label kube_tag=v1.26.3 \
--master-flavor ds2G20 \
--flavor ds2G20 \
--public \
--master-lb-enabled
# You can test it like this:
# openstack coe cluster create devstacktest \
# --cluster-template new_driver \
# --master-count 1 \
# --node-count 2
# openstack coe cluster list
# openstack coe cluster config devstacktest


@@ -310,11 +310,22 @@ function magnum_register_image {
echo "Unknown image extension in $image_filename, supported extensions: tgz, img, qcow2, iso, vhd, vhdx, tar.gz, img.gz, img.bz2, vhd.gz, vhdx.gz, qcow2.xz"; false
fi
# Cluster API driver sets kube_version on the image
# as the image includes the k8s binaries
if [ ! -z "$MAGNUM_IMAGE_KUBE_VERSION" ]; then
magnum_image_property=$magnum_image_property" --property kube_version=$MAGNUM_IMAGE_KUBE_VERSION"
fi
openstack image set $image_name $magnum_image_property
openstack image set --public $image_name
openstack image show -f yaml $image_name
}
#magnum_configure_flavor - set hw_rng property for flavor to address the potential entropy issue
function magnum_configure_flavor {
# add a new flavor with two vcpus and just enough RAM
openstack flavor create ds2G20 --ram 2048 --disk 20 --id d5 --vcpus 2 --public
local magnum_flavor_property="--property hw_rng:allowed=True --property hw_rng:rate_bytes=1024 --property hw_rng:rate_period=1"
local FLAVOR_LIST=$(openstack flavor list -c Name -f value)
@@ -332,12 +343,122 @@ function install_magnumclient {
fi
}
function setup_capi_management_cluster {
# Install kubectl CLI
curl -fsLo /tmp/kubectl "https://dl.k8s.io/release/$(curl -fsL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 /tmp/kubectl /usr/local/bin/kubectl
# K3s has issues without apparmor, so we add it here
sudo apt install -y apparmor apparmor-utils
# Install k3s
curl -fsL https://get.k3s.io | sudo bash -s - --disable traefik
# copy kubeconfig file into standard location
mkdir -p $HOME/.kube
sudo cp /etc/rancher/k3s/k3s.yaml $HOME/.kube/config
sudo chown $USER $HOME/.kube/config
# Install helm
curl -fsL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
{
# Install cert manager
helm upgrade cert-manager cert-manager \
--install \
--namespace cert-manager \
--create-namespace \
--repo https://charts.jetstack.io \
--version v1.10.1 \
--set installCRDs=true \
--wait \
--timeout 10m
} || {
kubectl -n cert-manager get pods | awk '$1 && $1!="NAME" { print $1 }' | xargs -n1 kubectl -n cert-manager logs
exit
}
# Install Cluster API resources
mkdir -p capi
cat <<EOF > capi/kustomization.yaml
---
resources:
- >-
https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.2/cluster-api-components.yaml
- >-
https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/download/v0.7.2/infrastructure-components.yaml
patches:
- patch: |-
- op: replace
path: /spec/template/spec/containers/0/args
value:
- --leader-elect
- --metrics-bind-addr=localhost:8080
target:
kind: Deployment
namespace: capi-system
name: capi-controller-manager
- patch: |-
- op: replace
path: /spec/template/spec/containers/0/args
value:
- --leader-elect
- --metrics-bind-addr=localhost:8080
target:
kind: Deployment
namespace: capi-kubeadm-bootstrap-system
name: capi-kubeadm-bootstrap-controller-manager
- patch: |-
- op: replace
path: /spec/template/spec/containers/0/args
value:
- --leader-elect
- --metrics-bind-addr=localhost:8080
target:
kind: Deployment
namespace: capi-kubeadm-control-plane-system
name: capi-kubeadm-control-plane-controller-manager
EOF
kubectl apply -k capi
kubectl rollout status deployment/capi-controller-manager \
--namespace capi-system \
--timeout 5m \
&& \
kubectl rollout status deployment/capi-kubeadm-bootstrap-controller-manager \
--namespace capi-kubeadm-bootstrap-system \
--timeout 5m \
&& \
kubectl rollout status deployment/capi-kubeadm-control-plane-controller-manager \
--namespace capi-kubeadm-control-plane-system \
--timeout 5m \
&& \
kubectl rollout status deployment/capo-controller-manager \
--namespace capo-system \
--timeout 10m
# Install addon manager
helm upgrade cluster-api-addon-provider cluster-api-addon-provider \
--install \
--repo https://stackhpc.github.io/cluster-api-addon-provider \
--version 0.1.0-dev.0.main.26 \
--namespace capi-addon-system \
--create-namespace \
--wait \
--timeout 10m
}
# install_magnum() - Collect source and prepare
function install_magnum {
install_apache_uwsgi
git_clone $MAGNUM_REPO $MAGNUM_DIR $MAGNUM_BRANCH
setup_develop $MAGNUM_DIR
# get ready for capi driver
setup_capi_management_cluster
}
# start_magnum_api() - Start the API process ahead of other things


@@ -13,12 +13,15 @@
import yaml
import certifi
import keystoneauth1
from oslo_log import log as logging
from magnum.common import clients
from magnum.common import utils
import magnum.conf
CONF = magnum.conf.CONF
LOG = logging.getLogger(__name__)
def get_openstack_ca_certificate():
@@ -69,3 +72,19 @@ def _create_app_cred(context, cluster):
def get_app_cred_yaml(context, cluster):
app_cred_dict = _create_app_cred(context, cluster)
return yaml.safe_dump(app_cred_dict)
def delete_app_cred(context, cluster):
osc = clients.OpenStackClients(context)
try:
appcred = osc.keystone().client.application_credentials.find(
name=f"magnum-{cluster.uuid}", user=cluster.user_id
)
except keystoneauth1.exceptions.http.NotFound:
# We don't want this to be a failure condition as it may prevent
# cleanup of broken clusters, e.g. if cluster creation fails
# before the appcred is created or cluster deletion fails after
# the appcred is deleted
LOG.warning("Appcred does not exist for %s", cluster.uuid)
else:
appcred.delete()


@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import enum
import re
from oslo_log import log as logging
@@ -26,9 +27,19 @@ from magnum.drivers.cluster_api import app_creds
from magnum.drivers.cluster_api import helm
from magnum.drivers.cluster_api import kubernetes
from magnum.drivers.common import driver
from magnum.drivers.common import k8s_monitor
from magnum.objects import fields
LOG = logging.getLogger(__name__)
CONF = conf.CONF
NODE_GROUP_ROLE_CONTROLLER = "master"
class NodeGroupState(enum.Enum):
NOT_PRESENT = 1
PENDING = 2
READY = 3
FAILED = 4
class Driver(driver.Driver):
@@ -53,8 +64,302 @@ class Driver(driver.Driver):
},
]
def _update_control_plane_nodegroup_status(self, cluster, nodegroup):
# The status of the master nodegroup is determined by the Cluster API
# control plane object
kcp = self._k8s_client.get_kubeadm_control_plane(
self._sanitised_name(
self._get_chart_release_name(cluster), "control-plane"
),
self._namespace(cluster),
)
ng_state = NodeGroupState.NOT_PRESENT
if kcp:
ng_state = NodeGroupState.PENDING
kcp_spec = kcp.get("spec", {}) if kcp else {}
kcp_status = kcp.get("status", {}) if kcp else {}
kcp_true_conditions = {
cond["type"]
for cond in kcp_status.get("conditions", [])
if cond["status"] == "True"
}
kcp_ready = all(
cond in kcp_true_conditions
for cond in (
"MachinesReady",
"Ready",
"EtcdClusterHealthy",
"ControlPlaneComponentsHealthy"
)
)
target_replicas = kcp_spec.get("replicas")
current_replicas = kcp_status.get("replicas")
updated_replicas = kcp_status.get("updatedReplicas")
ready_replicas = kcp_status.get("readyReplicas")
if (
kcp_ready and
target_replicas == current_replicas and
current_replicas == updated_replicas and
updated_replicas == ready_replicas
):
ng_state = NodeGroupState.READY
# TODO(mkjpryor) Work out a way to determine FAILED state
return self._update_nodegroup_status(cluster, nodegroup, ng_state)
def _update_worker_nodegroup_status(self, cluster, nodegroup):
# The status of a worker nodegroup is determined by the corresponding
# Cluster API machine deployment
md = self._k8s_client.get_machine_deployment(
self._sanitised_name(
self._get_chart_release_name(cluster), nodegroup.name
),
self._namespace(cluster),
)
ng_state = NodeGroupState.NOT_PRESENT
if md:
ng_state = NodeGroupState.PENDING
md_status = md.get("status", {}) if md else {}
md_phase = md_status.get("phase")
if md_phase:
if md_phase == "Running":
ng_state = NodeGroupState.READY
elif md_phase in {"Failed", "Unknown"}:
ng_state = NodeGroupState.FAILED
return self._update_nodegroup_status(cluster, nodegroup, ng_state)
def _update_nodegroup_status(self, cluster, nodegroup, ng_state):
# For delete we are waiting for not present
if nodegroup.status.startswith("DELETE_"):
if ng_state == NodeGroupState.NOT_PRESENT:
# Conductor will delete default nodegroups
# when cluster is deleted
LOG.debug(
f"Node group deleted: {nodegroup.name} "
f"for cluster {cluster.uuid}"
)
# signal the node group has been deleted
return None
LOG.debug(
f"Node group not yet delete: {nodegroup.name} "
f"for cluster {cluster.uuid}"
)
return nodegroup
is_update_operation = nodegroup.status.startswith("UPDATE_")
is_create_operation = nodegroup.status.startswith("CREATE_")
if not is_update_operation and not is_create_operation:
LOG.warning(
f"Node group: {nodegroup.name} in unexpected "
f"state: {nodegroup.status} in cluster {cluster.uuid}"
)
elif ng_state == NodeGroupState.READY:
nodegroup.status = (
fields.ClusterStatus.UPDATE_COMPLETE
if is_update_operation
else fields.ClusterStatus.CREATE_COMPLETE
)
LOG.debug(
f"Node group ready: {nodegroup.name} "
f"in cluster {cluster.uuid}"
)
nodegroup.save()
elif ng_state == NodeGroupState.FAILED:
nodegroup.status = (
fields.ClusterStatus.UPDATE_FAILED
if is_update_operation
else fields.ClusterStatus.CREATE_FAILED
)
LOG.debug(
f"Node group failed: {nodegroup.name} "
f"in cluster {cluster.uuid}"
)
nodegroup.save()
elif ng_state == NodeGroupState.NOT_PRESENT:
LOG.debug(
f"Node group not yet found: {nodegroup.name} "
f"state:{nodegroup.status} in cluster {cluster.uuid}"
)
else:
LOG.debug(
f"Node group still pending: {nodegroup.name} "
f"state:{nodegroup.status} in cluster {cluster.uuid}"
)
return nodegroup
def _update_cluster_api_address(self, cluster, capi_cluster):
# As soon as we know the API address, we should set it
# This means users can access the API even if the create is
# not complete, which could be useful for debugging failures,
# e.g. with addons
if not capi_cluster:
# skip update if cluster not yet created
return
if cluster.status not in [
fields.ClusterStatus.CREATE_IN_PROGRESS,
fields.ClusterStatus.UPDATE_IN_PROGRESS,
]:
# only update api-address when updating or creating
return
api_endpoint = capi_cluster["spec"].get("controlPlaneEndpoint")
if api_endpoint:
api_address = (
f"https://{api_endpoint['host']}:{api_endpoint['port']}"
)
if cluster.api_address != api_address:
cluster.api_address = api_address
cluster.save()
LOG.debug(f"Found api_address for {cluster.uuid}")
def _update_status_updating(self, cluster, capi_cluster):
# If the cluster is not yet ready then the create/update
# is still in progress
true_conditions = {
cond["type"]
for cond in capi_cluster.get("status", {}).get("conditions", [])
if cond["status"] == "True"
}
for cond in ("InfrastructureReady", "ControlPlaneReady", "Ready"):
if cond not in true_conditions:
return
is_update_operation = cluster.status.startswith("UPDATE_")
# Check the status of the addons
addons = self._k8s_client.get_addons_by_label(
"addons.stackhpc.com/cluster",
self._sanitised_name(self._get_chart_release_name(cluster)),
self._namespace(cluster)
)
for addon in addons:
addon_phase = addon.get("status", {}).get("phase")
if addon_phase and addon_phase in {"Failed", "Unknown"}:
# If the addon is failed, mark the cluster as failed
cluster.status = (
fields.ClusterStatus.UPDATE_FAILED
if is_update_operation
else fields.ClusterStatus.CREATE_FAILED
)
cluster.save()
return
elif addon_phase and addon_phase == "Deployed":
# If the addon is deployed, move on to the next one
continue
else:
# If there are any addons that are not deployed or failed,
# wait for the next invocation to check again
LOG.debug(
f"addon {addon['metadata']['name']} not yet deployed "
f"for {cluster.uuid}"
)
return
# If we get this far, the cluster has completed successfully
cluster.status = (
fields.ClusterStatus.UPDATE_COMPLETE
if is_update_operation
else fields.ClusterStatus.CREATE_COMPLETE
)
cluster.save()
def _update_status_deleting(self, context, cluster):
# Once the Cluster API cluster is gone, we need to clean up
# the secrets we created
self._k8s_client.delete_all_secrets_by_label(
"magnum.openstack.org/cluster-uuid",
cluster.uuid,
self._namespace(cluster),
)
# We also need to clean up the appcred that we made
app_creds.delete_app_cred(context, cluster)
cluster.status = fields.ClusterStatus.DELETE_COMPLETE
cluster.save()
def _get_capi_cluster(self, cluster):
return self._k8s_client.get_capi_cluster(
self._sanitised_name(self._get_chart_release_name(cluster)),
self._namespace(cluster),
)
def _update_all_nodegroups_status(self, cluster):
"""Returns True if any node group still in progress."""
nodegroups = []
for nodegroup in cluster.nodegroups:
if nodegroup.role == NODE_GROUP_ROLE_CONTROLLER:
updated_nodegroup = (
self._update_control_plane_nodegroup_status(
cluster, nodegroup
)
)
else:
updated_nodegroup = self._update_worker_nodegroup_status(
cluster, nodegroup
)
if updated_nodegroup:
nodegroups.append(updated_nodegroup)
# Return True if any are still in progress
for nodegroup in nodegroups:
if nodegroup.status.endswith("_IN_PROGRESS"):
return True
return False
def update_cluster_status(self, context, cluster):
# NOTE(mkjpryor)
# Because Kubernetes operators are built around reconciliation loops,
# Cluster API clusters don't really go into an error state
# Hence we only currently handle transitioning from IN_PROGRESS
# states to COMPLETE
# TODO(mkjpryor) Add a timeout for create/update/delete
# Update the cluster API address if it is known
# so users can get their coe credentials
capi_cluster = self._get_capi_cluster(cluster)
self._update_cluster_api_address(cluster, capi_cluster)
# Update the nodegroups first
# to ensure API never returns an inconsistent state
nodegroups_in_progress = self._update_all_nodegroups_status(cluster)
if cluster.status in {
fields.ClusterStatus.CREATE_IN_PROGRESS,
fields.ClusterStatus.UPDATE_IN_PROGRESS,
}:
LOG.debug("Checking on an update for %s", cluster.uuid)
# If the cluster does not exist yet,
# create is still in progress
if not capi_cluster:
LOG.debug(f"capi_cluster not yet created for {cluster.uuid}")
return
if nodegroups_in_progress:
LOG.debug(f"Node groups are not all ready for {cluster.uuid}")
return
self._update_status_updating(cluster, capi_cluster)
elif cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
LOG.debug("Checking on a delete for %s", cluster.uuid)
# If the Cluster API cluster still exists,
# the delete is still in progress
if capi_cluster:
LOG.debug(f"capi_cluster still found for {cluster.uuid}")
return
self._update_status_deleting(context, cluster)
def get_monitor(self, context, cluster):
return k8s_monitor.K8sMonitor(context, cluster)
def _namespace(self, cluster):
# We create clusters in a project-specific namespace
@@ -162,7 +467,7 @@ class Driver(driver.Driver):
"machineCount": ng.node_count,
}
for ng in cluster.nodegroups
if ng.role != "master"
if ng.role != NODE_GROUP_ROLE_CONTROLLER
],
}
@@ -281,6 +586,8 @@ class Driver(driver.Driver):
# and it makes renaming clusters in the API possible
self._generate_release_name(cluster)
# NOTE(johngarbutt) all node groups should already
# be in the CREATE_IN_PROGRESS state
self._k8s_client.ensure_namespace(self._namespace(cluster))
self._create_appcred_secret(context, cluster)
self._ensure_certificate_secrets(context, cluster)
@@ -294,6 +601,15 @@
def delete_cluster(self, context, cluster):
LOG.info("Starting to delete cluster %s", cluster.uuid)
# Copy the helm driver by marking all node groups
# as delete in progress here, as this is not done by the conductor
# We do this before calling uninstall_release because
# update_cluster_status can get called before we return
for ng in cluster.nodegroups:
ng.status = fields.ClusterStatus.DELETE_IN_PROGRESS
ng.save()
# Begin the deletion of the cluster resources by uninstalling the
# Helm release
# Note that this just marks the resources for deletion - it does not


@@ -129,6 +129,41 @@ class Client(requests.Session):
def apply_secret(self, secret_name, data, namespace):
Secret(self).apply(secret_name, data, namespace)
def delete_all_secrets_by_label(self, label, value, namespace):
Secret(self).delete_all_by_label(label, value, namespace)
def get_capi_cluster(self, name, namespace):
return Cluster(self).fetch(name, namespace)
def get_kubeadm_control_plane(self, name, namespace):
return KubeadmControlPlane(self).fetch(name, namespace)
def get_machine_deployment(self, name, namespace):
return MachineDeployment(self).fetch(name, namespace)
def get_manifests_by_label(self, label, value, namespace):
return list(
Manifests(self).fetch_all_by_label(
label,
value,
namespace
)
)
def get_helm_releases_by_label(self, label, value, namespace):
return list(
HelmRelease(self).fetch_all_by_label(
label,
value,
namespace
)
)
def get_addons_by_label(self, label, value, namespace):
addons = list(self.get_manifests_by_label(label, value, namespace))
addons.extend(self.get_helm_releases_by_label(label, value, namespace))
return addons
class Resource:
def __init__(self, client):
@@ -153,6 +188,39 @@ class Resource:
f"{self.plural_name}{path_name}"
)
def fetch(self, name, namespace=None):
"""Fetches specified object from the target Kubernetes cluster.
If the object is not found, None is returned.
"""
assert self.namespaced == bool(namespace)
response = self.client.get(self.prepare_path(name, namespace))
if 200 <= response.status_code < 300:
return response.json()
elif response.status_code == 404:
return None
else:
response.raise_for_status()
def fetch_all_by_label(self, label, value, namespace=None):
"""Fetches all objects with the specified label from cluster."""
assert self.namespaced == bool(namespace)
continue_token = ""
while True:
params = {"labelSelector": f"{label}={value}"}
if continue_token:
params["continue"] = continue_token
response = self.client.get(
self.prepare_path(namespace=namespace),
params=params
)
response.raise_for_status()
response_data = response.json()
yield from response_data["items"]
continue_token = response_data["metadata"]["continue"]
if not continue_token:
break
def apply(self, name, data=None, namespace=None):
"""Applies the given object to the target Kubernetes cluster."""
assert self.namespaced == bool(namespace)
@@ -171,6 +239,15 @@
response.raise_for_status()
return response.json()
def delete_all_by_label(self, label, value, namespace=None):
"""Deletes all objects with the specified label from cluster."""
assert self.namespaced == bool(namespace)
response = self.client.delete(
self.prepare_path(namespace=namespace),
params={"labelSelector": f"{label}={value}"},
)
response.raise_for_status()
class Namespace(Resource):
api_version = "v1"
@@ -179,3 +256,24 @@ class Namespace(Resource):
class Secret(Resource):
api_version = "v1"
class Cluster(Resource):
api_version = "cluster.x-k8s.io/v1beta1"
class MachineDeployment(Resource):
api_version = "cluster.x-k8s.io/v1beta1"
class KubeadmControlPlane(Resource):
api_version = "controlplane.cluster.x-k8s.io/v1beta1"
class Manifests(Resource):
api_version = "addons.stackhpc.com/v1alpha1"
plural_name = "manifests"
class HelmRelease(Resource):
api_version = "addons.stackhpc.com/v1alpha1"


@ -12,6 +12,8 @@
import collections
from unittest import mock
import keystoneauth1
from magnum.common import clients
from magnum.common import utils
from magnum.drivers.cluster_api import app_creds
@@ -97,3 +99,29 @@ clouds:
application_credential_id: id
"""
self.assertEqual(expected, app_cred)
@mock.patch.object(clients, "OpenStackClients")
def test_delete_app_cred(self, mock_client):
mock_app_cred = mock_client().keystone().client.application_credentials
mock_find = mock.MagicMock()
mock_app_cred.find.return_value = mock_find
app_creds.delete_app_cred("context", self.cluster_obj)
mock_find.delete.assert_called_once_with()
mock_app_cred.find.assert_called_once_with(
name=f"magnum-{self.cluster_obj.uuid}",
user="fake_user",
)
@mock.patch.object(clients, "OpenStackClients")
def test_delete_app_cred_not_found(self, mock_client):
mock_app_cred = mock_client().keystone().client.application_credentials
mock_app_cred.find.side_effect = keystoneauth1.exceptions.http.NotFound
app_creds.delete_app_cred("context", self.cluster_obj)
mock_app_cred.find.assert_called_once_with(
name=f"magnum-{self.cluster_obj.uuid}",
user="fake_user",
)


@@ -19,7 +19,9 @@ from magnum.drivers.cluster_api import app_creds
from magnum.drivers.cluster_api import driver
from magnum.drivers.cluster_api import helm
from magnum.drivers.cluster_api import kubernetes
from magnum.drivers.common import k8s_monitor
from magnum import objects
from magnum.objects import fields
from magnum.tests.unit.db import base
from magnum.tests.unit.objects import utils as obj_utils
@@ -44,13 +46,767 @@ class ClusterAPIDriverTest(base.DbTestCase):
self.driver.provides,
)
@mock.patch.object(driver.Driver, "_update_status_deleting")
@mock.patch.object(driver.Driver, "_update_status_updating")
@mock.patch.object(driver.Driver, "_update_all_nodegroups_status")
@mock.patch.object(driver.Driver, "_get_capi_cluster")
def test_update_cluster_status_creating(
self, mock_capi, mock_ng, mock_update, mock_delete
):
mock_ng.return_value = True
mock_capi.return_value = {"spec": {}}
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
self.driver.update_cluster_status(self.context, self.cluster_obj)
mock_ng.assert_called_once_with(self.cluster_obj)
mock_update.assert_not_called()
mock_delete.assert_not_called()
@mock.patch.object(driver.Driver, "_update_status_deleting")
@mock.patch.object(driver.Driver, "_update_status_updating")
@mock.patch.object(driver.Driver, "_update_all_nodegroups_status")
@mock.patch.object(driver.Driver, "_get_capi_cluster")
def test_update_cluster_status_creating_not_found(
self, mock_capi, mock_ng, mock_update, mock_delete
):
mock_ng.return_value = True
mock_capi.return_value = None
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
self.driver.update_cluster_status(self.context, self.cluster_obj)
mock_ng.assert_called_once_with(self.cluster_obj)
mock_update.assert_not_called()
mock_delete.assert_not_called()
@mock.patch.object(driver.Driver, "_update_status_deleting")
@mock.patch.object(driver.Driver, "_update_status_updating")
@mock.patch.object(driver.Driver, "_update_all_nodegroups_status")
@mock.patch.object(driver.Driver, "_get_capi_cluster")
def test_update_cluster_status_created(
self, mock_capi, mock_ng, mock_update, mock_delete
):
mock_ng.return_value = False
mock_capi.return_value = {"spec": {}}
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
self.driver.update_cluster_status(self.context, self.cluster_obj)
mock_ng.assert_called_once_with(self.cluster_obj)
mock_update.assert_called_once_with(self.cluster_obj, {"spec": {}})
mock_delete.assert_not_called()
@mock.patch.object(driver.Driver, "_update_status_deleting")
@mock.patch.object(driver.Driver, "_update_status_updating")
@mock.patch.object(driver.Driver, "_update_all_nodegroups_status")
@mock.patch.object(driver.Driver, "_get_capi_cluster")
def test_update_cluster_status_deleted(
self, mock_capi, mock_ng, mock_update, mock_delete
):
mock_capi.return_value = None
self.cluster_obj.status = fields.ClusterStatus.DELETE_IN_PROGRESS
self.driver.update_cluster_status(self.context, self.cluster_obj)
mock_ng.assert_called_once_with(self.cluster_obj)
mock_update.assert_not_called()
mock_delete.assert_called_once_with(self.context, self.cluster_obj)
@mock.patch.object(driver.Driver, "_update_status_deleting")
@mock.patch.object(driver.Driver, "_update_status_updating")
@mock.patch.object(driver.Driver, "_update_all_nodegroups_status")
@mock.patch.object(driver.Driver, "_get_capi_cluster")
def test_update_cluster_status_deleting(
self, mock_capi, mock_ng, mock_update, mock_delete
):
mock_capi.return_value = {"spec": {}}
self.cluster_obj.status = fields.ClusterStatus.DELETE_IN_PROGRESS
self.driver.update_cluster_status(self.context, self.cluster_obj)
mock_ng.assert_called_once_with(self.cluster_obj)
mock_update.assert_not_called()
mock_delete.assert_not_called()
@mock.patch.object(driver.Driver, "_update_status_deleting")
@mock.patch.object(driver.Driver, "_update_status_updating")
@mock.patch.object(driver.Driver, "_update_all_nodegroups_status")
@mock.patch.object(driver.Driver, "_get_capi_cluster")
def test_update_cluster_status_create_complete(
self, mock_capi, mock_ng, mock_update, mock_delete
):
mock_capi.return_value = {"spec": {}}
self.cluster_obj.status = fields.ClusterStatus.CREATE_COMPLETE
self.driver.update_cluster_status(self.context, self.cluster_obj)
mock_ng.assert_called_once_with(self.cluster_obj)
mock_update.assert_not_called()
mock_delete.assert_not_called()
@mock.patch.object(driver.Driver, "_update_worker_nodegroup_status")
@mock.patch.object(driver.Driver, "_update_control_plane_nodegroup_status")
def test_update_all_nodegroups_status_not_in_progress(
self, mock_cp, mock_w
):
control_plane = [
ng
for ng in self.cluster_obj.nodegroups
if ng.role == driver.NODE_GROUP_ROLE_CONTROLLER
][0]
control_plane.status = fields.ClusterStatus.CREATE_COMPLETE
mock_cp.return_value = control_plane
mock_w.return_value = None
result = self.driver._update_all_nodegroups_status(self.cluster_obj)
self.assertFalse(result)
control_plane = [
ng
for ng in self.cluster_obj.nodegroups
if ng.role == driver.NODE_GROUP_ROLE_CONTROLLER
][0]
mock_cp.assert_called_once_with(self.cluster_obj, mock.ANY)
self.assertEqual(
control_plane.obj_to_primitive(),
mock_cp.call_args_list[0][0][1].obj_to_primitive(),
)
mock_w.assert_called_once_with(self.cluster_obj, mock.ANY)
worker = [
ng
for ng in self.cluster_obj.nodegroups
if ng.role != driver.NODE_GROUP_ROLE_CONTROLLER
][0]
self.assertEqual(
worker.obj_to_primitive(),
mock_w.call_args_list[0][0][1].obj_to_primitive(),
)
@mock.patch.object(driver.Driver, "_update_worker_nodegroup_status")
@mock.patch.object(driver.Driver, "_update_control_plane_nodegroup_status")
def test_update_all_nodegroups_status_in_progress(self, mock_cp, mock_w):
control_plane = [
ng
for ng in self.cluster_obj.nodegroups
if ng.role == driver.NODE_GROUP_ROLE_CONTROLLER
][0]
control_plane.status = fields.ClusterStatus.CREATE_IN_PROGRESS
mock_cp.return_value = control_plane
mock_w.return_value = None
result = self.driver._update_all_nodegroups_status(self.cluster_obj)
self.assertTrue(result)
mock_cp.assert_called_once_with(self.cluster_obj, mock.ANY)
mock_w.assert_called_once_with(self.cluster_obj, mock.ANY)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_worker_nodegroup_status_empty(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "workers"
mock_client.get_machine_deployment.return_value = None
self.driver._update_worker_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_machine_deployment.assert_called_once_with(
"cluster-example-a-111111111111-workers", "magnum-fakeproject"
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.NOT_PRESENT
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_worker_nodegroup_status_scaling_up(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "workers"
md = {"status": {"phase": "ScalingUp"}}
mock_client.get_machine_deployment.return_value = md
self.driver._update_worker_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_machine_deployment.assert_called_once_with(
"cluster-example-a-111111111111-workers", "magnum-fakeproject"
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.PENDING
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_worker_nodegroup_status_failed(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "workers"
md = {"status": {"phase": "Failed"}}
mock_client.get_machine_deployment.return_value = md
self.driver._update_worker_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_machine_deployment.assert_called_once_with(
"cluster-example-a-111111111111-workers", "magnum-fakeproject"
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.FAILED
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_worker_nodegroup_status_running(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "workers"
md = {"status": {"phase": "Running"}}
mock_client.get_machine_deployment.return_value = md
self.driver._update_worker_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_machine_deployment.assert_called_once_with(
"cluster-example-a-111111111111-workers", "magnum-fakeproject"
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.READY
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_control_plane_nodegroup_status_empty(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "masters"
mock_client.get_kubeadm_control_plane.return_value = None
self.driver._update_control_plane_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_kubeadm_control_plane.assert_called_once_with(
"cluster-example-a-111111111111-control-plane",
"magnum-fakeproject",
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.NOT_PRESENT
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_control_plane_nodegroup_status_condition_false(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "masters"
kcp = {
"spec": {
"replicas": 3,
},
"status": {
"conditions": [
{"type": "MachinesReady", "status": "True"},
{"type": "Ready", "status": "True"},
{"type": "EtcdClusterHealthy", "status": "True"},
{
"type": "ControlPlaneComponentsHealthy",
"status": "False",
},
],
"replicas": 3,
"updatedReplicas": 3,
"readyReplicas": 3,
}
}
mock_client.get_kubeadm_control_plane.return_value = kcp
self.driver._update_control_plane_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_kubeadm_control_plane.assert_called_once_with(
"cluster-example-a-111111111111-control-plane",
"magnum-fakeproject",
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.PENDING
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_control_plane_nodegroup_status_mismatched_replicas(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "masters"
kcp = {
"spec": {
"replicas": 3,
},
"status": {
"conditions": [
{"type": "MachinesReady", "status": "True"},
{"type": "Ready", "status": "True"},
{"type": "EtcdClusterHealthy", "status": "True"},
{
"type": "ControlPlaneComponentsHealthy",
"status": "True",
},
],
"replicas": 3,
"updatedReplicas": 2,
"readyReplicas": 2,
}
}
mock_client.get_kubeadm_control_plane.return_value = kcp
self.driver._update_control_plane_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_kubeadm_control_plane.assert_called_once_with(
"cluster-example-a-111111111111-control-plane",
"magnum-fakeproject",
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.PENDING
)
@mock.patch.object(driver.Driver, "_update_nodegroup_status")
@mock.patch.object(kubernetes.Client, "load")
def test_update_control_plane_nodegroup_status_ready(
self, mock_load, mock_update
):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
nodegroup = mock.MagicMock()
nodegroup.name = "masters"
kcp = {
"spec": {
"replicas": 3,
},
"status": {
"conditions": [
{"type": "MachinesReady", "status": "True"},
{"type": "Ready", "status": "True"},
{"type": "EtcdClusterHealthy", "status": "True"},
{
"type": "ControlPlaneComponentsHealthy",
"status": "True",
},
],
"replicas": 3,
"updatedReplicas": 3,
"readyReplicas": 3,
}
}
mock_client.get_kubeadm_control_plane.return_value = kcp
self.driver._update_control_plane_nodegroup_status(
self.cluster_obj, nodegroup
)
mock_client.get_kubeadm_control_plane.assert_called_once_with(
"cluster-example-a-111111111111-control-plane",
"magnum-fakeproject",
)
mock_update.assert_called_once_with(
self.cluster_obj, mock.ANY, driver.NodeGroupState.READY
)
@mock.patch.object(k8s_monitor, "K8sMonitor")
def test_get_monitor(self, mock_mon):
self.driver.get_monitor(self.context, self.cluster_obj)
mock_mon.assert_called_once_with(self.context, self.cluster_obj)
@mock.patch.object(kubernetes.Client, "load")
def test_get_capi_cluster(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
self.driver._get_capi_cluster(self.cluster_obj)
mock_client.get_capi_cluster.assert_called_once_with(
"cluster-example-a-111111111111", "magnum-fakeproject"
)
@mock.patch.object(app_creds, "delete_app_cred")
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_deleting(self, mock_load, mock_delete):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
self.driver._update_status_deleting(self.context, self.cluster_obj)
self.assertEqual("DELETE_COMPLETE", self.cluster_obj.status)
mock_delete.assert_called_once_with(self.context, self.cluster_obj)
mock_client.delete_all_secrets_by_label.assert_called_once_with(
"magnum.openstack.org/cluster-uuid",
self.cluster_obj.uuid,
"magnum-fakeproject",
)
def test_update_status_updating_not_ready(self):
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_IN_PROGRESS, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_condition_false(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = []
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="False"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_IN_PROGRESS, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_ready_created(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = []
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="True"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_COMPLETE, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_addons_unknown(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = [
{
"metadata": {"name": "cni"},
"status": {},
},
{
"metadata": {"name": "monitoring"},
"status": {},
},
]
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="True"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_IN_PROGRESS, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_addons_installing(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = [
{
"metadata": {"name": "cni"},
"status": {"phase": "Deployed"},
},
{
"metadata": {"name": "monitoring"},
"status": {"phase": "Installing"},
},
]
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="True"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_IN_PROGRESS, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_addons_deployed(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = [
{
"metadata": {"name": "cni"},
"status": {"phase": "Deployed"},
},
{
"metadata": {"name": "monitoring"},
"status": {"phase": "Deployed"},
},
]
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="True"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_COMPLETE, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_addons_failed(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = [
{
"metadata": {"name": "cni"},
"status": {"phase": "Deployed"},
},
{
"metadata": {"name": "monitoring"},
"status": {"phase": "Failed"},
},
]
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.CREATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="True"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.CREATE_FAILED, self.cluster_obj.status
)
@mock.patch.object(kubernetes.Client, "load")
def test_update_status_updating_ready_updated(self, mock_load):
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_client.get_addons_by_label.return_value = []
mock_load.return_value = mock_client
self.cluster_obj.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
capi_cluster = {
"status": {
"conditions": [
dict(type="InfrastructureReady", status="True"),
dict(type="ControlPlaneReady", status="True"),
dict(type="Ready", status="True"),
]
}
}
self.driver._update_status_updating(self.cluster_obj, capi_cluster)
self.assertEqual(
fields.ClusterStatus.UPDATE_COMPLETE, self.cluster_obj.status
)
def test_update_cluster_api_address(self):
capi_cluster = {
"spec": {"controlPlaneEndpoint": {"host": "foo", "port": 6443}}
}
self.driver._update_cluster_api_address(self.cluster_obj, capi_cluster)
self.assertEqual("https://foo:6443", self.cluster_obj.api_address)
def test_update_cluster_api_address_skip(self):
self.cluster_obj.api_address = "asdf"
capi_cluster = {"spec": {"foo": "bar"}}
self.driver._update_cluster_api_address(self.cluster_obj, capi_cluster)
self.assertEqual("asdf", self.cluster_obj.api_address)
def test_update_cluster_api_address_skip_on_delete(self):
self.cluster_obj.status = fields.ClusterStatus.DELETE_IN_PROGRESS
self.cluster_obj.api_address = "asdf"
capi_cluster = {
"spec": {"controlPlaneEndpoint": {"host": "foo", "port": 6443}}
}
self.driver._update_cluster_api_address(self.cluster_obj, capi_cluster)
self.assertEqual("asdf", self.cluster_obj.api_address)
def test_update_nodegroup_status_create_complete(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.CREATE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.READY
)
self.assertEqual(fields.ClusterStatus.CREATE_COMPLETE, updated.status)
def test_update_nodegroup_status_update_complete(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.READY
)
self.assertEqual(fields.ClusterStatus.UPDATE_COMPLETE, updated.status)
def test_update_nodegroup_status_create_failed(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.CREATE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.FAILED
)
self.assertEqual(fields.ClusterStatus.CREATE_FAILED, updated.status)
def test_update_nodegroup_status_update_failed(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.FAILED
)
self.assertEqual(fields.ClusterStatus.UPDATE_FAILED, updated.status)
def test_update_nodegroup_status_create_in_progress(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.CREATE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.PENDING
)
self.assertEqual(
fields.ClusterStatus.CREATE_IN_PROGRESS, updated.status
)
def test_update_nodegroup_status_delete_in_progress(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.DELETE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.PENDING
)
self.assertEqual(
fields.ClusterStatus.DELETE_IN_PROGRESS, updated.status
)
self.assertEqual(nodegroup.as_dict(), updated.as_dict())
def test_update_nodegroup_creating_but_not_found(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.CREATE_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.NOT_PRESENT
)
self.assertEqual(
fields.ClusterStatus.CREATE_IN_PROGRESS, updated.status
)
def test_update_nodegroup_status_delete_return_none(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.DELETE_IN_PROGRESS
result = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.NOT_PRESENT
)
self.assertIsNone(result)
def test_update_nodegroup_status_delete_unexpected_state(self):
nodegroup = obj_utils.create_test_nodegroup(self.context)
nodegroup.status = fields.ClusterStatus.ROLLBACK_IN_PROGRESS
updated = self.driver._update_nodegroup_status(
self.cluster_obj, nodegroup, driver.NodeGroupState.NOT_PRESENT
)
self.assertEqual(
fields.ClusterStatus.ROLLBACK_IN_PROGRESS, updated.status
)
self.assertEqual(nodegroup.as_dict(), updated.as_dict())
def test_namespace(self):
self.cluster_obj.project_id = "123-456F"
@@ -215,7 +971,6 @@ class ClusterAPIDriverTest(base.DbTestCase):
mock_image.return_value = ("imageid1", "1.27.4")
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
self.cluster_obj.keypair = "kp1"
self.driver.create_cluster(self.context, self.cluster_obj, 10)
@@ -264,6 +1019,133 @@
mock_appcred.assert_called_once_with(self.context, self.cluster_obj)
mock_certs.assert_called_once_with(self.context, self.cluster_obj)
@mock.patch.object(driver.Driver, "_ensure_certificate_secrets")
@mock.patch.object(driver.Driver, "_create_appcred_secret")
@mock.patch.object(kubernetes.Client, "load")
@mock.patch.object(driver.Driver, "_get_image_details")
@mock.patch.object(helm.Client, "install_or_upgrade")
def test_create_cluster_no_dns(
self,
mock_install,
mock_image,
mock_load,
mock_appcred,
mock_certs,
):
mock_image.return_value = ("imageid1", "1.27.4")
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
self.cluster_obj.cluster_template.dns_nameserver = ""
self.cluster_obj.keypair = "kp1"
self.driver.create_cluster(self.context, self.cluster_obj, 10)
app_cred_name = "cluster-example-a-111111111111-cloud-credentials"
mock_install.assert_called_once_with(
"cluster-example-a-111111111111",
"openstack-cluster",
{
"kubernetesVersion": "1.27.4",
"machineImageId": "imageid1",
"cloudCredentialsSecretName": app_cred_name,
"clusterNetworking": {
"internalNetwork": {"nodeCidr": "10.0.0.0/24"},
},
"apiServer": {
"enableLoadBalancer": True,
"loadBalancerProvider": "amphora",
},
"controlPlane": {
"machineFlavor": "flavor_small",
"machineCount": 3,
},
"addons": {
"monitoring": {"enabled": True},
"kubernetesDashboard": {"enabled": True},
"ingress": {"enabled": False},
},
"nodeGroups": [
{
"name": "test-worker",
"machineFlavor": "flavor_medium",
"machineCount": 3,
}
],
"machineSSHKeyName": "kp1",
},
repo=CONF.capi_driver.helm_chart_repo,
version=CONF.capi_driver.helm_chart_version,
namespace="magnum-fakeproject",
)
mock_client.ensure_namespace.assert_called_once_with(
"magnum-fakeproject"
)
mock_appcred.assert_called_once_with(self.context, self.cluster_obj)
mock_certs.assert_called_once_with(self.context, self.cluster_obj)
@mock.patch.object(driver.Driver, "_ensure_certificate_secrets")
@mock.patch.object(driver.Driver, "_create_appcred_secret")
@mock.patch.object(kubernetes.Client, "load")
@mock.patch.object(driver.Driver, "_get_image_details")
@mock.patch.object(helm.Client, "install_or_upgrade")
def test_create_cluster_no_keypair(
self,
mock_install,
mock_image,
mock_load,
mock_appcred,
mock_certs,
):
mock_image.return_value = ("imageid1", "1.27.4")
mock_client = mock.MagicMock(spec=kubernetes.Client)
mock_load.return_value = mock_client
self.cluster_obj.keypair = ""
self.driver.create_cluster(self.context, self.cluster_obj, 10)
app_cred_name = "cluster-example-a-111111111111-cloud-credentials"
mock_install.assert_called_once_with(
"cluster-example-a-111111111111",
"openstack-cluster",
{
"kubernetesVersion": "1.27.4",
"machineImageId": "imageid1",
"cloudCredentialsSecretName": app_cred_name,
"clusterNetworking": {
"internalNetwork": {"nodeCidr": "10.0.0.0/24"},
"dnsNameservers": ["8.8.1.1"],
},
"apiServer": {
"enableLoadBalancer": True,
"loadBalancerProvider": "amphora",
},
"controlPlane": {
"machineFlavor": "flavor_small",
"machineCount": 3,
},
"addons": {
"monitoring": {"enabled": True},
"kubernetesDashboard": {"enabled": True},
"ingress": {"enabled": False},
},
"nodeGroups": [
{
"name": "test-worker",
"machineFlavor": "flavor_medium",
"machineCount": 3,
}
],
},
repo=CONF.capi_driver.helm_chart_repo,
version=CONF.capi_driver.helm_chart_version,
namespace="magnum-fakeproject",
)
mock_client.ensure_namespace.assert_called_once_with(
"magnum-fakeproject"
)
mock_appcred.assert_called_once_with(self.context, self.cluster_obj)
mock_certs.assert_called_once_with(self.context, self.cluster_obj)
@mock.patch.object(app_creds, "get_app_cred_yaml")
@mock.patch.object(app_creds, "get_openstack_ca_certificate")
@mock.patch.object(kubernetes.Client, "load")


@@ -158,3 +158,292 @@ class TestKubernetesClient(base.TestCase):
headers={"Content-Type": "application/apply-patch+yaml"},
params={"fieldManager": "magnum", "force": "true"},
)
@mock.patch.object(requests.Session, "request")
def test_delete_all_secrets_by_label(self, mock_request):
client = kubernetes.Client(TEST_KUBECONFIG)
mock_response = mock.MagicMock()
mock_request.return_value = mock_response
client.delete_all_secrets_by_label("label", "cluster1", "ns1")
mock_request.assert_called_once_with(
"DELETE",
"https://test:6443/api/v1/namespaces/ns1/secrets",
params={"labelSelector": "label=cluster1"},
)
mock_response.raise_for_status.assert_called_once_with()
@mock.patch.object(requests.Session, "request")
def test_get_capi_cluster_found(self, mock_request):
client = kubernetes.Client(TEST_KUBECONFIG)
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = "mock_json"
mock_request.return_value = mock_response
cluster = client.get_capi_cluster("name", "ns1")
mock_request.assert_called_once_with(
"GET",
(
"https://test:6443/apis/cluster.x-k8s.io/"
"v1beta1/namespaces/ns1/clusters/name"
),
allow_redirects=True,
)
self.assertEqual("mock_json", cluster)
@mock.patch.object(requests.Session, "request")
def test_get_capi_cluster_not_found(self, mock_request):
client = kubernetes.Client(TEST_KUBECONFIG)
mock_response = mock.MagicMock()
mock_response.status_code = 404
mock_request.return_value = mock_response
cluster = client.get_capi_cluster("name", "ns1")
self.assertIsNone(cluster)
@mock.patch.object(requests.Session, "request")
def test_get_capi_cluster_error(self, mock_request):
client = kubernetes.Client(TEST_KUBECONFIG)
mock_response = mock.MagicMock()
mock_response.status_code = 500
mock_response.raise_for_status.side_effect = requests.HTTPError
mock_request.return_value = mock_response
self.assertRaises(
requests.HTTPError, client.get_capi_cluster, "name", "ns1"
)
@mock.patch.object(requests.Session, "request")
def test_get_kubeadm_control_plane_found(self, mock_request):
client = kubernetes.Client(TEST_KUBECONFIG)
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = "mock_json"
mock_request.return_value = mock_response
cluster = client.get_kubeadm_control_plane("name", "ns1")
mock_request.assert_called_once_with(
"GET",
(
"https://test:6443/apis/controlplane.cluster.x-k8s.io/"
"v1beta1/namespaces/ns1/kubeadmcontrolplanes/name"
),
allow_redirects=True,
)
self.assertEqual("mock_json", cluster)
@mock.patch.object(requests.Session, "request")
def test_get_machine_deployment_found(self, mock_request):
client = kubernetes.Client(TEST_KUBECONFIG)
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = "mock_json"
mock_request.return_value = mock_response
cluster = client.get_machine_deployment("name", "ns1")
mock_request.assert_called_once_with(
"GET",
(
"https://test:6443/apis/cluster.x-k8s.io/"
"v1beta1/namespaces/ns1/machinedeployments/name"
),
allow_redirects=True,
)
self.assertEqual("mock_json", cluster)
@mock.patch.object(requests.Session, "request")
def test_get_manifests_by_label_found(self, mock_request):
items = [
{
"kind": "Manifests",
"metadata": {
"name": f"manifests{idx}",
"namespace": "ns1"
},
}
for idx in range(5)
]
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"metadata": {
"continue": "",
},
"items": items,
}
mock_request.return_value = mock_response
client = kubernetes.Client(TEST_KUBECONFIG)
manifests = client.get_manifests_by_label("label", "cluster1", "ns1")
mock_request.assert_called_once_with(
"GET",
(
"https://test:6443/apis/addons.stackhpc.com/"
"v1alpha1/namespaces/ns1/manifests"
),
params={"labelSelector": "label=cluster1"},
allow_redirects=True
)
self.assertEqual(items, manifests)
@mock.patch.object(requests.Session, "request")
def test_get_helm_releases_by_label_found(self, mock_request):
items = [
{
"kind": "HelmRelease",
"metadata": {
"name": f"helmrelease{idx}",
"namespace": "ns1"
},
}
for idx in range(5)
]
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"metadata": {
"continue": "",
},
"items": items,
}
mock_request.return_value = mock_response
client = kubernetes.Client(TEST_KUBECONFIG)
helm_releases = client.get_helm_releases_by_label(
"label",
"cluster1",
"ns1"
)
mock_request.assert_called_once_with(
"GET",
(
"https://test:6443/apis/addons.stackhpc.com/"
"v1alpha1/namespaces/ns1/helmreleases"
),
params={"labelSelector": "label=cluster1"},
allow_redirects=True
)
self.assertEqual(items, helm_releases)
@mock.patch.object(requests.Session, "request")
def test_get_helm_releases_by_label_multipage(self, mock_request):
items = [
{
"kind": "HelmRelease",
"metadata": {
"name": f"helmrelease{idx}",
"namespace": "ns1"
},
}
for idx in range(10)
]
mock_response_page1 = mock.Mock()
mock_response_page1.raise_for_status.return_value = None
mock_response_page1.json.return_value = {
"metadata": {
"continue": "continuetoken",
},
"items": items[:5],
}
mock_response_page2 = mock.Mock()
mock_response_page2.raise_for_status.return_value = None
mock_response_page2.json.return_value = {
"metadata": {
"continue": "",
},
"items": items[5:],
}
mock_request.side_effect = [
mock_response_page1,
mock_response_page2,
]
client = kubernetes.Client(TEST_KUBECONFIG)
helm_releases = client.get_helm_releases_by_label(
"label",
"cluster1",
"ns1"
)
self.assertEqual(mock_request.call_count, 2)
mock_request.assert_has_calls([
mock.call(
"GET",
(
"https://test:6443/apis/addons.stackhpc.com/"
"v1alpha1/namespaces/ns1/helmreleases"
),
params={"labelSelector": "label=cluster1"},
allow_redirects=True
),
mock.call(
"GET",
(
"https://test:6443/apis/addons.stackhpc.com/"
"v1alpha1/namespaces/ns1/helmreleases"
),
params={
"labelSelector": "label=cluster1",
"continue": "continuetoken",
},
allow_redirects=True
),
])
self.assertEqual(items, helm_releases)
@mock.patch.object(kubernetes.Client, "get_helm_releases_by_label")
@mock.patch.object(kubernetes.Client, "get_manifests_by_label")
def test_get_addons_by_label_found(
self,
mock_get_manifests,
mock_get_helm_releases
):
manifests = [
{
"kind": "Manifests",
"metadata": {
"name": f"manifests{idx}",
"namespace": "ns1"
},
}
for idx in range(5)
]
helm_releases = [
{
"kind": "HelmRelease",
"metadata": {
"name": f"helmrelease{idx}",
"namespace": "ns1"
},
}
for idx in range(5)
]
mock_get_manifests.return_value = manifests
mock_get_helm_releases.return_value = helm_releases
client = kubernetes.Client(TEST_KUBECONFIG)
addons = client.get_addons_by_label("label", "cluster1", "ns1")
mock_get_manifests.assert_called_once_with(
"label",
"cluster1",
"ns1"
)
mock_get_helm_releases.assert_called_once_with(
"label",
"cluster1",
"ns1"
)
self.assertEqual(manifests + helm_releases, addons)


@@ -0,0 +1,29 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.cluster_api import driver
from magnum.tests.unit.db import base
from magnum.tests.unit.objects import utils as obj_utils
class ClusterAPIDriverTest(base.DbTestCase):
def setUp(self):
super(ClusterAPIDriverTest, self).setUp()
self.driver = driver.Driver()
self.cluster_obj = obj_utils.create_test_cluster(
self.context, name='cluster_example_A',
master_flavor_id="flavor_small",
flavor_id="flavor_medium")
self.cluster_template = self.cluster_obj.cluster_template
self.cluster_template.labels = {'kube_tag': 'v1.24.3'}
# TODO(johngarbutt) : complete this testing!!