Adding support for displaying kubernetes versions
In preparation for supporting kubernetes upgrades, I am adding the following CLIs (and REST APIs) to show the supported kubernetes versions and their state in the system: +---------+--------+-----------+ | version | target | state | +---------+--------+-----------+ | v1.16.2 | True | active | +---------+--------+-----------+ +-------------------+-----------+ | Property | Value | +-------------------+-----------+ | applied_patches | [] | | available_patches | [] | | downgrade_to | [] | | state | active | | target | True | | upgrade_from | [] | | version | v1.16.2 | +-------------------+-----------+ This also includes new utilities to query version info from the kubernetes API and a set of unit tests for the new code. Change-Id: I0dc4ece131daeb8cbce3e621e18b93276b2324ff Story: 2006590 Task: 36724 Signed-off-by: Bart Wensley <barton.wensley@windriver.com>
This commit is contained in:
parent
487f6a1f5f
commit
f4c05b477c
|
@ -1,2 +1,2 @@
|
|||
SRC_DIR="cgts-client"
|
||||
TIS_PATCH_VER=71
|
||||
TIS_PATCH_VER=72
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
#
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
|
||||
|
||||
import testtools
|
||||
|
||||
from cgtsclient.tests import utils
|
||||
import cgtsclient.v1.kube_version
|
||||
|
||||
KUBE_VERSION = {'version': 'v1.42.2',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.1', 'KUBE.2'],
|
||||
'available_patches': ['KUBE.3'],
|
||||
'target': True,
|
||||
'state': 'TODO',
|
||||
}
|
||||
|
||||
KUBE_VERSION_2 = {'version': 'v1.42.3',
|
||||
'upgrade_from': ['v1.42.2'],
|
||||
'downgrade_to': ['v1.42.2'],
|
||||
'applied_patches': ['KUBE.3', 'KUBE.4'],
|
||||
'available_patches': ['KUBE.5'],
|
||||
'target': False,
|
||||
'state': 'TODO',
|
||||
}
|
||||
|
||||
fixtures = {
|
||||
'/v1/kube_versions':
|
||||
{
|
||||
'GET': (
|
||||
{},
|
||||
{"kube_versions": [KUBE_VERSION, KUBE_VERSION_2]},
|
||||
),
|
||||
},
|
||||
'/v1/kube_versions/%s' % KUBE_VERSION['version']:
|
||||
{
|
||||
'GET': (
|
||||
{},
|
||||
KUBE_VERSION,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class KubeVersionManagerTest(testtools.TestCase):
    """Exercise KubeVersionManager list/get against the fake API fixtures."""

    def setUp(self):
        super(KubeVersionManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = cgtsclient.v1.kube_version.KubeVersionManager(self.api)

    def test_kube_version_list(self):
        kube_versions = self.mgr.list()
        expected_calls = [
            ('GET', '/v1/kube_versions', {}, None),
        ]
        self.assertEqual(self.api.calls, expected_calls)
        self.assertEqual(len(kube_versions), 2)

    def test_kube_version_show(self):
        kube_version = self.mgr.get(KUBE_VERSION['version'])
        expected_calls = [
            ('GET', '/v1/kube_versions/%s' % KUBE_VERSION['version'], {}, None),
        ]
        self.assertEqual(self.api.calls, expected_calls)
        # Every field of the returned resource must match the fixture.
        for field in ('version', 'upgrade_from', 'downgrade_to',
                      'applied_patches', 'available_patches', 'target',
                      'state'):
            self.assertEqual(getattr(kube_version, field),
                             KUBE_VERSION[field])
|
|
@ -0,0 +1,73 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
from cgtsclient.tests import test_shell
|
||||
from cgtsclient.v1.kube_version import KubeVersion
|
||||
|
||||
|
||||
class KubeVersionTest(test_shell.ShellTest):
    """Shell-level tests for the kube-version-list/show commands."""

    # Fake version data shared by both tests below (was duplicated inline).
    FAKE_VERSION = {'version': 'v1.42.2',
                    'upgrade_from': ['v1.42.1'],
                    'downgrade_to': [],
                    'applied_patches': ['KUBE.1', 'KUBE.2'],
                    'available_patches': ['KUBE.3'],
                    'target': True,
                    'state': 'TODO',
                    }

    def setUp(self):
        super(KubeVersionTest, self).setUp()

    def tearDown(self):
        super(KubeVersionTest, self).tearDown()

    @mock.patch('cgtsclient.v1.kube_version.KubeVersionManager.list')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_version_list(self, mock_get_endpoint, mock_get_client,
                               mock_list):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        fake_version = dict(self.FAKE_VERSION)

        mock_list.return_value = [KubeVersion(None, fake_version, True)]
        self.make_env()
        version_results = self.shell("kube-version-list")
        # The list output shows only version/target/state...
        self.assertIn(fake_version['version'], version_results)
        self.assertIn(str(fake_version['target']), version_results)
        self.assertIn(fake_version['state'], version_results)
        # ...and must omit the detail-only fields.
        self.assertNotIn(str(fake_version['upgrade_from']), version_results)
        self.assertNotIn(str(fake_version['downgrade_to']), version_results)
        self.assertNotIn(str(fake_version['applied_patches']), version_results)
        self.assertNotIn(str(fake_version['available_patches']),
                         version_results)

    @mock.patch('cgtsclient.v1.kube_version.KubeVersionManager.get')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_version_show(self, mock_get_endpoint, mock_get_client,
                               mock_get):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        fake_version = dict(self.FAKE_VERSION)

        mock_get.return_value = KubeVersion(None, fake_version, True)
        self.make_env()
        version_results = self.shell("kube-version-show %s" %
                                     fake_version['version'])
        # The show output contains every field of the version.
        self.assertIn(fake_version['version'], version_results)
        self.assertIn(str(fake_version['upgrade_from']), version_results)
        self.assertIn(str(fake_version['downgrade_to']), version_results)
        self.assertIn(str(fake_version['target']), version_results)
        self.assertIn(fake_version['state'], version_results)
        self.assertIn(str(fake_version['applied_patches']), version_results)
        self.assertIn(str(fake_version['available_patches']), version_results)
|
@ -53,6 +53,7 @@ from cgtsclient.v1 import istor
|
|||
from cgtsclient.v1 import isystem
|
||||
from cgtsclient.v1 import itrapdest
|
||||
from cgtsclient.v1 import iuser
|
||||
from cgtsclient.v1 import kube_version
|
||||
from cgtsclient.v1 import label
|
||||
from cgtsclient.v1 import license
|
||||
from cgtsclient.v1 import lldp_agent
|
||||
|
@ -159,3 +160,4 @@ class Client(http.HTTPClient):
|
|||
self.fernet = fernet.FernetManager(self)
|
||||
self.app = app.AppManager(self)
|
||||
self.host_fs = host_fs.HostFsManager(self)
|
||||
self.kube_version = kube_version.KubeVersionManager(self)
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from cgtsclient.common import base
|
||||
|
||||
|
||||
class KubeVersion(base.Resource):
    """Client-side representation of a kubernetes version resource."""

    def __repr__(self):
        return "<kube_version %s>" % self._info


class KubeVersionManager(base.Manager):
    """Manager for the kubernetes version REST resource."""

    resource_class = KubeVersion

    @staticmethod
    def _path(name=None):
        # With a name, address one version; otherwise the collection.
        if name:
            return '/v1/kube_versions/%s' % name
        return '/v1/kube_versions'

    def list(self):
        """Retrieve the list of kubernetes versions known to the system."""

        return self._list(self._path(), 'kube_versions')

    def get(self, version):
        """Retrieve the details of a given kubernetes version

        :param version: kubernetes version
        :returns: the matching resource, or None if the server returned
                  an empty result.
        """
        results = self._list(self._path(version))
        return results[0] if results else None
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from cgtsclient.common import utils
|
||||
from cgtsclient import exc
|
||||
from collections import OrderedDict
|
||||
|
||||
|
||||
def _print_kube_version_show(kube_version):
    """Print the fields of a single kubernetes version, sorted by name."""
    data = kube_version.to_dict()
    ordered = OrderedDict(sorted(data.items(), key=lambda item: item[0]))
    utils.print_dict(ordered, wrap=72)
|
||||
|
||||
|
||||
def do_kube_version_list(cc, args):
    """List all kubernetes versions"""
    # The list view shows only the summary columns; labels match fields.
    columns = ['version', 'target', 'state']
    versions = cc.kube_version.list()
    utils.print_list(versions, columns, columns, sortby=0)
|
||||
|
||||
|
||||
@utils.arg('version', metavar='<kubernetes version>',
           help="Kubernetes version")
def do_kube_version_show(cc, args):
    """Show kubernetes version details"""
    try:
        version = cc.kube_version.get(args.version)
    except exc.HTTPNotFound:
        raise exc.CommandError('kubernetes version not found: %s' %
                               args.version)
    # KubeVersionManager.get() returns None (rather than raising) when the
    # server returns an empty result, so guard before printing.
    if version is None:
        raise exc.CommandError('kubernetes version not found: %s' %
                               args.version)
    _print_kube_version_show(version)
|
|
@ -41,6 +41,7 @@ from cgtsclient.v1 import isystem_shell
|
|||
from cgtsclient.v1 import itrapdest_shell
|
||||
from cgtsclient.v1 import iuser_shell
|
||||
|
||||
from cgtsclient.v1 import kube_version_shell
|
||||
from cgtsclient.v1 import label_shell
|
||||
from cgtsclient.v1 import license_shell
|
||||
from cgtsclient.v1 import lldp_agent_shell
|
||||
|
@ -119,6 +120,7 @@ COMMAND_MODULES = [
|
|||
label_shell,
|
||||
app_shell,
|
||||
host_fs_shell,
|
||||
kube_version_shell,
|
||||
]
|
||||
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[tox]
|
||||
envlist = py27,pep8,cover,pylint
|
||||
envlist = py27,pep8,pylint
|
||||
minversion = 1.6
|
||||
#skipsdist = True
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
SRC_DIR="."
|
||||
COPY_LIST_TO_TAR="LICENSE sysinv-agent sysinv-agent.conf"
|
||||
EXCLUDE_LIST_FROM_TAR="centos sysinv-agent.bb"
|
||||
TIS_PATCH_VER=5
|
||||
TIS_PATCH_VER=6
|
||||
|
|
|
@ -39,6 +39,7 @@ from sysinv.api.controllers.v1 import health
|
|||
from sysinv.api.controllers.v1 import helm_charts
|
||||
from sysinv.api.controllers.v1 import host
|
||||
from sysinv.api.controllers.v1 import kube_app
|
||||
from sysinv.api.controllers.v1 import kube_version
|
||||
from sysinv.api.controllers.v1 import label
|
||||
from sysinv.api.controllers.v1 import interface
|
||||
from sysinv.api.controllers.v1 import interface_network
|
||||
|
@ -249,6 +250,9 @@ class V1(base.APIBase):
|
|||
host_fs = [link.Link]
|
||||
"Links to the host_fs resource"
|
||||
|
||||
kube_versions = [link.Link]
|
||||
"Links to the kube_version resource"
|
||||
|
||||
@classmethod
|
||||
def convert(self):
|
||||
v1 = V1()
|
||||
|
@ -775,6 +779,13 @@ class V1(base.APIBase):
|
|||
'host_fs', '',
|
||||
bookmark=True)]
|
||||
|
||||
v1.kube_versions = [link.Link.make_link('self', pecan.request.host_url,
|
||||
'kube_versions', ''),
|
||||
link.Link.make_link('bookmark',
|
||||
pecan.request.host_url,
|
||||
'kube_versions', '',
|
||||
bookmark=True)]
|
||||
|
||||
return v1
|
||||
|
||||
|
||||
|
@ -843,6 +854,7 @@ class Controller(rest.RestController):
|
|||
datanetworks = datanetwork.DataNetworkController()
|
||||
interface_datanetworks = interface_datanetwork.InterfaceDataNetworkController()
|
||||
host_fs = host_fs.HostFsController()
|
||||
kube_versions = kube_version.KubeVersionController()
|
||||
|
||||
@wsme_pecan.wsexpose(V1)
|
||||
def get(self):
|
||||
|
|
|
@ -0,0 +1,128 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from pecan import rest
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from sysinv.api.controllers.v1 import base
|
||||
from sysinv.api.controllers.v1 import collection
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv import objects
|
||||
|
||||
|
||||
class KubeVersion(base.APIBase):
    """API representation of a k8s version."""

    version = wtypes.text
    "Unique version for this entry"

    upgrade_from = [wtypes.text]
    "List of versions that can upgrade to this version"

    downgrade_to = [wtypes.text]
    "List of versions that this version can downgrade to"

    applied_patches = [wtypes.text]
    "List of patches that must be applied before upgrading to this version"

    available_patches = [wtypes.text]
    "List of patches that must be available before upgrading to this version"

    target = bool
    "Denotes whether this is the target version"

    state = wtypes.text
    "State of this version"

    def __init__(self, **kwargs):
        # Mirror the fields declared on the kube_version object; only set
        # those that are exposed as attributes of this API type.
        self.fields = objects.kube_version.fields.keys()
        for field in self.fields:
            if hasattr(self, field):
                setattr(self, field, kwargs.get(field, wtypes.Unset))

    @classmethod
    def convert_with_links(cls, rpc_kube_version, expand=True):
        api_version = cls(**rpc_kube_version.as_dict())
        if not expand:
            api_version.unset_fields_except(['version', 'target', 'state'])

        # The version is not a database object so does not have timestamps.
        api_version.created_at = wtypes.Unset
        api_version.updated_at = wtypes.Unset
        return api_version
|
||||
|
||||
|
||||
class KubeVersionCollection(collection.Collection):
    """API representation of a collection of k8s versions."""

    kube_versions = [KubeVersion]
    "A list containing kubernetes version objects"

    def __init__(self, **kwargs):
        self._type = 'kube_versions'

    @classmethod
    def convert_with_links(cls, rpc_kube_version, expand=False):
        coll = cls()
        coll.kube_versions = [
            KubeVersion.convert_with_links(item, expand)
            for item in rpc_kube_version]
        return coll
|
||||
|
||||
|
||||
class KubeVersionController(rest.RestController):
    """REST controller for Kubernetes Versions."""

    def __init__(self, parent=None, **kwargs):
        self._parent = parent
        self._kube_operator = kubernetes.KubeOperator()

    @staticmethod
    def _is_target(state):
        """Return whether a version in the given state is the target.

        For now, the active version will be marked as the target. When
        upgrades are supported, we will also have to consider whether
        an upgrade is in progress to determine the target.
        """
        return state == kubernetes.KUBE_STATE_ACTIVE

    @wsme_pecan.wsexpose(KubeVersionCollection)
    def get_all(self):
        """Retrieve a list of kubernetes versions."""

        # Get the dynamic version information
        version_states = self._kube_operator.kube_get_version_states()

        rpc_kube_versions = []
        for version in kubernetes.get_kube_versions():
            version_obj = KubeVersion()
            version_obj.version = version['version']
            version_obj.upgrade_from = version['upgrade_from']
            version_obj.downgrade_to = version['downgrade_to']
            version_obj.applied_patches = version['applied_patches']
            version_obj.available_patches = version['available_patches']
            version_obj.state = version_states[version['version']]
            version_obj.target = self._is_target(version_obj.state)
            rpc_kube_versions.append(version_obj)

        return KubeVersionCollection.convert_with_links(rpc_kube_versions)

    @wsme_pecan.wsexpose(KubeVersion, wtypes.text)
    def get_one(self, version):
        """Retrieve information about the given kubernetes version."""

        # Get the static version information
        rpc_kube_version = objects.kube_version.get_by_version(version)

        # Get the dynamic version information
        version_states = self._kube_operator.kube_get_version_states()
        rpc_kube_version.state = version_states[version]
        rpc_kube_version.target = self._is_target(rpc_kube_version.state)
        return KubeVersion.convert_with_links(rpc_kube_version)
|
|
@ -236,26 +236,6 @@ class CephPoolSetParamFailure(CephFailure):
|
|||
"Reason: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppUploadFailure(SysinvException):
|
||||
message = _("Upload of application %(name)s (%(version)s) failed: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppApplyFailure(SysinvException):
|
||||
message = _("Deployment of application %(name)s (%(version)s) failed: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppDeleteFailure(SysinvException):
|
||||
message = _("Delete of application %(name)s (%(version)s) failed: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppAbort(SysinvException):
|
||||
message = _("Operation aborted by user.")
|
||||
|
||||
|
||||
class HelmTillerFailure(SysinvException):
|
||||
message = _("Helm operation failure: %(reason)s")
|
||||
|
||||
|
||||
class InvalidCPUInfo(Invalid):
|
||||
message = _("Unacceptable CPU info") + ": %(reason)s"
|
||||
|
||||
|
@ -565,20 +545,6 @@ class CertificateAlreadyExists(Conflict):
|
|||
message = _("A Certificate with uuid %(uuid)s already exists.")
|
||||
|
||||
|
||||
class HelmOverrideAlreadyExists(Conflict):
|
||||
message = _("A HelmOverride with name %(name)s and namespace "
|
||||
"%(namespace)s already exists.")
|
||||
|
||||
|
||||
class KubeAppAlreadyExists(Conflict):
|
||||
message = _("An application with name %(name)s %(version)s already exists.")
|
||||
|
||||
|
||||
class KubeAppChartReleaseAlreadyExists(Conflict):
|
||||
message = _("A chart release with name %(name)s and namespace "
|
||||
"%(namespace)s for application %(app_id)s already exists.")
|
||||
|
||||
|
||||
class InstanceDeployFailure(Invalid):
|
||||
message = _("Failed to deploy instance: %(reason)s")
|
||||
|
||||
|
@ -911,32 +877,10 @@ class CertificateNotFound(NotFound):
|
|||
message = _("No certificate with uuid %(uuid)s")
|
||||
|
||||
|
||||
class HelmOverrideNotFound(NotFound):
|
||||
message = _("No helm override with name %(name)s and namespace "
|
||||
"%(namespace)s")
|
||||
|
||||
|
||||
class CertificateTypeNotFound(NotFound):
|
||||
message = _("No certificate type of %(certtype)s")
|
||||
|
||||
|
||||
class KubeAppNotFound(NotFound):
|
||||
message = _("No application with name %(name)s.")
|
||||
|
||||
|
||||
class KubeAppInactiveNotFound(NotFound):
|
||||
message = _("No inactive application with name %(name)s and version %(version)s")
|
||||
|
||||
|
||||
class KubeAppChartReleaseNotFound(NotFound):
|
||||
message = _("No chart release with name %(name)s and "
|
||||
"namespace %(namespace)s for application %(app_id)s")
|
||||
|
||||
|
||||
class KubeAppReleasesNotFound(NotFound):
|
||||
message = _("No releases found for application %(app_id)s")
|
||||
|
||||
|
||||
class DockerRegistryCredentialNotFound(NotFound):
|
||||
message = _("Credentials to access local docker registry "
|
||||
"for user %(name)s could not be found.")
|
||||
|
@ -1066,14 +1010,6 @@ class SysInvSignalTimeout(SysinvException):
|
|||
message = "Sysinv Timeout."
|
||||
|
||||
|
||||
class KubeAppProgressMonitorTimeout(SysinvException):
|
||||
message = "Armada execution progress monitor timed out."
|
||||
|
||||
|
||||
class K8sNamespaceDeleteTimeout(SysinvException):
|
||||
message = "Namespace %(name)s deletion timeout."
|
||||
|
||||
|
||||
class InvalidEndpoint(SysinvException):
|
||||
message = "The provided endpoint is invalid"
|
||||
|
||||
|
@ -1189,10 +1125,6 @@ class HostLabelInvalid(Invalid):
|
|||
message = _("Host label is invalid. Reason: %(reason)s")
|
||||
|
||||
|
||||
class K8sNodeNotFound(NotFound):
|
||||
message = _("Kubernetes Node %(name)s could not be found.")
|
||||
|
||||
|
||||
class PickleableException(Exception):
|
||||
"""
|
||||
Pickleable Exception
|
||||
|
@ -1304,10 +1236,6 @@ class IncompleteCephMonNetworkConfig(CephFailure):
|
|||
"found: %(results)s")
|
||||
|
||||
|
||||
class InvalidHelmNamespace(Invalid):
|
||||
message = _("Invalid helm overrides namespace (%(namespace)s) for chart %(chart)s.")
|
||||
|
||||
|
||||
class LocalManagementPersonalityNotFound(NotFound):
|
||||
message = _("Local management personality is None: "
|
||||
"config_uuid=%(config_uuid)s, config_dict=%(config_dict)s, "
|
||||
|
@ -1325,10 +1253,6 @@ class LocalHostUUIDNotFound(NotFound):
|
|||
message = _("Local Host UUID not found")
|
||||
|
||||
|
||||
class InvalidHelmDockerImageSource(Invalid):
|
||||
message = _("Invalid docker image source: %(source)s. Must be one of %(valid_srcs)s")
|
||||
|
||||
|
||||
# DataNetwork
|
||||
class UnsupportedInterfaceDataNetworkType(Conflict):
|
||||
message = _("Interface with datanetwork type '%(datanetworktype)s' "
|
||||
|
@ -1390,19 +1314,107 @@ class FilesystemAlreadyExists(Conflict):
|
|||
class FilesystemNotFound(NotFound):
|
||||
message = _("Host FS with id %(fs_id)s not found")
|
||||
|
||||
#
|
||||
# Kubernetes application and Helm related exceptions
|
||||
#
|
||||
|
||||
|
||||
class KubeAppUploadFailure(SysinvException):
|
||||
message = _("Upload of application %(name)s (%(version)s) failed: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppApplyFailure(SysinvException):
|
||||
message = _("Deployment of application %(name)s (%(version)s) failed: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppDeleteFailure(SysinvException):
|
||||
message = _("Delete of application %(name)s (%(version)s) failed: %(reason)s")
|
||||
|
||||
|
||||
class KubeAppAbort(SysinvException):
|
||||
message = _("Operation aborted by user.")
|
||||
|
||||
|
||||
class KubeAppAlreadyExists(Conflict):
|
||||
message = _("An application with name %(name)s %(version)s already exists.")
|
||||
|
||||
|
||||
class KubeAppChartReleaseAlreadyExists(Conflict):
|
||||
message = _("A chart release with name %(name)s and namespace "
|
||||
"%(namespace)s for application %(app_id)s already exists.")
|
||||
|
||||
|
||||
class KubeAppNotFound(NotFound):
|
||||
message = _("No application with name %(name)s.")
|
||||
|
||||
|
||||
class KubeAppInactiveNotFound(NotFound):
|
||||
message = _("No inactive application with name %(name)s and version %(version)s")
|
||||
|
||||
|
||||
class KubeAppChartReleaseNotFound(NotFound):
|
||||
message = _("No chart release with name %(name)s and "
|
||||
"namespace %(namespace)s for application %(app_id)s")
|
||||
|
||||
|
||||
class KubeAppReleasesNotFound(NotFound):
|
||||
message = _("No releases found for application %(app_id)s")
|
||||
|
||||
|
||||
class KubeAppProgressMonitorTimeout(SysinvException):
|
||||
message = "Armada execution progress monitor timed out."
|
||||
|
||||
|
||||
class KubeNamespaceDeleteTimeout(SysinvException):
|
||||
message = "Namespace %(name)s deletion timeout."
|
||||
|
||||
|
||||
class HelmTillerFailure(SysinvException):
|
||||
message = _("Helm operation failure: %(reason)s")
|
||||
|
||||
|
||||
class HelmOverrideNotFound(NotFound):
|
||||
message = _("No helm override with name %(name)s and namespace "
|
||||
"%(namespace)s")
|
||||
|
||||
|
||||
class HelmOverrideAlreadyExists(Conflict):
|
||||
message = _("A HelmOverride with name %(name)s and namespace "
|
||||
"%(namespace)s already exists.")
|
||||
|
||||
|
||||
class InvalidHelmNamespace(Invalid):
|
||||
message = _("Invalid helm overrides namespace (%(namespace)s) for chart %(chart)s.")
|
||||
|
||||
|
||||
class InvalidHelmDockerImageSource(Invalid):
|
||||
message = _("Invalid docker image source: %(source)s. Must be one of %(valid_srcs)s")
|
||||
|
||||
#
|
||||
# Kubernetes related exceptions
|
||||
#
|
||||
|
||||
|
||||
class KubeNodeNotFound(NotFound):
|
||||
message = _("Kubernetes Node %(name)s could not be found.")
|
||||
|
||||
|
||||
class KubeHostUpgradeAlreadyExists(Conflict):
|
||||
message = _("A Kube Host Upgrade with id %(host_upgrade_id)s already exists.")
|
||||
message = _("A Kubernetes Host Upgrade with id %(host_upgrade_id)s already exists.")
|
||||
|
||||
|
||||
class KubeHostUpgradeNotFound(NotFound):
|
||||
message = _("Kube Host Upgrade with id %(host_upgrade_id)s not found")
|
||||
message = _("Kubernetes Host Upgrade with id %(host_upgrade_id)s not found")
|
||||
|
||||
|
||||
class KubeUpgradeAlreadyExists(Conflict):
|
||||
message = _(
|
||||
"A Kube Upgrade with id %(upgrade_id)s already exists.")
|
||||
"A Kubernetes Upgrade with id %(upgrade_id)s already exists.")
|
||||
|
||||
|
||||
class KubeUpgradeNotFound(NotFound):
|
||||
message = _("Kube Upgrade with id %(upgrade_id)s not found")
|
||||
message = _("Kubernetes Upgrade with id %(upgrade_id)s not found")
|
||||
|
||||
|
||||
class KubeVersionNotFound(NotFound):
|
||||
message = _("Kubernetes version %(version)s not found")
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
""" System Inventory Kubernetes Utilities and helper functions."""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from distutils.version import LooseVersion
|
||||
import json
|
||||
|
||||
from kubernetes import config
|
||||
|
@ -19,16 +20,41 @@ from kubernetes import client
|
|||
from kubernetes.client import Configuration
|
||||
from kubernetes.client.rest import ApiException
|
||||
from six.moves import http_client as httplib
|
||||
|
||||
from sysinv.common import exception
|
||||
from sysinv.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Possible states for each supported kubernetes version
|
||||
KUBE_STATE_AVAILABLE = 'available'
|
||||
KUBE_STATE_ACTIVE = 'active'
|
||||
KUBE_STATE_PARTIAL = 'partial'
|
||||
|
||||
# Kubernetes namespaces
|
||||
NAMESPACE_KUBE_SYSTEM = 'kube-system'
|
||||
|
||||
# Kubernetes control plane components
|
||||
KUBE_APISERVER = 'kube-apiserver'
|
||||
KUBE_CONTROLLER_MANAGER = 'kube-controller-manager'
|
||||
KUBE_SCHEDULER = 'kube-scheduler'
|
||||
|
||||
|
||||
def get_kube_versions():
    """Provides a list of supported kubernetes versions.

    Each entry describes one version: its upgrade/downgrade paths and the
    patches that must be applied/available before upgrading to it.
    """
    v1_16_2 = {
        'version': 'v1.16.2',
        'upgrade_from': [],
        'downgrade_to': [],
        'applied_patches': [],
        'available_patches': [],
    }
    return [v1_16_2]
|
||||
|
||||
|
||||
class KubeOperator(object):
|
||||
|
||||
def __init__(self, dbapi):
|
||||
self._dbapi = dbapi
|
||||
def __init__(self):
|
||||
self._kube_client_batch = None
|
||||
self._kube_client_core = None
|
||||
self._kube_client_custom_objects = None
|
||||
|
@ -68,7 +94,7 @@ class KubeOperator(object):
|
|||
reason = json.loads(e.body).get('message', "")
|
||||
raise exception.HostLabelInvalid(reason=reason)
|
||||
elif e.status == httplib.NOT_FOUND:
|
||||
raise exception.K8sNodeNotFound(name=name)
|
||||
raise exception.KubeNodeNotFound(name=name)
|
||||
else:
|
||||
raise
|
||||
except Exception as e:
|
||||
|
@ -101,6 +127,33 @@ class KubeOperator(object):
|
|||
LOG.error("Kubernetes exception in list_namespaced_pod: %s" % e)
|
||||
raise
|
||||
|
||||
def kube_get_image_by_pod_name(self, pod_name, namespace, container_name):
|
||||
"""Returns the image for the specified container."""
|
||||
LOG.debug("kube_get_image_by_pod_name pod_name=%s, namespace=%s, "
|
||||
"container_name=%s" % (pod_name, namespace, container_name))
|
||||
try:
|
||||
# Retrieve the named pod
|
||||
api_response = \
|
||||
self._get_kubernetesclient_core().list_namespaced_pod(
|
||||
namespace, field_selector="metadata.name=%s" % pod_name)
|
||||
# We expect only one pod with this name
|
||||
if len(api_response.items) != 1:
|
||||
LOG.warn("Expected one pod with pod_name=%s, namespace=%s, "
|
||||
"container_name=%s but found %d" %
|
||||
(pod_name, namespace, container_name,
|
||||
len(api_response.items)))
|
||||
# Use the first pod
|
||||
if len(api_response.items) >= 1:
|
||||
pod = api_response.items[0]
|
||||
for container in pod.spec.containers:
|
||||
if container.name == container_name:
|
||||
return container.image
|
||||
|
||||
return None
|
||||
except ApiException as e:
|
||||
LOG.error("Kubernetes exception in list_namespaced_pod: %s" % e)
|
||||
raise
|
||||
|
||||
def kube_create_namespace(self, namespace):
|
||||
body = {'metadata': {'name': namespace}}
|
||||
|
||||
|
@ -303,3 +356,93 @@ class KubeOperator(object):
|
|||
LOG.error("Failed to delete custom object, Namespace %s: %s"
|
||||
% (namespace, e))
|
||||
raise
|
||||
|
||||
def kube_get_control_plane_versions(self):
|
||||
"""Returns the lowest control plane component version on each
|
||||
master node."""
|
||||
c = self._get_kubernetesclient_core()
|
||||
|
||||
# First get a list of master nodes
|
||||
master_nodes = list()
|
||||
api_response = c.list_node(
|
||||
label_selector="node-role.kubernetes.io/master")
|
||||
for node in api_response.items:
|
||||
master_nodes.append(node.metadata.name)
|
||||
|
||||
node_versions = dict()
|
||||
for node_name in master_nodes:
|
||||
versions = dict()
|
||||
for component in [KUBE_APISERVER,
|
||||
KUBE_CONTROLLER_MANAGER,
|
||||
KUBE_SCHEDULER]:
|
||||
# Control plane pods are named by component and node.
|
||||
# E.g. kube-apiserver-controller-0
|
||||
pod_name = component + '-' + node_name
|
||||
image = self.kube_get_image_by_pod_name(
|
||||
pod_name, NAMESPACE_KUBE_SYSTEM, component)
|
||||
versions[component] = image.rsplit(':')[-1]
|
||||
|
||||
# Calculate the lowest version
|
||||
lowest_version = min(
|
||||
LooseVersion(versions[KUBE_APISERVER]),
|
||||
LooseVersion(versions[KUBE_CONTROLLER_MANAGER]),
|
||||
LooseVersion(versions[KUBE_SCHEDULER]))
|
||||
node_versions[node_name] = str(lowest_version)
|
||||
|
||||
return node_versions
|
||||
|
||||
def kube_get_kubelet_versions(self):
|
||||
"""Returns the kubelet version on each node."""
|
||||
c = self._get_kubernetesclient_core()
|
||||
|
||||
kubelet_versions = dict()
|
||||
|
||||
api_response = c.list_node()
|
||||
for node in api_response.items:
|
||||
kubelet_versions[node.metadata.name] = \
|
||||
node.status.node_info.kubelet_version
|
||||
|
||||
return kubelet_versions
|
||||
|
||||
def kube_get_version_states(self):
|
||||
"""Returns the state of each known kubernetes version."""
|
||||
|
||||
# Set counts to 0
|
||||
version_counts = dict()
|
||||
for version in get_kube_versions():
|
||||
version_counts[version['version']] = 0
|
||||
|
||||
# Count versions running on control plane
|
||||
cp_versions = self.kube_get_control_plane_versions()
|
||||
for cp_version in cp_versions.values():
|
||||
if cp_version in version_counts:
|
||||
version_counts[cp_version] += 1
|
||||
else:
|
||||
LOG.error("Unknown control plane version %s running." %
|
||||
cp_version)
|
||||
|
||||
# Count versions running on kubelets
|
||||
kubelet_versions = self.kube_get_kubelet_versions()
|
||||
for kubelet_version in kubelet_versions.values():
|
||||
if kubelet_version in version_counts:
|
||||
version_counts[kubelet_version] += 1
|
||||
else:
|
||||
LOG.error("Unknown kubelet version %s running." %
|
||||
kubelet_version)
|
||||
|
||||
version_states = dict()
|
||||
active_candidates = list()
|
||||
for version, count in version_counts.items():
|
||||
if count > 0:
|
||||
# This version is at least partially running
|
||||
version_states[version] = KUBE_STATE_PARTIAL
|
||||
active_candidates.append(version)
|
||||
else:
|
||||
# This version is not running anywhere
|
||||
version_states[version] = KUBE_STATE_AVAILABLE
|
||||
|
||||
# If only a single version is running, then mark it as active
|
||||
if len(active_candidates) == 1:
|
||||
version_states[active_candidates[0]] = KUBE_STATE_ACTIVE
|
||||
|
||||
return version_states
|
||||
|
|
|
@ -129,7 +129,7 @@ class AppOperator(object):
|
|||
self._fm_api = fm_api.FaultAPIs()
|
||||
self._docker = DockerHelper(self._dbapi)
|
||||
self._helm = helm.HelmOperator(self._dbapi)
|
||||
self._kube = kubernetes.KubeOperator(self._dbapi)
|
||||
self._kube = kubernetes.KubeOperator()
|
||||
self._utils = kube_app.KubeAppHelper(self._dbapi)
|
||||
self._lock = threading.Lock()
|
||||
|
||||
|
@ -974,7 +974,7 @@ class AppOperator(object):
|
|||
body['metadata']['labels'].update(label_dict)
|
||||
try:
|
||||
self._kube.kube_patch_node(hostname, body)
|
||||
except exception.K8sNodeNotFound:
|
||||
except exception.KubeNodeNotFound:
|
||||
pass
|
||||
|
||||
def _assign_host_labels(self, hosts, labels):
|
||||
|
@ -1168,7 +1168,7 @@ class AppOperator(object):
|
|||
time.sleep(1)
|
||||
|
||||
if loop_timeout > timeout:
|
||||
raise exception.K8sNamespaceDeleteTimeout(name=namespace)
|
||||
raise exception.KubeNamespaceDeleteTimeout(name=namespace)
|
||||
LOG.info("Namespace %s delete completed." % namespace)
|
||||
except Exception as e:
|
||||
LOG.error(e)
|
||||
|
|
|
@ -227,7 +227,7 @@ class ConductorManager(service.PeriodicService):
|
|||
self._docker = kube_app.DockerHelper(self.dbapi)
|
||||
self._ceph = iceph.CephOperator(self.dbapi)
|
||||
self._helm = helm.HelmOperator(self.dbapi)
|
||||
self._kube = kubernetes.KubeOperator(self.dbapi)
|
||||
self._kube = kubernetes.KubeOperator()
|
||||
self._kube_app_helper = kube_api.KubeAppHelper(self.dbapi)
|
||||
self._fernet = fernet.FernetOperator()
|
||||
|
||||
|
@ -10446,7 +10446,7 @@ class ConductorManager(service.PeriodicService):
|
|||
body['metadata']['labels'].update(label_dict)
|
||||
try:
|
||||
self._kube.kube_patch_node(host.hostname, body)
|
||||
except exception.K8sNodeNotFound:
|
||||
except exception.KubeNodeNotFound:
|
||||
LOG.info("Host %s does not exist in kubernetes yet, label will "
|
||||
"be added after node's unlock by audit" % host.hostname)
|
||||
|
||||
|
|
|
@ -40,6 +40,7 @@ from sysinv.objects import kube_app
|
|||
from sysinv.objects import kube_app_releases
|
||||
from sysinv.objects import kube_host_upgrade
|
||||
from sysinv.objects import kube_upgrade
|
||||
from sysinv.objects import kube_version
|
||||
from sysinv.objects import interface
|
||||
from sysinv.objects import interface_ae
|
||||
from sysinv.objects import interface_ethernet
|
||||
|
@ -189,6 +190,7 @@ kube_app = kube_app.KubeApp
|
|||
kube_app_releases = kube_app_releases.KubeAppReleases
|
||||
kube_host_upgrade = kube_host_upgrade.KubeHostUpgrade
|
||||
kube_upgrade = kube_upgrade.KubeUpgrade
|
||||
kube_version = kube_version.KubeVersion
|
||||
datanetwork = datanetwork.DataNetwork
|
||||
host_fs = host_fs.HostFS
|
||||
|
||||
|
@ -260,6 +262,7 @@ __all__ = (system,
|
|||
kube_app_releases,
|
||||
kube_host_upgrade,
|
||||
kube_upgrade,
|
||||
kube_version,
|
||||
datanetwork,
|
||||
interface_network,
|
||||
host_fs,
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.objects import base
|
||||
from sysinv.objects import utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class KubeVersion(base.SysinvObject):
    """Object representing a single supported kubernetes version.

    Version data comes from the static list returned by
    kubernetes.get_kube_versions() rather than from the database.
    """

    # VERSION 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'version': utils.str_or_none,
        'upgrade_from': utils.list_of_strings_or_none,
        'downgrade_to': utils.list_of_strings_or_none,
        'applied_patches': utils.list_of_strings_or_none,
        'available_patches': utils.list_of_strings_or_none,
        'target': utils.bool_or_none,
        'state': utils.str_or_none,
    }

    @classmethod
    def get_by_version(cls, version):
        """Build a KubeVersion object for the given version string.

        :param version: the kubernetes version to look up (e.g. 'v1.16.2')
        :raises: KubeVersionNotFound when the version is not known
        """
        for entry in kubernetes.get_kube_versions():
            if entry['version'] != version:
                continue
            version_obj = KubeVersion()
            # Must set created/updated_at as these are defined in the
            # base class.
            version_obj.created_at = None
            version_obj.updated_at = None
            version_obj.version = entry['version']
            version_obj.upgrade_from = entry['upgrade_from']
            version_obj.downgrade_to = entry['downgrade_to']
            version_obj.applied_patches = entry['applied_patches']
            version_obj.available_patches = entry['available_patches']
            # Callers determine the real target/state; start with
            # conservative defaults.
            version_obj.target = False
            version_obj.state = 'unknown'
            return version_obj

        raise exception.KubeVersionNotFound(version)

    def can_upgrade_from(self, version):
        """Return True if upgrading from the given version is allowed."""
        return version in self.upgrade_from

    def can_downgrade_to(self, version):
        """Return True if downgrading to the given version is allowed."""
        return version in self.downgrade_to
|
|
@ -0,0 +1,113 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""
|
||||
Tests for the API /kube_version/ methods.
|
||||
"""
|
||||
|
||||
import mock
|
||||
import webtest.app
|
||||
|
||||
from sysinv.tests.api import base
|
||||
|
||||
FAKE_KUBE_VERSIONS = [
|
||||
{'version': 'v1.42.1',
|
||||
'upgrade_from': [],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.2',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.1', 'KUBE.2'],
|
||||
'available_patches': ['KUBE.3'],
|
||||
},
|
||||
{'version': 'v1.43.1',
|
||||
'upgrade_from': ['v1.42.2'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.11', 'KUBE.12'],
|
||||
'available_patches': ['KUBE.13'],
|
||||
},
|
||||
{'version': 'v1.43.2',
|
||||
'upgrade_from': ['v1.43.1', 'v1.42.2'],
|
||||
'downgrade_to': ['v1.43.1'],
|
||||
'applied_patches': ['KUBE.14', 'KUBE.15'],
|
||||
'available_patches': ['KUBE.16'],
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def mock_get_kube_versions():
    """Stand-in for kubernetes.get_kube_versions; returns the fake data."""
    return FAKE_KUBE_VERSIONS
|
||||
|
||||
|
||||
class TestKubeVersion(base.FunctionalTest):
    """Base class for kube_version API tests.

    Mocks out the kubernetes version state query so the tests do not
    need a running cluster.
    """

    def setUp(self):
        super(TestKubeVersion, self).setUp()

        def mock_kube_get_version_states(obj):
            return {'v1.42.1': 'available',
                    'v1.42.2': 'available',
                    'v1.43.1': 'active',
                    'v1.43.2': 'available'}
        self.mocked_kube_get_version_states = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.kube_get_version_states',
            mock_kube_get_version_states)
        self.mocked_kube_get_version_states.start()
        # addCleanup guarantees the patch is undone even when a subclass
        # setUp fails part way, unlike a manual stop in tearDown.
        self.addCleanup(self.mocked_kube_get_version_states.stop)
|
||||
|
||||
|
||||
@mock.patch('sysinv.common.kubernetes.get_kube_versions',
            mock_get_kube_versions)
class TestListKubeVersions(TestKubeVersion):
    """Tests for the kube_versions REST API GET methods."""

    def _assert_version_fields(self, result, expected):
        """Verify the static version attributes against the fake data."""
        for field in ('version', 'upgrade_from', 'downgrade_to',
                      'applied_patches', 'available_patches'):
            self.assertEqual(result[field], expected[field])

    def test_one(self):
        result = self.get_json('/kube_versions/v1.42.2')

        # Verify that the version has the expected attributes
        self._assert_version_fields(result, FAKE_KUBE_VERSIONS[1])
        self.assertEqual(result['state'], 'available')
        self.assertEqual(result['target'], False)

    def test_one_active(self):
        result = self.get_json('/kube_versions/v1.43.1')

        # The active version is also reported as the target.
        self._assert_version_fields(result, FAKE_KUBE_VERSIONS[2])
        self.assertEqual(result['state'], 'active')
        self.assertEqual(result['target'], True)

    def test_bad_version(self):
        # An unknown version must produce an HTTP error from the API.
        self.assertRaises(webtest.app.AppError, self.get_json,
                          '/kube_versions/v1.42.2.unknown')

    def test_all(self):
        data = self.get_json('/kube_versions')
        self.assertEqual(len(FAKE_KUBE_VERSIONS), len(data['kube_versions']))
|
|
@ -0,0 +1,4 @@
|
|||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
|
@ -0,0 +1,547 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""
|
||||
Tests for the kubernetes utilities.
|
||||
"""
|
||||
|
||||
import kubernetes
|
||||
import mock
|
||||
|
||||
from sysinv.common import kubernetes as kube
|
||||
|
||||
from sysinv.tests import base
|
||||
|
||||
FAKE_KUBE_VERSIONS = [
|
||||
{'version': 'v1.41.3',
|
||||
'upgrade_from': [],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.0',
|
||||
'upgrade_from': ['v1.41.3'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.1',
|
||||
'upgrade_from': ['v1.42.0'],
|
||||
'downgrade_to': ['v1.42.0'],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.3',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.1', 'KUBE.2'],
|
||||
'available_patches': ['KUBE.3'],
|
||||
},
|
||||
{'version': 'v1.42.4',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.1', 'KUBE.2'],
|
||||
'available_patches': ['KUBE.3'],
|
||||
},
|
||||
{'version': 'v1.43.1',
|
||||
'upgrade_from': ['v1.42.2'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.11', 'KUBE.12'],
|
||||
'available_patches': ['KUBE.13'],
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def mock_get_kube_versions():
|
||||
return FAKE_KUBE_VERSIONS
|
||||
|
||||
|
||||
def mock_load_kube_config(path):
    """Stand-in for kubernetes.config.load_kube_config; does nothing."""
    return
|
||||
|
||||
|
||||
@mock.patch('kubernetes.config.load_kube_config', mock_load_kube_config)
|
||||
@mock.patch('sysinv.common.kubernetes.get_kube_versions',
|
||||
mock_get_kube_versions)
|
||||
class TestKubeOperator(base.TestCase):
|
||||
|
||||
def setup_result(self):
|
||||
self.single_pod_result = {
|
||||
'test-pod-1': kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="test-pod-1",
|
||||
namespace="test-namespace-1"),
|
||||
status=kubernetes.client.V1PodStatus(
|
||||
conditions=[
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="Initialized"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="False",
|
||||
type="Ready"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="ContainersReady"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="PodScheduled"),
|
||||
],
|
||||
),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="test-container-1",
|
||||
image="test-image-1:imageversion-1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
]
|
||||
),
|
||||
}
|
||||
|
||||
self.no_pod_result = {
|
||||
'test-pod-1': kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[]
|
||||
)
|
||||
}
|
||||
|
||||
self.multiple_pod_result = {
|
||||
'test-pod-1': kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="test-pod-1",
|
||||
namespace="test-namespace-1"),
|
||||
status=kubernetes.client.V1PodStatus(
|
||||
conditions=[
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="Initialized"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="False",
|
||||
type="Ready"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="ContainersReady"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="PodScheduled"),
|
||||
],
|
||||
),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="test-container-1",
|
||||
image="test-image-1:imageversion-1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="test-pod-1",
|
||||
namespace="test-namespace-1"),
|
||||
status=kubernetes.client.V1PodStatus(
|
||||
conditions=[
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="Initialized"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="False",
|
||||
type="Ready"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="ContainersReady"),
|
||||
kubernetes.client.V1PodCondition(
|
||||
status="True",
|
||||
type="PodScheduled"),
|
||||
],
|
||||
),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="test-container-2",
|
||||
image="test-image-2:imageversion-2"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
}
|
||||
|
||||
self.cp_pods_result = {
|
||||
'kube-apiserver-test-node-1':
|
||||
kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kube-apiserver-test-node-1",
|
||||
namespace="kube-system"),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="kube-apiserver",
|
||||
image="test-image-1:v1.42.1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
'kube-controller-manager-test-node-1':
|
||||
kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kube-controller-manager-test-node-1",
|
||||
namespace="kube-system"),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="kube-controller-manager",
|
||||
image="test-image-2:v1.42.1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
'kube-scheduler-test-node-1':
|
||||
kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kube-scheduler-test-node-1",
|
||||
namespace="kube-system"),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="kube-scheduler",
|
||||
image="test-image-3:v1.42.1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
'kube-apiserver-test-node-2':
|
||||
kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kube-apiserver-test-node-2",
|
||||
namespace="kube-system"),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="kube-apiserver",
|
||||
image="test-image-1:v1.42.1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
'kube-controller-manager-test-node-2':
|
||||
kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kube-controller-manager-test-node-2",
|
||||
namespace="kube-system"),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="kube-controller-manager",
|
||||
image="test-image-2:v1.42.1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
'kube-scheduler-test-node-2':
|
||||
kubernetes.client.V1PodList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Pod",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kube-scheduler-test-node-2",
|
||||
namespace="kube-system"),
|
||||
spec=kubernetes.client.V1PodSpec(
|
||||
containers=[
|
||||
kubernetes.client.V1Container(
|
||||
name="kube-scheduler",
|
||||
image="test-image-3:v1.42.1"),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
}
|
||||
|
||||
self.single_node_result = kubernetes.client.V1NodeList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Node",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="test-node-1",
|
||||
namespace="test-namespace-1"),
|
||||
status=kubernetes.client.V1NodeStatus(
|
||||
node_info=kubernetes.client.V1NodeSystemInfo(
|
||||
architecture="fake-architecture",
|
||||
boot_id="fake-boot-id",
|
||||
container_runtime_version="fake-cr-version",
|
||||
kernel_version="fake-kernel-version",
|
||||
kube_proxy_version="fake-proxy-version",
|
||||
kubelet_version="v1.42.4",
|
||||
machine_id="fake-machine-id",
|
||||
operating_system="fake-os",
|
||||
os_image="fake-os-image",
|
||||
system_uuid="fake-system-uuid"))
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
self.multi_node_result = kubernetes.client.V1NodeList(
|
||||
api_version="v1",
|
||||
items=[
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Node",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="test-node-1",
|
||||
namespace="test-namespace-1"),
|
||||
status=kubernetes.client.V1NodeStatus(
|
||||
node_info=kubernetes.client.V1NodeSystemInfo(
|
||||
architecture="fake-architecture",
|
||||
boot_id="fake-boot-id",
|
||||
container_runtime_version="fake-cr-version",
|
||||
kernel_version="fake-kernel-version",
|
||||
kube_proxy_version="fake-proxy-version",
|
||||
kubelet_version="v1.42.4",
|
||||
machine_id="fake-machine-id",
|
||||
operating_system="fake-os",
|
||||
os_image="fake-os-image",
|
||||
system_uuid="fake-system-uuid"))
|
||||
),
|
||||
kubernetes.client.V1Pod(
|
||||
api_version="v1",
|
||||
kind="Node",
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="test-node-2",
|
||||
namespace="test-namespace-1"),
|
||||
status=kubernetes.client.V1NodeStatus(
|
||||
node_info=kubernetes.client.V1NodeSystemInfo(
|
||||
architecture="fake-architecture",
|
||||
boot_id="fake-boot-id",
|
||||
container_runtime_version="fake-cr-version",
|
||||
kernel_version="fake-kernel-version",
|
||||
kube_proxy_version="fake-proxy-version",
|
||||
kubelet_version="v1.42.3",
|
||||
machine_id="fake-machine-id",
|
||||
operating_system="fake-os",
|
||||
os_image="fake-os-image",
|
||||
system_uuid="fake-system-uuid"))
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
def setUp(self):
|
||||
super(TestKubeOperator, self).setUp()
|
||||
|
||||
self.setup_result()
|
||||
|
||||
self.list_namespaced_pod_result = None
|
||||
|
||||
def mock_list_namespaced_pod(obj, namespace, field_selector=""):
|
||||
pod_name = field_selector.split('metadata.name=', 1)[1]
|
||||
return self.list_namespaced_pod_result[pod_name]
|
||||
self.mocked_list_namespaced_pod = mock.patch(
|
||||
'kubernetes.client.CoreV1Api.list_namespaced_pod',
|
||||
mock_list_namespaced_pod)
|
||||
self.mocked_list_namespaced_pod.start()
|
||||
|
||||
self.list_node_result = None
|
||||
|
||||
def mock_list_node(obj, label_selector=""):
|
||||
return self.list_node_result
|
||||
self.mocked_list_node = mock.patch(
|
||||
'kubernetes.client.CoreV1Api.list_node',
|
||||
mock_list_node)
|
||||
self.mocked_list_node.start()
|
||||
|
||||
self.kube_operator = kube.KubeOperator()
|
||||
|
||||
def tearDown(self):
|
||||
super(TestKubeOperator, self).tearDown()
|
||||
|
||||
self.mocked_list_namespaced_pod.stop()
|
||||
self.mocked_list_node.stop()
|
||||
|
||||
def test_kube_get_image_by_pod_name(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.single_pod_result
|
||||
|
||||
result = self.kube_operator.kube_get_image_by_pod_name(
|
||||
'test-pod-1', 'test-namespace-1', 'test-container-1')
|
||||
assert result == "test-image-1:imageversion-1"
|
||||
|
||||
def test_kube_get_image_by_pod_name_no_pod(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.no_pod_result
|
||||
|
||||
result = self.kube_operator.kube_get_image_by_pod_name(
|
||||
'test-pod-1', 'test-namespace-1', 'test-container-1')
|
||||
assert result is None
|
||||
|
||||
def test_kube_get_image_by_pod_name_multiple_pods(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.multiple_pod_result
|
||||
|
||||
result = self.kube_operator.kube_get_image_by_pod_name(
|
||||
'test-pod-1', 'test-namespace-1', 'test-container-1')
|
||||
assert result == "test-image-1:imageversion-1"
|
||||
|
||||
def test_kube_get_control_plane_versions(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.single_node_result
|
||||
|
||||
result = self.kube_operator.kube_get_control_plane_versions()
|
||||
assert result == {'test-node-1': 'v1.42.1'}
|
||||
|
||||
def test_kube_get_control_plane_versions_mixed_versions(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.single_node_result
|
||||
|
||||
self.cp_pods_result['kube-controller-manager-test-node-1'].items[0].\
|
||||
spec.containers[0].image = "test-image-2:v1.42.0"
|
||||
|
||||
result = self.kube_operator.kube_get_control_plane_versions()
|
||||
assert result == {'test-node-1': 'v1.42.0'}
|
||||
|
||||
def test_kube_get_control_plane_versions_multi_node(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.multi_node_result
|
||||
|
||||
result = self.kube_operator.kube_get_control_plane_versions()
|
||||
assert result == {'test-node-1': 'v1.42.1',
|
||||
'test-node-2': 'v1.42.1'}
|
||||
|
||||
def test_kube_get_control_plane_versions_multi_node_mixed_versions(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.multi_node_result
|
||||
|
||||
self.cp_pods_result['kube-controller-manager-test-node-1'].items[0].\
|
||||
spec.containers[0].image = "test-image-2:v1.42.0"
|
||||
self.cp_pods_result['kube-scheduler-test-node-2'].items[0].\
|
||||
spec.containers[0].image = "test-image-3:v1.42.3"
|
||||
|
||||
result = self.kube_operator.kube_get_control_plane_versions()
|
||||
assert result == {'test-node-1': 'v1.42.0',
|
||||
'test-node-2': 'v1.42.1'}
|
||||
|
||||
def test_kube_get_kubelet_versions(self):
|
||||
|
||||
self.list_node_result = self.single_node_result
|
||||
|
||||
result = self.kube_operator.kube_get_kubelet_versions()
|
||||
assert result == {'test-node-1': 'v1.42.4'}
|
||||
|
||||
def test_kube_get_kubelet_versions_multi_node(self):
|
||||
|
||||
self.list_node_result = self.multi_node_result
|
||||
|
||||
result = self.kube_operator.kube_get_kubelet_versions()
|
||||
assert result == {'test-node-1': 'v1.42.4',
|
||||
'test-node-2': 'v1.42.3'}
|
||||
|
||||
def test_kube_get_version_states(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.single_node_result
|
||||
|
||||
result = self.kube_operator.kube_get_version_states()
|
||||
|
||||
assert result == {'v1.41.3': 'available',
|
||||
'v1.42.0': 'available',
|
||||
'v1.42.1': 'partial',
|
||||
'v1.42.3': 'available',
|
||||
'v1.42.4': 'partial',
|
||||
'v1.43.1': 'available'}
|
||||
|
||||
def test_kube_get_version_states_active(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.single_node_result
|
||||
self.single_node_result.items[0].status.node_info.kubelet_version = \
|
||||
"v1.42.1"
|
||||
|
||||
result = self.kube_operator.kube_get_version_states()
|
||||
assert result == {'v1.41.3': 'available',
|
||||
'v1.42.0': 'available',
|
||||
'v1.42.1': 'active',
|
||||
'v1.42.3': 'available',
|
||||
'v1.42.4': 'available',
|
||||
'v1.43.1': 'available'}
|
||||
|
||||
def test_kube_get_version_states_multi_node(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.list_node_result = self.multi_node_result
|
||||
|
||||
result = self.kube_operator.kube_get_version_states()
|
||||
assert result == {'v1.41.3': 'available',
|
||||
'v1.42.0': 'available',
|
||||
'v1.42.1': 'partial',
|
||||
'v1.42.3': 'partial',
|
||||
'v1.42.4': 'partial',
|
||||
'v1.43.1': 'available'}
|
||||
|
||||
def test_kube_get_version_states_ignore_unknown_version(self):
|
||||
|
||||
self.list_namespaced_pod_result = self.cp_pods_result
|
||||
self.cp_pods_result['kube-controller-manager-test-node-1'].items[0].\
|
||||
spec.containers[0].image = "test-image-2:v1.48.0"
|
||||
self.list_node_result = self.single_node_result
|
||||
self.single_node_result.items[0].status.node_info.kubelet_version = \
|
||||
"v1.49.1"
|
||||
|
||||
result = self.kube_operator.kube_get_version_states()
|
||||
assert result == {'v1.41.3': 'available',
|
||||
'v1.42.0': 'available',
|
||||
'v1.42.1': 'active',
|
||||
'v1.42.3': 'available',
|
||||
'v1.42.4': 'available',
|
||||
'v1.43.1': 'available'}
|
|
@ -0,0 +1,84 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
import sysinv.common.exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv import objects
|
||||
from sysinv.tests import base
|
||||
|
||||
FAKE_KUBE_VERSIONS = [
|
||||
{'version': 'v1.42.1',
|
||||
'upgrade_from': [],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.2',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.1', 'KUBE.2'],
|
||||
'available_patches': ['KUBE.3'],
|
||||
},
|
||||
{'version': 'v1.43.1',
|
||||
'upgrade_from': ['v1.42.2'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': ['KUBE.11', 'KUBE.12'],
|
||||
'available_patches': ['KUBE.13'],
|
||||
},
|
||||
{'version': 'v1.43.2',
|
||||
'upgrade_from': ['v1.43.1', 'v1.42.2'],
|
||||
'downgrade_to': ['v1.43.1'],
|
||||
'applied_patches': ['KUBE.14', 'KUBE.15'],
|
||||
'available_patches': ['KUBE.16'],
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def mock_get_kube_versions():
|
||||
return FAKE_KUBE_VERSIONS
|
||||
|
||||
|
||||
@mock.patch('sysinv.common.kubernetes.get_kube_versions',
            mock_get_kube_versions)
class TestKubeVersionObject(base.TestCase):
    """Tests for the KubeVersion object."""

    def setUp(self):
        super(TestKubeVersionObject, self).setUp()
        # Replace the static version list with the fake data.
        kubernetes.KUBE_VERSIONS = FAKE_KUBE_VERSIONS

    def test_get_by_version(self):
        version_obj = objects.kube_version.get_by_version('v1.42.2')

        # The object mirrors the static version data.
        expected = FAKE_KUBE_VERSIONS[1]
        for field in ('version', 'upgrade_from', 'downgrade_to',
                      'applied_patches', 'available_patches'):
            self.assertEqual(getattr(version_obj, field), expected[field])
        self.assertEqual(version_obj.state, 'unknown')

    def test_get_by_version_fail(self):
        self.assertRaises(sysinv.common.exception.KubeVersionNotFound,
                          objects.kube_version.get_by_version,
                          'v1.42.22')

    def test_can_upgrade_from(self):
        version = objects.kube_version.get_by_version('v1.43.2')

        self.assertEqual(version.can_upgrade_from('v1.43.1'), True)
        self.assertEqual(version.can_upgrade_from('v1.42.2'), True)
        self.assertEqual(version.can_upgrade_from('v1.42.1'), False)

    def test_can_downgrade_to(self):
        version = objects.kube_version.get_by_version('v1.43.2')

        self.assertEqual(version.can_downgrade_to('v1.43.1'), True)
        self.assertEqual(version.can_downgrade_to('v1.42.1'), False)
|
Loading…
Reference in New Issue